diff --git a/.docs/Notebooks/export_vtk_tutorial.py b/.docs/Notebooks/export_vtk_tutorial.py index cdaaaac92..c21d40919 100644 --- a/.docs/Notebooks/export_vtk_tutorial.py +++ b/.docs/Notebooks/export_vtk_tutorial.py @@ -30,6 +30,7 @@ import os import sys from pathlib import Path +from pprint import pformat from tempfile import TemporaryDirectory import numpy as np @@ -37,17 +38,17 @@ import flopy from flopy.export import vtk -sys.path.append(os.path.join("..", "common")) -import notebook_utils - print(sys.version) print(f"flopy version: {flopy.__version__}") # - # load model for examples nam_file = "freyberg.nam" -prj_root = notebook_utils.get_project_root_path() -model_ws = prj_root / "examples" / "data" / "freyberg_multilayer_transient" +model_ws = Path( + os.path.join( + "..", "..", "examples", "data", "freyberg_multilayer_transient" + ) +) ml = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False) # Create a temporary workspace. @@ -375,9 +376,330 @@ # # The `Vtk` class supports writing MODPATH pathline/timeseries data to a VTK file. To start the example, let's first load and run a MODPATH simulation (see flopy3_modpath7_unstructured_example for details) and then add the output to a `Vtk` object. + # + # load and run the vertex grid model and modpath7 -notebook_utils.run(workspace) +def run_vertex_grid_example(ws): + """load and run vertex grid example""" + if not os.path.exists(ws): + os.mkdir(ws) + + from flopy.utils.gridgen import Gridgen + + Lx = 10000.0 + Ly = 10500.0 + nlay = 3 + nrow = 21 + ncol = 20 + delr = Lx / ncol + delc = Ly / nrow + top = 400 + botm = [220, 200, 0] + + ms = flopy.modflow.Modflow() + dis5 = flopy.modflow.ModflowDis( + ms, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=delr, + delc=delc, + top=top, + botm=botm, + ) + + model_name = "mp7p2" + model_ws = os.path.join(ws, "mp7_ex2", "mf6") + gridgen_ws = os.path.join(model_ws, "gridgen") + g = Gridgen(ms.modelgrid, model_ws=gridgen_ws) + + rf0shp = os.path.join(gridgen_ws, "rf0") + xmin = 7 * delr + xmax = 12 * delr + ymin = 8 * delc + ymax = 13 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 1, range(nlay)) + + rf1shp = os.path.join(gridgen_ws, "rf1") + xmin = 8 * delr + xmax = 11 * delr + ymin = 9 * delc + ymax = 12 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 2, range(nlay)) + + rf2shp = os.path.join(gridgen_ws, "rf2") + xmin = 9 * delr + xmax = 10 * delr + ymin = 10 * delc + ymax = 11 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 3, range(nlay)) + + g.build(verbose=False) + + gridprops = g.get_gridprops_disv() + ncpl = gridprops["ncpl"] + top = gridprops["top"] + botm = gridprops["botm"] + nvert = gridprops["nvert"] + vertices = gridprops["vertices"] + cell2d = gridprops["cell2d"] + # cellxy = gridprops['cellxy'] + + # create simulation + sim = flopy.mf6.MFSimulation( + sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=model_ws + ) + + # create tdis package + tdis_rc = [(1000.0, 1, 1.0)] + tdis = flopy.mf6.ModflowTdis( + sim, pname="tdis", time_units="DAYS", perioddata=tdis_rc + ) + + # create gwf model + gwf = flopy.mf6.ModflowGwf( + sim, modelname=model_name, model_nam_file=f"{model_name}.nam" + ) + gwf.name_file.save_flows = 
True + + # create iterative model solution and register the gwf model with it + ims = flopy.mf6.ModflowIms( + sim, + pname="ims", + print_option="SUMMARY", + complexity="SIMPLE", + outer_hclose=1.0e-5, + outer_maximum=100, + under_relaxation="NONE", + inner_maximum=100, + inner_hclose=1.0e-6, + rcloserecord=0.1, + linear_acceleration="BICGSTAB", + scaling_method="NONE", + reordering_method="NONE", + relaxation_factor=0.99, + ) + sim.register_ims_package(ims, [gwf.name]) + + # disv + disv = flopy.mf6.ModflowGwfdisv( + gwf, + nlay=nlay, + ncpl=ncpl, + top=top, + botm=botm, + nvert=nvert, + vertices=vertices, + cell2d=cell2d, + ) + + # initial conditions + ic = flopy.mf6.ModflowGwfic(gwf, pname="ic", strt=320.0) + + # node property flow + npf = flopy.mf6.ModflowGwfnpf( + gwf, + xt3doptions=[("xt3d")], + save_specific_discharge=True, + icelltype=[1, 0, 0], + k=[50.0, 0.01, 200.0], + k33=[10.0, 0.01, 20.0], + ) + + # wel + wellpoints = [(4750.0, 5250.0)] + welcells = g.intersect(wellpoints, "point", 0) + # welspd = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=1, aux_vars=['iface']) + welspd = [[(2, icpl), -150000, 0] for icpl in welcells["nodenumber"]] + wel = flopy.mf6.ModflowGwfwel( + gwf, + print_input=True, + auxiliary=[("iface",)], + stress_period_data=welspd, + ) + + # rch + aux = [np.ones(ncpl, dtype=int) * 6] + rch = flopy.mf6.ModflowGwfrcha( + gwf, recharge=0.005, auxiliary=[("iface",)], aux={0: [6]} + ) + # riv + riverline = [[(Lx - 1.0, Ly), (Lx - 1.0, 0.0)]] + rivcells = g.intersect(riverline, "line", 0) + rivspd = [ + [(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"] + ] + riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd) + + # output control + oc = flopy.mf6.ModflowGwfoc( + gwf, + pname="oc", + budget_filerecord=f"{model_name}.cbb", + head_filerecord=f"{model_name}.hds", + headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + sim.write_simulation() + success, buff = sim.run_simulation(silent=True, report=True) + if success: + for line in buff: + print(line) + else: + raise ValueError("Failed to run.") + + mp_namea = f"{model_name}a_mp" + mp_nameb = f"{model_name}b_mp" + + pcoord = np.array( + [ + [0.000, 0.125, 0.500], + [0.000, 0.375, 0.500], + [0.000, 0.625, 0.500], + [0.000, 0.875, 0.500], + [1.000, 0.125, 0.500], + [1.000, 0.375, 0.500], + [1.000, 0.625, 0.500], + [1.000, 0.875, 0.500], + [0.125, 0.000, 0.500], + [0.375, 0.000, 0.500], + [0.625, 0.000, 0.500], + [0.875, 0.000, 0.500], + [0.125, 1.000, 0.500], + [0.375, 1.000, 0.500], + [0.625, 1.000, 0.500], + [0.875, 1.000, 0.500], + ] + ) + nodew = gwf.disv.ncpl.array * 2 + welcells["nodenumber"][0] + plocs = [nodew for i in range(pcoord.shape[0])] + + # create particle data + pa = flopy.modpath.ParticleData( + plocs, + structured=False, + localx=pcoord[:, 0], + localy=pcoord[:, 1], + localz=pcoord[:, 2], + drape=0, + ) + + # create backward particle group + fpth = f"{mp_namea}.sloc" + pga = flopy.modpath.ParticleGroup( + particlegroupname="BACKWARD1", particledata=pa, filename=fpth + ) + + facedata = flopy.modpath.FaceDataType( + drape=0, + verticaldivisions1=10, + horizontaldivisions1=10, + verticaldivisions2=10, + horizontaldivisions2=10, + verticaldivisions3=10, + horizontaldivisions3=10, + verticaldivisions4=10, + horizontaldivisions4=10, + rowdivisions5=0, + columndivisions5=0, + rowdivisions6=4, + columndivisions6=4, + ) + pb = 
flopy.modpath.NodeParticleData(subdivisiondata=facedata, nodes=nodew) + # create forward particle group + fpth = f"{mp_nameb}.sloc" + pgb = flopy.modpath.ParticleGroupNodeTemplate( + particlegroupname="BACKWARD2", particledata=pb, filename=fpth + ) + + # create modpath files + mp = flopy.modpath.Modpath7( + modelname=mp_namea, flowmodel=gwf, exe_name="mp7", model_ws=model_ws + ) + flopy.modpath.Modpath7Bas(mp, porosity=0.1) + flopy.modpath.Modpath7Sim( + mp, + simulationtype="combined", + trackingdirection="backward", + weaksinkoption="pass_through", + weaksourceoption="pass_through", + referencetime=0.0, + stoptimeoption="extend", + timepointdata=[500, 1000.0], + particlegroups=pga, + ) + + # write modpath datasets + mp.write_input() + + # run modpath + success, buff = mp.run_model(silent=True, report=True) + if success: + for line in buff: + print(line) + else: + raise ValueError("Failed to run.") + + # create modpath files + mp = flopy.modpath.Modpath7( + modelname=mp_nameb, flowmodel=gwf, exe_name="mp7", model_ws=model_ws + ) + flopy.modpath.Modpath7Bas(mp, porosity=0.1) + flopy.modpath.Modpath7Sim( + mp, + simulationtype="endpoint", + trackingdirection="backward", + weaksinkoption="pass_through", + weaksourceoption="pass_through", + referencetime=0.0, + stoptimeoption="extend", + particlegroups=pgb, + ) + + # write modpath datasets + mp.write_input() + + # run modpath + success, buff = mp.run_model(silent=True, report=True) + assert success, pformat(buff) + + +run_vertex_grid_example(workspace) # check if model ran properly modelpth = workspace / "mp7_ex2" / "mf6" diff --git a/.docs/Notebooks/groundwater2023_watershed_example.py b/.docs/Notebooks/groundwater2023_watershed_example.py index 686520926..8621505dd 100644 --- a/.docs/Notebooks/groundwater2023_watershed_example.py +++ b/.docs/Notebooks/groundwater2023_watershed_example.py @@ -29,12 +29,15 @@ import matplotlib.gridspec as gridspec import matplotlib.pyplot as plt import numpy as np -from shapely.geometry import LineString +import shapely +import yaml +from shapely.geometry import LineString, Polygon import flopy import flopy.plot.styles as styles from flopy.discretization import StructuredGrid, VertexGrid from flopy.utils.gridgen import Gridgen +from flopy.utils.gridintersect import GridIntersect from flopy.utils.triangle import Triangle from flopy.utils.voronoi import VoronoiGrid @@ -43,13 +46,70 @@ print(f"matplotlib version: {mpl.__version__}") print(f"flopy version: {flopy.__version__}") -# import all plot style information from defaults.py -sys.path.append("../common") -from groundwater2023_utils import ( - densify_geometry, - geometries, - set_idomain, - string2geom, + +# define a few utility functions +def string2geom(geostring, conversion=None): + if conversion is None: + multiplier = 1.0 + else: + multiplier = float(conversion) + res = [] + for line in geostring.split("\n"): + if not any(line): + continue + line = line.strip() + line = line.split(" ") + x = float(line[0]) * multiplier + y = float(line[1]) * multiplier + res.append((x, y)) + return res + + +def densify_geometry(line, step, keep_internal_nodes=True): + xy = [] # list of tuple of coordinates + lines_strings = [] + if keep_internal_nodes: + for idx in range(1, len(line)): + lines_strings.append( + shapely.geometry.LineString(line[idx - 1 : idx + 1]) + ) + else: + lines_strings = [shapely.geometry.LineString(line)] + + for line_string in lines_strings: + length_m = line_string.length # get the length + for distance in np.arange(0, length_m + step, step): + 
point = line_string.interpolate(distance) + xy_tuple = (point.x, point.y) + if xy_tuple not in xy: + xy.append(xy_tuple) + # make sure the end point is in xy + if keep_internal_nodes: + xy_tuple = line_string.coords[-1] + if xy_tuple not in xy: + xy.append(xy_tuple) + + return xy + + +# function to set the active and inactive model area +def set_idomain(grid, boundary): + ix = GridIntersect(grid, method="vertex", rtree=True) + result = ix.intersect(Polygon(boundary)) + idx = [coords for coords in result.cellids] + idx = np.array(idx, dtype=int) + nr = idx.shape[0] + if idx.ndim == 1: + idx = idx.reshape((nr, 1)) + idx = tuple([idx[:, i] for i in range(idx.shape[1])]) + idomain = np.zeros(grid.shape[1:], dtype=int) + idomain[idx] = 1 + idomain = idomain.reshape(grid.shape) + grid.idomain = idomain + + +geometries = yaml.safe_load( + open(pl.Path("../../examples/data/groundwater2023/geometries.yml")) ) # basic figure size diff --git a/.docs/Notebooks/lgr_tutorial01.py b/.docs/Notebooks/mf6_lgr_tutorial01.py similarity index 99% rename from .docs/Notebooks/lgr_tutorial01.py rename to .docs/Notebooks/mf6_lgr_tutorial01.py index 72b9c12dd..172756ccf 100644 --- a/.docs/Notebooks/lgr_tutorial01.py +++ b/.docs/Notebooks/mf6_lgr_tutorial01.py @@ -462,23 +462,11 @@ # retrieve the exchange data from the lgr object exchangedata = lgr.get_exchange_data(angldegx=True, cdist=True) nexg = len(exchangedata) - - # When creating the exchange, which couples the child and parent - # models, use the xt3d option, which is an alternative to the - # ghost-node correction. This xt3d option was added as a new - # capability for the gwt-gwt and gwf-gwf exchanges in MODFLOW version 6.3.0. - exg = flopy.mf6.ModflowGwtgwt( - sim, - exgtype="GWT6-GWT6", - gwfmodelname1=gwfp.name, - gwfmodelname2=gwfc.name, - # xt3d=True, - auxiliary=["angldegx", "cdist"], - exgmnamea=pname, - exgmnameb=cname, - nexg=nexg, - exchangedata=exchangedata, - ) + exg_data = { + "filename": "exg_data.bin", + "data": exchangedata, + "binary": True, + } # Set up the parent model and use the lgr.parent object to # help provide the necessary information. @@ -522,6 +510,23 @@ ) sim.register_ims_package(ims_tran, [gwtp.name, gwtc.name]) + # When creating the exchange, which couples the child and parent + # models, use the xt3d option, which is an alternative to the + # ghost-node correction. This xt3d option was added as a new + # capability for the gwt-gwt and gwf-gwf exchanges in MODFLOW version 6.3.0. 
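+    # Note that exchangedata is supplied here via the exg_data dict
+    # assembled above (with "filename" and "binary" keys), so the
+    # exchange records are written to the external binary file
+    # "exg_data.bin" rather than inline in the exchange input file.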
+ exg = flopy.mf6.ModflowGwtgwt( + sim, + exgtype="GWT6-GWT6", + gwfmodelname1=gwfp.name, + gwfmodelname2=gwfc.name, + # xt3d=True, + auxiliary=["angldegx", "cdist"], + exgmnamea=pname, + exgmnameb=cname, + nexg=nexg, + exchangedata=exg_data, + ) + # couple flow and transport models gwfgwt_p = flopy.mf6.ModflowGwfgwt( sim, diff --git a/.docs/Notebooks/mf6_parallel_model_splitting_example.py b/.docs/Notebooks/mf6_parallel_model_splitting_example.py index 77c4019be..a753f1fcb 100644 --- a/.docs/Notebooks/mf6_parallel_model_splitting_example.py +++ b/.docs/Notebooks/mf6_parallel_model_splitting_example.py @@ -26,14 +26,35 @@ import matplotlib.pyplot as plt import numpy as np +import yaml import flopy from flopy.mf6.utils import Mf6Splitter from flopy.plot import styles from flopy.utils.geometry import LineString, Polygon -sys.path.append("../common") -from notebook_utils import geometries, string2geom +geometries = yaml.safe_load( + open(Path("../../examples/data/groundwater2023/geometries.yml")) +) + + +# define a few utility functions +def string2geom(geostring, conversion=None): + if conversion is None: + multiplier = 1.0 + else: + multiplier = float(conversion) + res = [] + for line in geostring.split("\n"): + if not any(line): + continue + line = line.strip() + line = line.split(" ") + x = float(line[0]) * multiplier + y = float(line[1]) * multiplier + res.append((x, y)) + return res + # ## Example 1: splitting a simple structured grid model # diff --git a/.docs/Notebooks/mf6_simple_model_example.py b/.docs/Notebooks/mf6_simple_model_example.py index 2a173c4ed..aecc57ed3 100644 --- a/.docs/Notebooks/mf6_simple_model_example.py +++ b/.docs/Notebooks/mf6_simple_model_example.py @@ -20,9 +20,8 @@ # ### Setup the Notebook Environment -import os - # + +import os import sys from pprint import pformat from tempfile import TemporaryDirectory @@ -272,8 +271,9 @@ # read the cell budget file fname = os.path.join(workspace, f"{name}.cbb") cbb = flopy.utils.CellBudgetFile(fname, precision="double") -cbb.list_records() +cbb.headers.T +# + flowja = cbb.get_data(text="FLOW-JA-FACE")[0][0, 0, :] chdflow = cbb.get_data(text="CHD")[0] # - diff --git a/.docs/Notebooks/mfusg_conduit_examples.py b/.docs/Notebooks/mfusg_conduit_examples.py index bf15054bf..187cabd62 100644 --- a/.docs/Notebooks/mfusg_conduit_examples.py +++ b/.docs/Notebooks/mfusg_conduit_examples.py @@ -104,8 +104,9 @@ # + cbb_file = os.path.join(mf.model_ws, "ex3.clncbb") cbb = flopy.utils.CellBudgetFile(cbb_file) -# cbb.list_records() +cbb.headers +# + simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0] for i in range(nper - 1): simflow = np.append( @@ -297,8 +298,9 @@ # + cbb_file = os.path.join(mf.model_ws, f"{modelname}.clncb") cbb = flopy.utils.CellBudgetFile(cbb_file) -# cbb.list_records() +cbb.headers +# + simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0] for i in range(nper - 1): simflow = np.append( @@ -392,8 +394,9 @@ # + cbb_file = os.path.join(mf.model_ws, f"{modelname}.clncb") cbb = flopy.utils.CellBudgetFile(cbb_file) -# cbb.list_records() +cbb.headers +# + simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0] for i in range(nper - 1): simflow = np.append( @@ -490,8 +493,9 @@ # + cbb_file = os.path.join(mf.model_ws, f"{modelname}.clncb") cbb = flopy.utils.CellBudgetFile(cbb_file) -# cbb.list_records() +cbb.headers +# + simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0] for i in range(nper - 1): simflow = np.append( @@ -575,8 +579,9 @@ # + cbb_file = os.path.join(mf.model_ws, f"{modelname}.clncb") cbb = 
flopy.utils.CellBudgetFile(cbb_file) -# cbb.list_records() +cbb.headers +# + simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0] for i in range(nper - 1): simflow = np.append( diff --git a/.docs/Notebooks/modpath7_structured_example.py b/.docs/Notebooks/modpath7_structured_example.py deleted file mode 100644 index 299da4b2d..000000000 --- a/.docs/Notebooks/modpath7_structured_example.py +++ /dev/null @@ -1,484 +0,0 @@ -# --- -# jupyter: -# jupytext: -# notebook_metadata_filter: all -# text_representation: -# extension: .py -# format_name: light -# format_version: '1.5' -# jupytext_version: 1.14.5 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# metadata: -# section: modpath -# authors: -# - name: Joseph Hughes -# --- - -# # Using MODPATH 7 with structured grids -# -# This notebook demonstrates how to create and run example 1a from the MODPATH 7 documentation for MODFLOW-2005 and MODFLOW 6. The notebooks also shows how to create subsets of endpoint output and plot MODPATH results on PlotMapView objects. - -import os - -# + -import sys -from tempfile import TemporaryDirectory - -import matplotlib as mpl -import matplotlib.pyplot as plt -import numpy as np -from numpy.lib.recfunctions import repack_fields - -import flopy - -print(sys.version) -print(f"numpy version: {np.__version__}") -print(f"matplotlib version: {mpl.__version__}") -print(f"flopy version: {flopy.__version__}") - -# temporary directory -temp_dir = TemporaryDirectory() -workspace = temp_dir.name -# - - -# ### Flow model data - -nper, nstp, perlen, tsmult = 1, 1, 1.0, 1.0 -nlay, nrow, ncol = 3, 21, 20 -delr = delc = 500.0 -top = 400.0 -botm = [220.0, 200.0, 0.0] -laytyp = [1, 0, 0] -kh = [50.0, 0.01, 200.0] -kv = [10.0, 0.01, 20.0] -wel_loc = (2, 10, 9) -wel_q = -150000.0 -rch = 0.005 -riv_h = 320.0 -riv_z = 317.0 -riv_c = 1.0e5 - -# ### MODPATH 7 data - -# + -# MODPATH zones -zone3 = np.ones((nrow, ncol), dtype=np.int32) -zone3[wel_loc[1:]] = 2 -zones = [1, 1, zone3] - -# create particles -# particle group 1 -plocs = [] -pids = [] -for idx in range(nrow): - plocs.append((0, idx, 2)) - pids.append(idx) -part0 = flopy.modpath.ParticleData( - plocs, drape=0, structured=True, particleids=pids -) -pg0 = flopy.modpath.ParticleGroup( - particlegroupname="PG1", particledata=part0, filename="ex01a.pg1.sloc" -) - -# particle group 2 -v = [(2, 0, 0), (0, 20, 0)] -part1 = flopy.modpath.ParticleData( - v, drape=1, structured=True, particleids=[1000, 1001] -) -pg1 = flopy.modpath.ParticleGroup( - particlegroupname="PG2", particledata=part1, filename="ex01a.pg2.sloc" -) - -locsa = [[0, 0, 0, 0, nrow - 1, ncol - 1], [1, 0, 0, 1, nrow - 1, ncol - 1]] -locsb = [[2, 0, 0, 2, nrow - 1, ncol - 1]] -sd = flopy.modpath.CellDataType( - drape=0, columncelldivisions=1, rowcelldivisions=1, layercelldivisions=1 -) -p = flopy.modpath.LRCParticleData( - subdivisiondata=[sd, sd], lrcregions=[locsa, locsb] -) -pg2 = flopy.modpath.ParticleGroupLRCTemplate( - particlegroupname="PG3", particledata=p, filename="ex01a.pg3.sloc" -) - -particlegroups = [pg2] - -# default iface for MODFLOW-2005 and MODFLOW 6 -defaultiface = {"RECHARGE": 6, "ET": 6} -defaultiface6 = {"RCH": 6, "EVT": 6} -# - - -# ### MODPATH 7 using MODFLOW-2005 -# -# #### Create and run MODFLOW-2005 - -# + -ws = os.path.join(workspace, "mp7_ex1_mf2005_dis") -nm = "ex01_mf2005" -exe_name = "mf2005" -iu_cbc = 130 -m = flopy.modflow.Modflow(nm, model_ws=ws, exe_name=exe_name) -flopy.modflow.ModflowDis( - m, - nlay=nlay, - nrow=nrow, - ncol=ncol, - 
nper=nper, - itmuni=4, - lenuni=2, - perlen=perlen, - nstp=nstp, - tsmult=tsmult, - steady=True, - delr=delr, - delc=delc, - top=top, - botm=botm, -) -flopy.modflow.ModflowLpf( - m, ipakcb=iu_cbc, laytyp=laytyp, hk=kh, vka=kv, constantcv=True -) -flopy.modflow.ModflowBas(m, ibound=1, strt=top) -# recharge -flopy.modflow.ModflowRch(m, ipakcb=iu_cbc, rech=rch) -# wel -wd = [i for i in wel_loc] + [wel_q] -flopy.modflow.ModflowWel(m, ipakcb=iu_cbc, stress_period_data={0: wd}) -# river -rd = [] -for i in range(nrow): - rd.append([0, i, ncol - 1, riv_h, riv_c, riv_z]) -flopy.modflow.ModflowRiv(m, ipakcb=iu_cbc, stress_period_data={0: rd}) -# output control -flopy.modflow.ModflowOc( - m, stress_period_data={(0, 0): ["save head", "save budget", "print head"]} -) -flopy.modflow.ModflowPcg(m, hclose=1e-6, rclose=1e-6) - -m.write_input() -success, buff = m.run_model(silent=True, report=True) -assert success, "mf2005 model did not run" -for line in buff: - print(line) -# - - -# #### Create and run MODPATH 7 - -# + -# create modpath files -exe_name = "mp7" -mp = flopy.modpath.Modpath7( - modelname=f"{nm}_mp", flowmodel=m, exe_name=exe_name, model_ws=ws -) -mpbas = flopy.modpath.Modpath7Bas(mp, porosity=0.1, defaultiface=defaultiface) -mpsim = flopy.modpath.Modpath7Sim( - mp, - simulationtype="combined", - trackingdirection="forward", - weaksinkoption="pass_through", - weaksourceoption="pass_through", - budgetoutputoption="summary", - budgetcellnumbers=[1049, 1259], - traceparticledata=[1, 1000], - referencetime=[0, 0, 0.0], - stoptimeoption="extend", - timepointdata=[500, 1000.0], - zonedataoption="on", - zones=zones, - particlegroups=particlegroups, -) - -# write modpath datasets -mp.write_input() - -# run modpath -success, buff = mp.run_model(silent=True, report=True) -assert success, "mp7 failed to run" -for line in buff: - print(line) -# - - -# #### Load MODPATH 7 output - -# Get locations to extract pathline data - -nodew = m.dis.get_node([wel_loc]) -riv_locs = repack_fields(m.riv.stress_period_data[0][["k", "i", "j"]]) -nodesr = m.dis.get_node(riv_locs.tolist()) - -# Pathline data - -fpth = os.path.join(ws, f"{nm}_mp.mppth") -p = flopy.utils.PathlineFile(fpth) -pw0 = p.get_destination_pathline_data(nodew, to_recarray=True) -pr0 = p.get_destination_pathline_data(nodesr, to_recarray=True) - -# Endpoint data -# -# Get particles that terminate in the well - -fpth = os.path.join(ws, f"{nm}_mp.mpend") -e = flopy.utils.EndpointFile(fpth) -well_epd = e.get_destination_endpoint_data(dest_cells=nodew) -well_epd.shape - -# Get particles that terminate in the river boundaries - -riv_epd = e.get_destination_endpoint_data(dest_cells=nodesr) -riv_epd.shape - -# Merge the particles that end in the well and the river boundaries. 
- -epd0 = np.concatenate((well_epd, riv_epd)) -epd0.shape - -# #### Plot MODPATH 7 output - -mm = flopy.plot.PlotMapView(model=m) -mm.plot_grid(lw=0.5) -mm.plot_pathline(pw0, layer="all", colors="blue", label="captured by wells") -mm.plot_pathline(pr0, layer="all", colors="green", label="captured by rivers") -mm.plot_endpoint(epd0, direction="starting", colorbar=True) -mm.ax.legend() - -# ### MODPATH 7 using MODFLOW 6 -# -# #### Create and run MODFLOW 6 - -# + -ws = os.path.join(workspace, "mp7_ex1_mf6_dis") -nm = "ex01_mf6" -exe_name = "mf6" - -# Create the Flopy simulation object -sim = flopy.mf6.MFSimulation( - sim_name=nm, exe_name="mf6", version="mf6", sim_ws=ws -) - -# Create the Flopy temporal discretization object -pd = (perlen, nstp, tsmult) -tdis = flopy.mf6.modflow.mftdis.ModflowTdis( - sim, pname="tdis", time_units="DAYS", nper=nper, perioddata=[pd] -) - -# Create the Flopy groundwater flow (gwf) model object -model_nam_file = f"{nm}.nam" -gwf = flopy.mf6.ModflowGwf( - sim, modelname=nm, model_nam_file=model_nam_file, save_flows=True -) - -# Create the Flopy iterative model solver (ims) Package object -ims = flopy.mf6.modflow.mfims.ModflowIms( - sim, - pname="ims", - complexity="SIMPLE", - outer_dvclose=1e-6, - inner_dvclose=1e-6, - rcloserecord=1e-6, -) - -# create gwf file -dis = flopy.mf6.modflow.mfgwfdis.ModflowGwfdis( - gwf, - pname="dis", - nlay=nlay, - nrow=nrow, - ncol=ncol, - length_units="FEET", - delr=delr, - delc=delc, - top=top, - botm=botm, -) -# Create the initial conditions package -ic = flopy.mf6.modflow.mfgwfic.ModflowGwfic(gwf, pname="ic", strt=top) - -# Create the node property flow package -npf = flopy.mf6.modflow.mfgwfnpf.ModflowGwfnpf( - gwf, pname="npf", icelltype=laytyp, k=kh, k33=kv -) - - -# recharge -flopy.mf6.modflow.mfgwfrcha.ModflowGwfrcha(gwf, recharge=rch) -# wel -wd = [(wel_loc, wel_q)] -flopy.mf6.modflow.mfgwfwel.ModflowGwfwel( - gwf, maxbound=1, stress_period_data={0: wd} -) -# river -rd = [] -for i in range(nrow): - rd.append([(0, i, ncol - 1), riv_h, riv_c, riv_z]) -flopy.mf6.modflow.mfgwfriv.ModflowGwfriv(gwf, stress_period_data={0: rd}) -# Create the output control package -headfile = f"{nm}.hds" -head_record = [headfile] -budgetfile = f"{nm}.cbb" -budget_record = [budgetfile] -saverecord = [("HEAD", "ALL"), ("BUDGET", "ALL")] -oc = flopy.mf6.modflow.mfgwfoc.ModflowGwfoc( - gwf, - pname="oc", - saverecord=saverecord, - head_filerecord=head_record, - budget_filerecord=budget_record, -) - -# Write the datasets -sim.write_simulation() -# Run the simulation -success, buff = sim.run_simulation(silent=True, report=True) -assert success, "mf6 model did not run" -for line in buff: - print(line) -# - - -# #### Create and run MODPATH 7 - -# + -# create modpath files -exe_name = "mp7" -mp = flopy.modpath.Modpath7( - modelname=f"{nm}_mp", flowmodel=gwf, exe_name=exe_name, model_ws=ws -) -mpbas = flopy.modpath.Modpath7Bas(mp, porosity=0.1, defaultiface=defaultiface6) -mpsim = flopy.modpath.Modpath7Sim( - mp, - simulationtype="combined", - trackingdirection="forward", - weaksinkoption="pass_through", - weaksourceoption="pass_through", - budgetoutputoption="summary", - budgetcellnumbers=[1049, 1259], - traceparticledata=[1, 1000], - referencetime=[0, 0, 0.0], - stoptimeoption="extend", - timepointdata=[500, 1000.0], - zonedataoption="on", - zones=zones, - particlegroups=particlegroups, -) - -# write modpath datasets -mp.write_input() - -# run modpath -success, buff = mp.run_model(silent=True, report=True) -assert success, "mp7 failed to run" -for line 
in buff: - print(line) -# - - -# #### Load MODPATH 7 output -# -# Pathline data - -fpth = os.path.join(ws, f"{nm}_mp.mppth") -p = flopy.utils.PathlineFile(fpth) -pw1 = p.get_destination_pathline_data(nodew, to_recarray=True) -pr1 = p.get_destination_pathline_data(nodesr, to_recarray=True) - -# Endpoint data -# -# Get particles that terminate in the well - -fpth = os.path.join(ws, f"{nm}_mp.mpend") -e = flopy.utils.EndpointFile(fpth) -well_epd = e.get_destination_endpoint_data(dest_cells=nodew) - -# Get particles that terminate in the river boundaries - -riv_epd = e.get_destination_endpoint_data(dest_cells=nodesr) - -# Merge the particles that end in the well and the river boundaries. - -epd1 = np.concatenate((well_epd, riv_epd)) - -# ### Plot MODPATH 7 output -# - -mm = flopy.plot.PlotMapView(model=gwf) -mm.plot_grid(lw=0.5) -mm.plot_pathline(pw1, layer="all", colors="blue", label="captured by wells") -mm.plot_pathline(pr1, layer="all", colors="green", label="captured by rivers") -mm.plot_endpoint(epd1, direction="starting", colorbar=True) -mm.ax.legend() - -# ### Compare MODPATH results -# -# Compare MODPATH results for MODFLOW-2005 and MODFLOW 6. Also show pathline points every 5th point. - -# + -f, axes = plt.subplots(ncols=3, nrows=1, sharey=True, figsize=(15, 10)) -axes = axes.flatten() -ax = axes[0] -ax.set_aspect("equal") -mm = flopy.plot.PlotMapView(model=m, ax=ax) -mm.plot_grid(lw=0.5) -mm.plot_pathline( - pw0, - layer="all", - colors="blue", - lw=1, - marker="o", - markercolor="black", - markersize=3, - markerevery=5, -) -mm.plot_pathline( - pr0, - layer="all", - colors="green", - lw=1, - marker="o", - markercolor="black", - markersize=3, - markerevery=5, -) -ax.set_title("MODFLOW-2005") - -ax = axes[1] -ax.set_aspect("equal") -mm = flopy.plot.PlotMapView(model=gwf, ax=ax) -mm.plot_grid(lw=0.5) -mm.plot_pathline( - pw1, - layer="all", - colors="blue", - lw=1, - marker="o", - markercolor="black", - markersize=3, - markerevery=5, -) -mm.plot_pathline( - pr1, - layer="all", - colors="green", - lw=1, - marker="o", - markercolor="black", - markersize=3, - markerevery=5, -) -ax.set_title("MODFLOW 6") - - -ax = axes[2] -ax.set_aspect("equal") -mm = flopy.plot.PlotMapView(model=m, ax=ax) -mm.plot_grid(lw=0.5) -mm.plot_pathline(pw1, layer="all", colors="blue", lw=1, label="MODFLOW 6") -mm.plot_pathline( - pw0, layer="all", colors="blue", lw=1, linestyle=":", label="MODFLOW-2005" -) -mm.plot_pathline(pr1, layer="all", colors="green", lw=1, label="_none") -mm.plot_pathline( - pr0, layer="all", colors="green", lw=1, linestyle=":", label="_none" -) -ax.legend() -ax.set_title("MODFLOW 2005 and MODFLOW 6") diff --git a/.docs/Notebooks/modpath7_structured_transient_example.py b/.docs/Notebooks/modpath7_structured_transient_example.py deleted file mode 100644 index c72280b3c..000000000 --- a/.docs/Notebooks/modpath7_structured_transient_example.py +++ /dev/null @@ -1,406 +0,0 @@ -# --- -# jupyter: -# jupytext: -# notebook_metadata_filter: all -# text_representation: -# extension: .py -# format_name: light -# format_version: '1.5' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# language_info: -# codemirror_mode: -# name: ipython -# version: 3 -# file_extension: .py -# mimetype: text/x-python -# name: python -# nbconvert_exporter: python -# pygments_lexer: ipython3 -# version: 3.9.12 -# metadata: -# authors: -# - name: Wes Bonelli -# section: modpath -# --- - -# # Using MODPATH 7 with structured grids (transient example) -# -# 
This notebook reproduces example 3a from the MODPATH 7 documentation, demonstrating a transient MODFLOW 6 simulation based on the same flow system as the basic structured and unstructured examples. Particles are released at 10 20-day intervals for the first 200 days of the simulation. 2 discharge wells are added 100,000 days into the simulation and pump at a constant rate for the remainder. There are three stress periods: -# -# | Stress period | Type | Time steps | Length (days) | -# |:--------------|:-------------|:-----------|:--------------| -# | 1 | steady-state | 1 | 100000 | -# | 2 | transient | 10 | 36500 | -# | 3 | steady-state | 1 | 100000 | -# -# ## Setting up the simulation -# -# First import FloPy and set up a temporary workspace. - -# + -import sys -from pathlib import Path -from tempfile import TemporaryDirectory - -import matplotlib as mpl -import matplotlib.pyplot as plt -import numpy as np - -proj_root = Path.cwd().parent.parent - -import flopy - -print(sys.version) -print(f"numpy version: {np.__version__}") -print(f"matplotlib version: {mpl.__version__}") -print(f"flopy version: {flopy.__version__}") - -temp_dir = TemporaryDirectory() -sim_name = "mp7_ex03a_mf6" -workspace = Path(temp_dir.name) / sim_name -# - - -# Define flow model data. - -nlay, nrow, ncol = 3, 21, 20 -delr = delc = 500.0 -top = 400.0 -botm = [220.0, 200.0, 0.0] -laytyp = [1, 0, 0] -kh = [50.0, 0.01, 200.0] -kv = [10.0, 0.01, 20.0] -rch = 0.005 -riv_h = 320.0 -riv_z = 317.0 -riv_c = 1.0e5 - -# Define well data. Although this notebook will refer to layer/row/column indices starting at 1, indices in FloPy (and more generally in Python) are zero-based. A negative discharge indicates pumping, while a positive value indicates injection. - -wells = [ - # layer, row, col, discharge - (0, 10, 9, -75000), - (2, 12, 4, -100000), -] - -# Define the drain location. - -drain = (0, 14, (9, 20)) - -# Configure locations for particle tracking to terminate. We have three explicitly defined termination zones: -# -# - `2`: the well in layer 1, at row 11, column 10 -# - `3`: the well in layer 3, at row 13, column 5 -# - `4`: the drain in layer 1, running through row 15 from column 10-20 -# -# MODFLOW 6 reserves zone number `1` to indicate that particles may move freely within the zone. -# -# The river running through column 20 is also a termination zone, but it doesn't need to be defined separately since we are using the RIV package. - -# + -zone_maps = [] - - -# zone 1 is the default (non-terminating regions) -def fill_zone_1(): - return np.ones((nrow, ncol), dtype=np.int32) - - -# zone map for layer 1 -za = fill_zone_1() -za[wells[0][1:3]] = 2 -za[drain[1], drain[2][0] : drain[2][1]] = 4 -zone_maps.append(za) - -# constant layer 2 (zone 1) -zone_maps.append(1) - -# zone map for layer 3 -za = fill_zone_1() -za[wells[1][1:3]] = 3 -zone_maps.append(za) -# - - -# Define particles to track. We release particles from the top of a 2x2 square of cells in the upper left of the model grid's top layer. - -rel_minl = rel_maxl = 1 -rel_minr = 2 -rel_maxr = 3 -rel_minc = 2 -rel_maxc = 3 -sd = flopy.modpath.CellDataType( - drape=0 -) # particles added at top of cell (no drape) -pd = flopy.modpath.LRCParticleData( - subdivisiondata=[sd], - lrcregions=[ - [[rel_minl, rel_minr, rel_minc, rel_maxl, rel_maxr, rel_maxc]] - ], -) -pg = flopy.modpath.ParticleGroupLRCTemplate( - particlegroupname="PG1", particledata=pd, filename=f"{sim_name}.pg1.sloc" -) -pgs = [pg] -defaultiface = {"RECHARGE": 6, "ET": 6} - -# Create the MODFLOW 6 simulation. 
- -# + -# simulation -sim = flopy.mf6.MFSimulation( - sim_name=sim_name, exe_name="mf6", version="mf6", sim_ws=workspace -) - -# temporal discretization -nper = 3 -pd = [ - # perlen, nstp, tsmult - (100000, 1, 1), - (36500, 10, 1), - (100000, 1, 1), -] -tdis = flopy.mf6.modflow.mftdis.ModflowTdis( - sim, pname="tdis", time_units="DAYS", nper=nper, perioddata=pd -) - -# groundwater flow (gwf) model -model_nam_file = f"{sim_name}.nam" -gwf = flopy.mf6.ModflowGwf( - sim, modelname=sim_name, model_nam_file=model_nam_file, save_flows=True -) - -# iterative model solver (ims) package -ims = flopy.mf6.modflow.mfims.ModflowIms( - sim, - pname="ims", - complexity="SIMPLE", - outer_dvclose=1e-6, - inner_dvclose=1e-6, - rcloserecord=1e-6, -) - -# grid discretization -dis = flopy.mf6.modflow.mfgwfdis.ModflowGwfdis( - gwf, - pname="dis", - nlay=nlay, - nrow=nrow, - ncol=ncol, - length_units="FEET", - delr=delr, - delc=delc, - top=top, - botm=botm, -) - -# initial conditions -ic = flopy.mf6.modflow.mfgwfic.ModflowGwfic(gwf, pname="ic", strt=top) - -# node property flow -npf = flopy.mf6.modflow.mfgwfnpf.ModflowGwfnpf( - gwf, pname="npf", icelltype=laytyp, k=kh, k33=kv -) - -# recharge -rch = flopy.mf6.modflow.mfgwfrcha.ModflowGwfrcha(gwf, recharge=rch) - - -# wells -def no_flow(w): - return w[0], w[1], w[2], 0 - - -wel = flopy.mf6.modflow.mfgwfwel.ModflowGwfwel( - gwf, - maxbound=1, - stress_period_data={0: [no_flow(w) for w in wells], 1: wells, 2: wells}, -) - -# river -rd = [[(0, i, ncol - 1), riv_h, riv_c, riv_z] for i in range(nrow)] -flopy.mf6.modflow.mfgwfriv.ModflowGwfriv( - gwf, stress_period_data={0: rd, 1: rd, 2: rd} -) - -# drain (set auxiliary IFACE var to 6 for top of cell) -dd = [ - [drain[0], drain[1], i + drain[2][0], 322.5, 100000.0, 6] - for i in range(drain[2][1] - drain[2][0]) -] -drn = flopy.mf6.modflow.mfgwfdrn.ModflowGwfdrn( - gwf, auxiliary=["IFACE"], stress_period_data={0: dd} -) - -# output control -headfile = f"{sim_name}.hds" -head_record = [headfile] -budgetfile = f"{sim_name}.cbb" -budget_record = [budgetfile] -saverecord = [("HEAD", "ALL"), ("BUDGET", "ALL")] -oc = flopy.mf6.modflow.mfgwfoc.ModflowGwfoc( - gwf, - pname="oc", - saverecord=saverecord, - head_filerecord=head_record, - budget_filerecord=budget_record, -) - - -# - - -# Take a look at the model grid before running the simulation. - - -# + -def add_release(ax): - ax.add_patch( - mpl.patches.Rectangle( - (2 * delc, (nrow - 2) * delr), - 1000, - -1000, - facecolor="green", - ) - ) - - -def add_legend(ax): - ax.legend( - handles=[ - mpl.patches.Patch(color="teal", label="river"), - mpl.patches.Patch(color="red", label="wells "), - mpl.patches.Patch(color="yellow", label="drain"), - mpl.patches.Patch(color="green", label="release"), - ] - ) - - -fig = plt.figure(figsize=(8, 8)) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -mv = flopy.plot.PlotMapView(model=gwf) -mv.plot_grid() -mv.plot_bc("DRN") -mv.plot_bc("RIV") -mv.plot_bc("WEL", plotAll=True) # include both wells (1st and 3rd layer) -add_release(ax) -add_legend(ax) -plt.show() -# - - -# ## Running the simulation -# -# Run the MODFLOW 6 flow simulation. - -sim.write_simulation() -success, buff = sim.run_simulation(silent=True, report=True) -assert success, "Failed to run simulation." -for line in buff: - print(line) - -# Create and run MODPATH 7 particle tracking model in `combined` mode, which includes both pathline and timeseries. 
- -# + -# create modpath files -mp = flopy.modpath.Modpath7( - modelname=f"{sim_name}_mp", - flowmodel=gwf, - exe_name="mp7", - model_ws=workspace, -) -mpbas = flopy.modpath.Modpath7Bas(mp, porosity=0.1, defaultiface=defaultiface) -mpsim = flopy.modpath.Modpath7Sim( - mp, - simulationtype="combined", - trackingdirection="forward", - weaksinkoption="pass_through", - weaksourceoption="pass_through", - budgetoutputoption="summary", - referencetime=[0, 0, 0.9], - timepointdata=[10, 20.0], # release every 20 days, for 200 days - zonedataoption="on", - zones=zone_maps, - particlegroups=pgs, -) - -mp.write_input() -success, buff = mp.run_model(silent=True, report=True) -assert success -for line in buff: - print(line) -# - - -# ## Inspecting results -# -# First we need the particle termination locations. - -wel_locs = [w[0:3] for w in wells] -riv_locs = [(0, i, 19) for i in range(20)] -drn_locs = [(drain[0], drain[1], d) for d in range(drain[2][0], drain[2][1])] -wel_nids = gwf.modelgrid.get_node(wel_locs) -riv_nids = gwf.modelgrid.get_node(riv_locs) -drn_nids = gwf.modelgrid.get_node(drn_locs) - -# Next, load pathline data from the MODPATH 7 pathline output file, filtering by termination location. - -# + -fpth = workspace / f"{sim_name}_mp.mppth" -p = flopy.utils.PathlineFile(fpth) - -pl1 = p.get_destination_pathline_data(wel_nids, to_recarray=True) -pl2 = p.get_destination_pathline_data(riv_nids + drn_nids, to_recarray=True) -# - - -# Load endpoint data from the MODPATH 7 endpoint output file. - -# + -fpth = workspace / f"{sim_name}_mp.mpend" -e = flopy.utils.EndpointFile(fpth) - -ep1 = e.get_destination_endpoint_data(dest_cells=wel_nids) -ep2 = e.get_destination_endpoint_data(dest_cells=riv_nids + drn_nids) -# - - -# Extract head data from the GWF model's output files. - -hf = flopy.utils.HeadFile(workspace / f"{sim_name}.hds") -head = hf.get_data() - -# Plot heads over a map view of the model, then add particle starting points and pathlines. The apparent number of particle starting locations is less than the total number of particles because a separate particle begins at each location every 20 days during the release period at the beginning of the simulation. - -# + -fig = plt.figure(figsize=(10, 10)) -ax = fig.add_subplot(1, 1, 1, aspect="equal") - -mv = flopy.plot.PlotMapView(model=gwf) -mv.plot_grid(lw=0.5) -mv.plot_bc("DRN") -mv.plot_bc("RIV") -mv.plot_bc("WEL", plotAll=True) -hd = mv.plot_array(head, alpha=0.1) -cb = plt.colorbar(hd, shrink=0.5) -cb.set_label("Head") -mv.plot_pathline( - pl1, layer="all", alpha=0.1, colors=["red"], lw=2, label="captured by well" -) -mv.plot_pathline( - pl2, - layer="all", - alpha=0.1, - colors=["blue"], - lw=2, - label="captured by drain/river", -) -add_release(ax) -mv.ax.legend() -plt.show() -# - - -# Clean up the temporary directory. 
- -try: - # ignore PermissionError on Windows - temp_dir.cleanup() -except: - pass diff --git a/.docs/Notebooks/modpath7_unstructured_example.py b/.docs/Notebooks/modpath7_unstructured_example.py deleted file mode 100644 index 54e4ee3c0..000000000 --- a/.docs/Notebooks/modpath7_unstructured_example.py +++ /dev/null @@ -1,561 +0,0 @@ -# --- -# jupyter: -# jupytext: -# notebook_metadata_filter: all -# text_representation: -# extension: .py -# format_name: light -# format_version: '1.5' -# jupytext_version: 1.14.5 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# metadata: -# section: modpath -# authors: -# - name: Joseph Hughes -# --- - -# # Using MODPATH 7 with a DISV unstructured model -# -# This is a replication of the MODPATH Problem 2 example that is described on page 12 of the modpath_7_examples.pdf file. The results shown here should be the same as the results in the MODPATH example, however, the vertex and node numbering used here may be different from the numbering used in MODPATH, so head values may not be compared directly without some additional mapping. - -# ## Part I. Setup Notebook - -import os - -# + -import sys -from pathlib import Path -from tempfile import TemporaryDirectory - -import matplotlib as mpl -import matplotlib.pyplot as plt -import numpy as np - -proj_root = Path.cwd().parent.parent - -import flopy - -print(sys.version) -print(f"numpy version: {np.__version__}") -print(f"matplotlib version: {mpl.__version__}") -print(f"flopy version: {flopy.__version__}") - -# temporary directory -temp_dir = TemporaryDirectory() -workspace = Path(temp_dir.name) -# - - -# ## Part II. Gridgen Creation of Model Grid -# -# Create the base model grid. - -Lx = 10000.0 -Ly = 10500.0 -nlay = 3 -nrow = 21 -ncol = 20 -delr = Lx / ncol -delc = Ly / nrow -top = 400 -botm = [220, 200, 0] - -ms = flopy.modflow.Modflow() -dis5 = flopy.modflow.ModflowDis( - ms, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - top=top, - botm=botm, -) - -# Create the `Gridgen` object. - -# + -from flopy.utils.gridgen import Gridgen - -model_name = "mp7p2_u" -model_ws = workspace / "mp7_ex2" / "mf6" -gridgen_ws = model_ws / "gridgen" -g = Gridgen(ms.modelgrid, model_ws=gridgen_ws) -# - - -# Refine the grid. - -# + -rf0shp = gridgen_ws / "rf0" -xmin = 7 * delr -xmax = 12 * delr -ymin = 8 * delc -ymax = 13 * delc -rfpoly = [ - [ - list( - reversed( - [ - (xmin, ymin), - (xmax, ymin), - (xmax, ymax), - (xmin, ymax), - (xmin, ymin), - ] - ) - ) - ] -] -g.add_refinement_features(rfpoly, "polygon", 1, range(nlay)) - -rf1shp = gridgen_ws / "rf1" -xmin = 8 * delr -xmax = 11 * delr -ymin = 9 * delc -ymax = 12 * delc -rfpoly = [ - [ - list( - reversed( - [ - (xmin, ymin), - (xmax, ymin), - (xmax, ymax), - (xmin, ymax), - (xmin, ymin), - ] - ) - ) - ] -] -g.add_refinement_features(rfpoly, "polygon", 2, range(nlay)) - -rf2shp = gridgen_ws / "rf2" -xmin = 9 * delr -xmax = 10 * delr -ymin = 10 * delc -ymax = 11 * delc -rfpoly = [ - [ - list( - reversed( - [ - (xmin, ymin), - (xmax, ymin), - (xmax, ymax), - (xmin, ymax), - (xmin, ymin), - ] - ) - ) - ] -] -g.add_refinement_features(rfpoly, "polygon", 3, range(nlay)) -# - - -# Show the model grid with refinement levels superimposed. 
- -fig = plt.figure(figsize=(5, 5), constrained_layout=True) -ax = fig.add_subplot(1, 1, 1) -mm = flopy.plot.PlotMapView(model=ms) -mm.plot_grid() -flopy.plot.plot_shapefile(rf0shp, ax=ax, facecolor="yellow", edgecolor="none") -flopy.plot.plot_shapefile(rf1shp, ax=ax, facecolor="pink", edgecolor="none") -flopy.plot.plot_shapefile(rf2shp, ax=ax, facecolor="red", edgecolor="none") - -# Build the refined grid. - -g.build(verbose=False) - -# Show the refined grid. - -fig = plt.figure(figsize=(5, 5), constrained_layout=True) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -g.plot(ax, linewidth=0.5) - -# Extract the refined grid's properties. - -gridprops = g.get_gridprops_disv() -ncpl = gridprops["ncpl"] -top = gridprops["top"] -botm = gridprops["botm"] -nvert = gridprops["nvert"] -vertices = gridprops["vertices"] -cell2d = gridprops["cell2d"] - -# ## Part III. Create the Flopy Model - -# + -# create simulation -sim = flopy.mf6.MFSimulation( - sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=model_ws -) - -# create tdis package -tdis_rc = [(1000.0, 1, 1.0)] -tdis = flopy.mf6.ModflowTdis( - sim, pname="tdis", time_units="DAYS", perioddata=tdis_rc -) - -# create gwf model -gwf = flopy.mf6.ModflowGwf( - sim, modelname=model_name, model_nam_file=f"{model_name}.nam" -) -gwf.name_file.save_flows = True - -# create iterative model solution and register the gwf model with it -ims = flopy.mf6.ModflowIms( - sim, - pname="ims", - print_option="SUMMARY", - complexity="SIMPLE", - outer_dvclose=1.0e-5, - outer_maximum=100, - under_relaxation="NONE", - inner_maximum=100, - inner_dvclose=1.0e-6, - rcloserecord=0.1, - linear_acceleration="BICGSTAB", - scaling_method="NONE", - reordering_method="NONE", - relaxation_factor=0.99, -) -sim.register_ims_package(ims, [gwf.name]) - -# disv -disv = flopy.mf6.ModflowGwfdisv( - gwf, - nlay=nlay, - ncpl=ncpl, - top=top, - botm=botm, - nvert=nvert, - vertices=vertices, - cell2d=cell2d, -) - -# initial conditions -ic = flopy.mf6.ModflowGwfic(gwf, pname="ic", strt=320.0) - -# node property flow -npf = flopy.mf6.ModflowGwfnpf( - gwf, - xt3doptions=[("xt3d")], - icelltype=[1, 0, 0], - k=[50.0, 0.01, 200.0], - k33=[10.0, 0.01, 20.0], -) - -# wel -wellpoints = [(4750.0, 5250.0)] -welcells = g.intersect(wellpoints, "point", 0) -# welspd = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=1, aux_vars=['iface']) -welspd = [[(2, icpl), -150000, 0] for icpl in welcells["nodenumber"]] -wel = flopy.mf6.ModflowGwfwel( - gwf, print_input=True, auxiliary=[("iface",)], stress_period_data=welspd -) - -# rch -aux = [np.ones(ncpl, dtype=int) * 6] -rch = flopy.mf6.ModflowGwfrcha( - gwf, recharge=0.005, auxiliary=[("iface",)], aux={0: [6]} -) -# riv -riverline = [[(Lx - 1.0, Ly), (Lx - 1.0, 0.0)]] -rivcells = g.intersect(riverline, "line", 0) -rivspd = [[(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"]] -riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd) - -# output control -oc = flopy.mf6.ModflowGwfoc( - gwf, - pname="oc", - budget_filerecord=f"{model_name}.cbb", - head_filerecord=f"{model_name}.hds", - headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], - saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], -) -# - - -# Now write the simulation input files. - -sim.write_simulation() - -# ## Part IV. Run the MODFLOW 6 Model - -success, buff = sim.run_simulation(silent=True, report=True) -assert success, "mf6 failed to run" -for line in buff: - print(line) - -# ## Part V. 
Import and Plot the Results - -# Plot the boundary conditions on the grid. - -fname = os.path.join(model_ws, f"{model_name}.disv.grb") -grd = flopy.mf6.utils.MfGrdFile(fname, verbose=False) -mg = grd.modelgrid -ibd = np.zeros((ncpl), dtype=int) -ibd[welcells["nodenumber"]] = 1 -ibd[rivcells["nodenumber"]] = 2 -ibd = np.ma.masked_equal(ibd, 0) -fig = plt.figure(figsize=(8, 8), constrained_layout=True) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -pmv = flopy.plot.PlotMapView(modelgrid=mg, ax=ax) -ax.set_xlim(0, Lx) -ax.set_ylim(0, Ly) -cmap = mpl.colors.ListedColormap( - [ - "r", - "g", - ] -) -pc = pmv.plot_array(ibd, cmap=cmap, edgecolor="gray") -t = ax.set_title("Boundary Conditions\n") - -fname = os.path.join(model_ws, f"{model_name}.hds") -hdobj = flopy.utils.HeadFile(fname) -head = hdobj.get_data() -head.shape - -ilay = 2 -cint = 0.25 -fig = plt.figure(figsize=(8, 8), constrained_layout=True) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -mm = flopy.plot.PlotMapView(modelgrid=mg, ax=ax, layer=ilay) -ax.set_xlim(0, Lx) -ax.set_ylim(0, Ly) -pc = mm.plot_array(head[:, 0, :], cmap="jet", edgecolor="black") -hmin = head[ilay, 0, :].min() -hmax = head[ilay, 0, :].max() -levels = np.arange(np.floor(hmin), np.ceil(hmax) + cint, cint) -cs = mm.contour_array(head[:, 0, :], colors="white", levels=levels) -plt.clabel(cs, fmt="%.1f", colors="white", fontsize=11) -cb = plt.colorbar(pc, shrink=0.5) -t = ax.set_title(f"Model Layer {ilay + 1}; hmin={hmin:6.2f}, hmax={hmax:6.2f}") - -# Inspect model cells and vertices. - -# + -# zoom area -xmin, xmax = 2000, 4500 -ymin, ymax = 5400, 7500 - -mg.get_cell_vertices -fig = plt.figure(figsize=(8, 8), constrained_layout=True) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -mm = flopy.plot.PlotMapView(modelgrid=mg, ax=ax) -v = mm.plot_grid(edgecolor="black") -t = ax.set_title("Model Cells and Vertices (one-based)\n") -ax.set_xlim(xmin, xmax) -ax.set_ylim(ymin, ymax) - -verts = mg.verts -ax.plot(verts[:, 0], verts[:, 1], "bo") -for i in range(ncpl): - x, y = verts[i, 0], verts[i, 1] - if xmin <= x <= xmax and ymin <= y <= ymax: - ax.annotate(str(i + 1), verts[i, :], color="b") - -xc, yc = mg.get_xcellcenters_for_layer(0), mg.get_ycellcenters_for_layer(0) -for i in range(ncpl): - x, y = xc[i], yc[i] - ax.plot(x, y, "ro") - if xmin <= x <= xmax and ymin <= y <= ymax: - ax.annotate(str(i + 1), (x, y), color="r") -# - - -# ## Part VI. Create the Flopy MODPATH7 Models -# -# Define names for the MODPATH 7 simulations. - -mp_namea = f"{model_name}a_mp" -mp_nameb = f"{model_name}b_mp" - -# Create particles for the pathline and timeseries analysis. - -# + -pcoord = np.array( - [ - [0.000, 0.125, 0.500], - [0.000, 0.375, 0.500], - [0.000, 0.625, 0.500], - [0.000, 0.875, 0.500], - [1.000, 0.125, 0.500], - [1.000, 0.375, 0.500], - [1.000, 0.625, 0.500], - [1.000, 0.875, 0.500], - [0.125, 0.000, 0.500], - [0.375, 0.000, 0.500], - [0.625, 0.000, 0.500], - [0.875, 0.000, 0.500], - [0.125, 1.000, 0.500], - [0.375, 1.000, 0.500], - [0.625, 1.000, 0.500], - [0.875, 1.000, 0.500], - ] -) -nodew = gwf.disv.ncpl.array * 2 + welcells["nodenumber"][0] -plocs = [nodew for i in range(pcoord.shape[0])] - -# create particle data -pa = flopy.modpath.ParticleData( - plocs, - structured=False, - localx=pcoord[:, 0], - localy=pcoord[:, 1], - localz=pcoord[:, 2], - drape=0, -) - -# create backward particle group -fpth = f"{mp_namea}.sloc" -pga = flopy.modpath.ParticleGroup( - particlegroupname="BACKWARD1", particledata=pa, filename=fpth -) -# - - -# Create particles for endpoint analysis. 
- -facedata = flopy.modpath.FaceDataType( - drape=0, - verticaldivisions1=10, - horizontaldivisions1=10, - verticaldivisions2=10, - horizontaldivisions2=10, - verticaldivisions3=10, - horizontaldivisions3=10, - verticaldivisions4=10, - horizontaldivisions4=10, - rowdivisions5=0, - columndivisions5=0, - rowdivisions6=4, - columndivisions6=4, -) -pb = flopy.modpath.NodeParticleData(subdivisiondata=facedata, nodes=nodew) -# create forward particle group -fpth = f"{mp_nameb}.sloc" -pgb = flopy.modpath.ParticleGroupNodeTemplate( - particlegroupname="BACKWARD2", particledata=pb, filename=fpth -) - -# Create and run the pathline and timeseries analysis model. - -# + -# create modpath files -mp = flopy.modpath.Modpath7( - modelname=mp_namea, flowmodel=gwf, exe_name="mp7", model_ws=model_ws -) -flopy.modpath.Modpath7Bas(mp, porosity=0.1) -flopy.modpath.Modpath7Sim( - mp, - simulationtype="combined", - trackingdirection="backward", - weaksinkoption="pass_through", - weaksourceoption="pass_through", - referencetime=0.0, - stoptimeoption="extend", - timepointdata=[500, 1000.0], - particlegroups=pga, -) - -# write modpath datasets -mp.write_input() - -# run modpath -success, buff = mp.run_model(silent=True, report=True) -assert success, "mp7 failed to run" -for line in buff: - print(line) -# - - -# Load the pathline and timeseries data. - -fpth = model_ws / f"{mp_namea}.mppth" -p = flopy.utils.PathlineFile(fpth) -p0 = p.get_alldata() - -fpth = model_ws / f"{mp_namea}.timeseries" -ts = flopy.utils.TimeseriesFile(fpth) -ts0 = ts.get_alldata() - -# Plot the pathline and timeseries data. - -fig = plt.figure(figsize=(8, 8), constrained_layout=True) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -mm = flopy.plot.PlotMapView(modelgrid=mg, ax=ax) -ax.set_xlim(0, Lx) -ax.set_ylim(0, Ly) -cmap = mpl.colors.ListedColormap( - [ - "r", - "g", - ] -) -v = mm.plot_array(ibd, cmap=cmap, edgecolor="gray") -mm.plot_pathline(p0, layer="all", colors="blue", lw=0.75) -colors = ["green", "orange", "red"] -for k in range(nlay): - mm.plot_timeseries(ts0, layer=k, marker="o", lw=0, color=colors[k]) - -# Create and run the endpoint analysis model. - -# + -# create modpath files -mp = flopy.modpath.Modpath7( - modelname=mp_nameb, flowmodel=gwf, exe_name="mp7", model_ws=model_ws -) -flopy.modpath.Modpath7Bas(mp, porosity=0.1) -flopy.modpath.Modpath7Sim( - mp, - simulationtype="endpoint", - trackingdirection="backward", - weaksinkoption="pass_through", - weaksourceoption="pass_through", - referencetime=0.0, - stoptimeoption="extend", - particlegroups=pgb, -) - -# write modpath datasets -mp.write_input() - -# run modpath -success, buff = mp.run_model(silent=True, report=True) -assert success, "mp7 failed to run" -for line in buff: - print(line) -# - - -# Load the endpoint data. - -fpth = model_ws / f"{mp_nameb}.mpend" -e = flopy.utils.EndpointFile(fpth) -e0 = e.get_alldata() - -# Plot the endpoint data. - -fig = plt.figure(figsize=(8, 8), constrained_layout=True) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -mm = flopy.plot.PlotMapView(modelgrid=mg, ax=ax) -ax.set_xlim(0, Lx) -ax.set_ylim(0, Ly) -cmap = mpl.colors.ListedColormap( - [ - "r", - "g", - ] -) -v = mm.plot_array(ibd, cmap=cmap, edgecolor="gray") -mm.plot_endpoint(e0, direction="ending", colorbar=True, shrink=0.5) - -# Clean up the temporary workspace. 
- -try: - # ignore PermissionError on Windows - temp_dir.cleanup() -except: - pass diff --git a/.docs/Notebooks/modpath7_unstructured_lateral_example.py b/.docs/Notebooks/modpath7_unstructured_lateral_example.py deleted file mode 100644 index 9b3f85c3b..000000000 --- a/.docs/Notebooks/modpath7_unstructured_lateral_example.py +++ /dev/null @@ -1,576 +0,0 @@ -# --- -# jupyter: -# jupytext: -# notebook_metadata_filter: all -# text_representation: -# extension: .py -# format_name: light -# format_version: '1.5' -# jupytext_version: 1.14.5 -# kernelspec: -# display_name: Python 3 -# language: python -# name: python3 -# metadata: -# section: modpath -# authors: -# - name: Wes Bonelli -# --- - -# # Using MODPATH 7: DISV quadpatch example -# -# This notebook demonstrates example 4 from the MODPATH 7 documentation, a steady-state MODFLOW 6 simulation using a quadpatch DISV grid with an irregular domain and a large number of inactive cells. Particles are tracked backwards from terminating locations, including a pair of wells in a locally-refined region of the grid and constant-head cells along the grid's right side, to release locations along the left border of the grid's active region. Injection wells along the left-hand border are used to generate boundary flows. -# -# First import FloPy and set up a temporary workspace. - -# + -import sys -from pathlib import Path -from tempfile import TemporaryDirectory - -import matplotlib as mpl -import matplotlib.pyplot as plt -import numpy as np - -proj_root = Path.cwd().parent.parent - -import flopy - -temp_dir = TemporaryDirectory() -workspace = Path(temp_dir.name) -sim_name = "ex04_mf6" - -print("Python version:", sys.version) -print("NumPy version:", np.__version__) -print("Matplotlib version:", mpl.__version__) -print("FloPy version:", flopy.__version__) -# - - -# ## Grid creation/refinement -# -# In this example we use GRIDGEN to create a quadpatch grid with a refined region in the upper left quadrant. -# -# The grid has 3 nested refinement levels, all nearly but not perfectly rectangular (a 500x500 area is carved out of each corner of each). Outer levels of refinement have a width of 500. To produce this pattern we use 5 rectangular polygons for each level. -# -# First, create the coarse-grained grid discretization. - -nlay, nrow, ncol = 1, 21, 26 # coarsest-grained grid is 21x26 -delr = delc = 500.0 -top = 100.0 -botm = np.zeros((nlay, nrow, ncol), dtype=np.float32) -ms = flopy.modflow.Modflow() -dis = flopy.modflow.ModflowDis( - ms, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - top=top, - botm=botm, -) - -# Next, refine the grid. Create a `Gridgen` object from the base grid, then add refinement features (3 groups of polygons). 
- -# + -from flopy.utils.gridgen import Gridgen - -# create Gridgen workspace -gridgen_ws = workspace / "gridgen" -gridgen_ws.mkdir() - -# create Gridgen object -g = Gridgen(ms.modelgrid, model_ws=gridgen_ws) - -# add polygon for each refinement level -outer_polygon = [ - [ - (2500, 6000), - (2500, 9500), - (3000, 9500), - (3000, 10000), - (6000, 10000), - (6000, 9500), - (6500, 9500), - (6500, 6000), - (6000, 6000), - (6000, 5500), - (3000, 5500), - (3000, 6000), - (2500, 6000), - ] -] -g.add_refinement_features([outer_polygon], "polygon", 1, range(nlay)) -refshp0 = gridgen_ws / "rf0" - -middle_polygon = [ - [ - (3000, 6500), - (3000, 9000), - (3500, 9000), - (3500, 9500), - (5500, 9500), - (5500, 9000), - (6000, 9000), - (6000, 6500), - (5500, 6500), - (5500, 6000), - (3500, 6000), - (3500, 6500), - (3000, 6500), - ] -] -g.add_refinement_features([middle_polygon], "polygon", 2, range(nlay)) -refshp1 = gridgen_ws / "rf1" - -inner_polygon = [ - [ - (3500, 7000), - (3500, 8500), - (4000, 8500), - (4000, 9000), - (5000, 9000), - (5000, 8500), - (5500, 8500), - (5500, 7000), - (5000, 7000), - (5000, 6500), - (4000, 6500), - (4000, 7000), - (3500, 7000), - ] -] -g.add_refinement_features([inner_polygon], "polygon", 3, range(nlay)) -refshp2 = gridgen_ws / "rf2" -# - - -# Create and plot the refined grid with refinement levels superimposed. - -# + -g.build(verbose=False) -grid = flopy.discretization.VertexGrid(**g.get_gridprops_vertexgrid()) - -fig = plt.figure(figsize=(15, 15)) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -mm = flopy.plot.PlotMapView(model=ms) -grid.plot(ax=ax) -flopy.plot.plot_shapefile(refshp0, ax=ax, facecolor="green", alpha=0.3) -flopy.plot.plot_shapefile(refshp1, ax=ax, facecolor="green", alpha=0.5) -flopy.plot.plot_shapefile(str(refshp2), ax=ax, facecolor="green", alpha=0.7) -# - - -# ## Groundwater flow model -# -# Next, create a GWF model. The particle-tracking model will consume its output. 
- -# + -# simulation -sim = flopy.mf6.MFSimulation( - sim_name=sim_name, sim_ws=workspace, exe_name="mf6", version="mf6" -) - -# temporal discretization -tdis = flopy.mf6.ModflowTdis( - sim, time_units="days", nper=1, perioddata=[(10000, 1, 1.0)] -) - -# iterative model solver -ims = flopy.mf6.ModflowIms( - sim, - pname="ims", - complexity="SIMPLE", - outer_dvclose=1e-4, - outer_maximum=100, - inner_dvclose=1e-5, - under_relaxation_theta=0, - under_relaxation_kappa=0, - under_relaxation_gamma=0, - under_relaxation_momentum=0, - linear_acceleration="BICGSTAB", - relaxation_factor=0.99, - number_orthogonalizations=2, -) - -# groundwater flow model -gwf = flopy.mf6.ModflowGwf( - sim, modelname=sim_name, model_nam_file=f"{sim_name}.nam", save_flows=True -) - -# grid discretization -# fmt: off -idomain = [ - 0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0, - 0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1, - 1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,1,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0, - 0,0,0,0,0,0,0,0,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0, - 0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, - 0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0 -] -# fmt: on -disv_props = g.get_gridprops_disv() -disv = flopy.mf6.ModflowGwfdisv( - gwf, length_units="feet", idomain=idomain, **disv_props -) - -# initial conditions -ic = flopy.mf6.ModflowGwfic(gwf, strt=150.0) - -# wells are tuples (layer, node number, q, iface) -wells = [ - # negative q: discharge - (0, 861, -30000.0, 0), - (0, 891, -30000.0, 0), - # positive q: injection - (0, 1959, 10000.0, 1), - (0, 1932, 10000.0, 3), - (0, 1931, 10000.0, 3), - (0, 1930, 5000.0, 1), - (0, 1930, 5000.0, 3), - (0, 1903, 5000.0, 1), - (0, 1903, 5000.0, 3), - (0, 1876, 10000.0, 3), - (0, 1875, 10000.0, 3), - (0, 1874, 5000.0, 1), - (0, 1874, 5000.0, 3), - (0, 1847, 10000.0, 3), - (0, 1846, 5000.0, 3), - (0, 1845, 5000.0, 1), - (0, 1845, 5000.0, 3), - (0, 1818, 5000.0, 1), - (0, 1818, 5000.0, 3), - (0, 1792, 10000.0, 1), - (0, 1766, 10000.0, 1), - (0, 1740, 5000.0, 1), - (0, 1740, 5000.0, 4), - (0, 1715, 5000.0, 1), - (0, 1715, 5000.0, 4), - (0, 1690, 10000.0, 1), - (0, 1646, 5000.0, 1), - (0, 1646, 5000.0, 4), - (0, 1549, 5000.0, 1), - (0, 1549, 5000.0, 4), - (0, 1332, 5000.0, 4), - (0, 1332, 5000.0, 1), - (0, 1021, 2500.0, 1), - (0, 1021, 2500.0, 4), - (0, 1020, 5000.0, 1), - (0, 708, 2500.0, 1), - (0, 708, 2500.0, 4), - (0, 711, 625.0, 1), - (0, 711, 625.0, 4), - (0, 710, 625.0, 1), - (0, 710, 625.0, 4), - (0, 409, 1250.0, 1), - (0, 407, 625.0, 1), - (0, 407, 625.0, 4), - (0, 402, 625.0, 1), - (0, 402, 625.0, 4), - (0, 413, 1250.0, 1), - (0, 411, 1250.0, 1), - (0, 203, 1250.0, 1), - (0, 202, 1250.0, 1), - (0, 202, 1250.0, 4), - (0, 199, 2500.0, 1), - (0, 197, 1250.0, 1), - (0, 197, 1250.0, 4), - (0, 96, 2500.0, 1), - (0, 97, 1250.0, 1), - (0, 97, 1250.0, 4), - (0, 103, 1250.0, 1), - (0, 103, 1250.0, 4), - (0, 102, 1250.0, 1), - (0, 102, 1250.0, 4), - (0, 43, 2500.0, 1), - (0, 43, 2500.0, 4), - (0, 44, 2500.0, 1), - (0, 44, 2500.0, 4), - (0, 45, 5000.0, 4), - (0, 10, 10000.0, 1), -] -flopy.mf6.modflow.mfgwfwel.ModflowGwfwel( - gwf, - maxbound=68, - auxiliary="IFACE", - save_flows=True, - stress_period_data={0: wells}, -) - -# node property flow -npf 
= flopy.mf6.ModflowGwfnpf( - gwf, - xt3doptions=True, - save_flows=True, - save_specific_discharge=True, - icelltype=[0], - k=[50], -) - -# constant head boundary (period, node number, head) -chd_bound = [ - (0, 1327, 150.0), - (0, 1545, 150.0), - (0, 1643, 150.0), - (0, 1687, 150.0), - (0, 1713, 150.0), -] -chd = flopy.mf6.ModflowGwfchd( - gwf, pname="chd", save_flows=True, stress_period_data=chd_bound -) - -# output control -budget_file = f"{sim_name}.bud" -head_file = f"{sim_name}.hds" -oc = flopy.mf6.ModflowGwfoc( - gwf, - pname="oc", - budget_filerecord=[budget_file], - head_filerecord=[head_file], - saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], -) -# - - -# Before running the simulation, view the model's boundary conditions. - -# + -fig = plt.figure(figsize=(13, 13)) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -mv = flopy.plot.PlotMapView(model=gwf, ax=ax) -mv.plot_grid(alpha=0.3) -mv.plot_ibound() -mv.plot_bc("WEL") -ax.add_patch( - mpl.patches.Rectangle( - ((ncol - 1) * delc, (nrow - 6) * delr), - 1000, - -2500, - linewidth=5, - facecolor="blue", - alpha=0.5, - ) -) -ax.legend( - handles=[ - mpl.patches.Patch(color="red", label="WEL"), - mpl.patches.Patch(color="blue", label="CHB"), - ] -) - -plt.show() -# - - -# Run the simulation. - -sim.set_sim_path(workspace) -sim.write_simulation() -success, buff = sim.run_simulation(silent=True, report=True) -assert success, "Failed to run MF6 simulation." -for line in buff: - print(line) - -# ## Particle tracking - -# -# This example is a reverse-tracking model, with termination and release zones inverted: we "release" particles from the constant head boundary on the grid's right edge and from the two pumping wells, and track the particles backwards to release locations at the wells along the left boundary of the active domain. 
- -# + -particles = [ - # node number, localx, localy, localz - (1327, 0.000, 0.125, 0.500), - (1327, 0.000, 0.375, 0.500), - (1327, 0.000, 0.625, 0.500), - (1327, 0.000, 0.875, 0.500), - (1545, 0.000, 0.125, 0.500), - (1545, 0.000, 0.375, 0.500), - (1545, 0.000, 0.625, 0.500), - (1545, 0.000, 0.875, 0.500), - (1643, 0.000, 0.125, 0.500), - (1643, 0.000, 0.375, 0.500), - (1643, 0.000, 0.625, 0.500), - (1643, 0.000, 0.875, 0.500), - (1687, 0.000, 0.125, 0.500), - (1687, 0.000, 0.375, 0.500), - (1687, 0.000, 0.625, 0.500), - (1687, 0.000, 0.875, 0.500), - (1713, 0.000, 0.125, 0.500), - (1713, 0.000, 0.375, 0.500), - (1713, 0.000, 0.625, 0.500), - (1713, 0.000, 0.875, 0.500), - (861, 0.000, 0.125, 0.500), - (861, 0.000, 0.375, 0.500), - (861, 0.000, 0.625, 0.500), - (861, 0.000, 0.875, 0.500), - (861, 1.000, 0.125, 0.500), - (861, 1.000, 0.375, 0.500), - (861, 1.000, 0.625, 0.500), - (861, 1.000, 0.875, 0.500), - (861, 0.125, 0.000, 0.500), - (861, 0.375, 0.000, 0.500), - (861, 0.625, 0.000, 0.500), - (861, 0.875, 0.000, 0.500), - (861, 0.125, 1.000, 0.500), - (861, 0.375, 1.000, 0.500), - (861, 0.625, 1.000, 0.500), - (861, 0.875, 1.000, 0.500), - (891, 0.000, 0.125, 0.500), - (891, 0.000, 0.375, 0.500), - (891, 0.000, 0.625, 0.500), - (891, 0.000, 0.875, 0.500), - (891, 1.000, 0.125, 0.500), - (891, 1.000, 0.375, 0.500), - (891, 1.000, 0.625, 0.500), - (891, 1.000, 0.875, 0.500), - (891, 0.125, 0.000, 0.500), - (891, 0.375, 0.000, 0.500), - (891, 0.625, 0.000, 0.500), - (891, 0.875, 0.000, 0.500), - (891, 0.125, 1.000, 0.500), - (891, 0.375, 1.000, 0.500), - (891, 0.625, 1.000, 0.500), - (891, 0.875, 1.000, 0.500), -] - -pd = flopy.modpath.ParticleData( - partlocs=[p[0] for p in particles], - localx=[p[1] for p in particles], - localy=[p[2] for p in particles], - localz=[p[3] for p in particles], - timeoffset=0, - drape=0, -) -pg = flopy.modpath.ParticleGroup( - particlegroupname="G1", particledata=pd, filename=f"{sim_name}.sloc" -) -# - - -# Create and run the backwards particle tracking model in `pathline` mode. - -# + -mp = flopy.modpath.Modpath7( - modelname=f"{sim_name}_mp", - flowmodel=gwf, - exe_name="mp7", - model_ws=workspace, -) -mpbas = flopy.modpath.Modpath7Bas( - mp, - porosity=0.1, -) -mpsim = flopy.modpath.Modpath7Sim( - mp, - simulationtype="pathline", - trackingdirection="backward", - budgetoutputoption="summary", - particlegroups=[pg], -) - -mp.write_input() -success, buff = mp.run_model(silent=True, report=True) -assert success, "Failed to run particle-tracking model." -for line in buff: - print(line) -# - - -# Load pathline data from the model's pathline output file. - -fpth = workspace / f"{sim_name}_mp.mppth" -p = flopy.utils.PathlineFile(fpth) -pl = p.get_destination_pathline_data( - range(gwf.modelgrid.nnodes), to_recarray=True -) - -# Load head data. - -hf = flopy.utils.HeadFile(workspace / f"{sim_name}.hds") -hd = hf.get_data() - -# Plot heads and particle paths over the grid. - -fig = plt.figure(figsize=(11, 11)) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -mm = flopy.plot.PlotMapView(model=gwf) -mm.plot_grid(lw=0.5, alpha=0.5) -mm.plot_ibound() -mm.plot_array(hd, alpha=0.5) -mm.plot_pathline(pl, layer="all", lw=0.3, colors=["black"]) -plt.show() - -# Clean up the temporary workspace. 
- -try: - # ignore PermissionError on Windows - temp_dir.cleanup() -except: - pass diff --git a/.docs/Notebooks/plot_cross_section_example.py b/.docs/Notebooks/plot_cross_section_example.py index e25337940..d5d2ef381 100644 --- a/.docs/Notebooks/plot_cross_section_example.py +++ b/.docs/Notebooks/plot_cross_section_example.py @@ -34,9 +34,6 @@ import matplotlib.pyplot as plt import numpy as np -sys.path.append(os.path.join("..", "common")) -import notebook_utils - import flopy print(sys.version) @@ -53,8 +50,7 @@ exe_name_mf6 = "mf6" # Set the paths -prj_root = notebook_utils.get_project_root_path() -loadpth = str(prj_root / "examples" / "data" / "freyberg") +loadpth = os.path.join("..", "..", "examples", "data", "freyberg") tempdir = TemporaryDirectory() modelpth = tempdir.name @@ -416,7 +412,7 @@ # + # load the Freyberg model into mf6-flopy and run the simulation sim_name = "mfsim.nam" -sim_path = str(prj_root / "examples" / "data" / "mf6-freyberg") +sim_path = os.path.join("..", "..", "examples", "data", "mf6-freyberg") sim = flopy.mf6.MFSimulation.load( sim_name=sim_name, version=vmf6, exe_name=exe_name_mf6, sim_ws=sim_path ) @@ -424,11 +420,8 @@ sim.set_sim_path(modelpth) sim.write_simulation() success, buff = sim.run_simulation(silent=True, report=True) -if success: - for line in buff: - print(line) -else: - raise ValueError("Something bad happened.") +assert success, pformat(buff) + files = ["freyberg.hds", "freyberg.cbc"] for f in files: if os.path.isfile(os.path.join(str(modelpth), f)): @@ -520,9 +513,330 @@ # # FloPy fully supports vertex discretization (DISV) plotting through the `PlotCrossSection` class. The method calls are identical to the ones presented previously for structured discretization (DIS), and the same matplotlib keyword arguments are supported. Let's run through an example using a vertex model grid.
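+ +# As a quick sketch, assuming a DISV model `gwf` has already been loaded (the demo problem below builds exactly such a model), the call is the same as in the DIS examples above; the only difference is that the cross-section line must be given explicitly in model coordinates, since a vertex grid has no rows or columns to slice along. The endpoint coordinates here are placeholders: +# +# xsect = flopy.plot.PlotCrossSection(model=gwf, line={"line": [(0, 5250), (10000, 5250)]}) +# patches = xsect.plot_grid()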
+ # + pycharm={"name": "#%%\n"} # build and run vertex model grid demo problem -notebook_utils.run(modelpth) +def run_vertex_grid_example(ws): + """load and run vertex grid example""" + if not os.path.exists(ws): + os.mkdir(ws) + + from flopy.utils.gridgen import Gridgen + + Lx = 10000.0 + Ly = 10500.0 + nlay = 3 + nrow = 21 + ncol = 20 + delr = Lx / ncol + delc = Ly / nrow + top = 400 + botm = [220, 200, 0] + + ms = flopy.modflow.Modflow() + dis5 = flopy.modflow.ModflowDis( + ms, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=delr, + delc=delc, + top=top, + botm=botm, + ) + + model_name = "mp7p2" + model_ws = os.path.join(ws, "mp7_ex2", "mf6") + gridgen_ws = os.path.join(model_ws, "gridgen") + g = Gridgen(ms.modelgrid, model_ws=gridgen_ws) + + rf0shp = os.path.join(gridgen_ws, "rf0") + xmin = 7 * delr + xmax = 12 * delr + ymin = 8 * delc + ymax = 13 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 1, range(nlay)) + + rf1shp = os.path.join(gridgen_ws, "rf1") + xmin = 8 * delr + xmax = 11 * delr + ymin = 9 * delc + ymax = 12 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 2, range(nlay)) + + rf2shp = os.path.join(gridgen_ws, "rf2") + xmin = 9 * delr + xmax = 10 * delr + ymin = 10 * delc + ymax = 11 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 3, range(nlay)) + + g.build(verbose=False) + + gridprops = g.get_gridprops_disv() + ncpl = gridprops["ncpl"] + top = gridprops["top"] + botm = gridprops["botm"] + nvert = gridprops["nvert"] + vertices = gridprops["vertices"] + cell2d = gridprops["cell2d"] + # cellxy = gridprops['cellxy'] + + # create simulation + sim = flopy.mf6.MFSimulation( + sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=model_ws + ) + + # create tdis package + tdis_rc = [(1000.0, 1, 1.0)] + tdis = flopy.mf6.ModflowTdis( + sim, pname="tdis", time_units="DAYS", perioddata=tdis_rc + ) + + # create gwf model + gwf = flopy.mf6.ModflowGwf( + sim, modelname=model_name, model_nam_file=f"{model_name}.nam" + ) + gwf.name_file.save_flows = True + + # create iterative model solution and register the gwf model with it + ims = flopy.mf6.ModflowIms( + sim, + pname="ims", + print_option="SUMMARY", + complexity="SIMPLE", + outer_hclose=1.0e-5, + outer_maximum=100, + under_relaxation="NONE", + inner_maximum=100, + inner_hclose=1.0e-6, + rcloserecord=0.1, + linear_acceleration="BICGSTAB", + scaling_method="NONE", + reordering_method="NONE", + relaxation_factor=0.99, + ) + sim.register_ims_package(ims, [gwf.name]) + + # disv + disv = flopy.mf6.ModflowGwfdisv( + gwf, + nlay=nlay, + ncpl=ncpl, + top=top, + botm=botm, + nvert=nvert, + vertices=vertices, + cell2d=cell2d, + ) + + # initial conditions + ic = flopy.mf6.ModflowGwfic(gwf, pname="ic", strt=320.0) + + # node property flow + npf = flopy.mf6.ModflowGwfnpf( + gwf, + xt3doptions=[("xt3d")], + save_specific_discharge=True, + icelltype=[1, 0, 0], + k=[50.0, 0.01, 200.0], + k33=[10.0, 0.01, 20.0], + ) + + # wel + wellpoints = [(4750.0, 5250.0)] + welcells = g.intersect(wellpoints, "point", 0) + # welspd = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=1, aux_vars=['iface']) + welspd = [[(2, icpl), -150000, 0] for icpl in welcells["nodenumber"]] + wel = 
flopy.mf6.ModflowGwfwel( + gwf, + print_input=True, + auxiliary=[("iface",)], + stress_period_data=welspd, + ) + + # rch + aux = [np.ones(ncpl, dtype=int) * 6] + rch = flopy.mf6.ModflowGwfrcha( + gwf, recharge=0.005, auxiliary=[("iface",)], aux={0: [6]} + ) + # riv + riverline = [[(Lx - 1.0, Ly), (Lx - 1.0, 0.0)]] + rivcells = g.intersect(riverline, "line", 0) + rivspd = [ + [(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"] + ] + riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd) + + # output control + oc = flopy.mf6.ModflowGwfoc( + gwf, + pname="oc", + budget_filerecord=f"{model_name}.cbb", + head_filerecord=f"{model_name}.hds", + headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + sim.write_simulation() + success, buff = sim.run_simulation(silent=True, report=True) + if success: + for line in buff: + print(line) + else: + raise ValueError("Failed to run.") + + mp_namea = f"{model_name}a_mp" + mp_nameb = f"{model_name}b_mp" + + pcoord = np.array( + [ + [0.000, 0.125, 0.500], + [0.000, 0.375, 0.500], + [0.000, 0.625, 0.500], + [0.000, 0.875, 0.500], + [1.000, 0.125, 0.500], + [1.000, 0.375, 0.500], + [1.000, 0.625, 0.500], + [1.000, 0.875, 0.500], + [0.125, 0.000, 0.500], + [0.375, 0.000, 0.500], + [0.625, 0.000, 0.500], + [0.875, 0.000, 0.500], + [0.125, 1.000, 0.500], + [0.375, 1.000, 0.500], + [0.625, 1.000, 0.500], + [0.875, 1.000, 0.500], + ] + ) + nodew = gwf.disv.ncpl.array * 2 + welcells["nodenumber"][0] + plocs = [nodew for i in range(pcoord.shape[0])] + + # create particle data + pa = flopy.modpath.ParticleData( + plocs, + structured=False, + localx=pcoord[:, 0], + localy=pcoord[:, 1], + localz=pcoord[:, 2], + drape=0, + ) + + # create backward particle group + fpth = f"{mp_namea}.sloc" + pga = flopy.modpath.ParticleGroup( + particlegroupname="BACKWARD1", particledata=pa, filename=fpth + ) + + facedata = flopy.modpath.FaceDataType( + drape=0, + verticaldivisions1=10, + horizontaldivisions1=10, + verticaldivisions2=10, + horizontaldivisions2=10, + verticaldivisions3=10, + horizontaldivisions3=10, + verticaldivisions4=10, + horizontaldivisions4=10, + rowdivisions5=0, + columndivisions5=0, + rowdivisions6=4, + columndivisions6=4, + ) + pb = flopy.modpath.NodeParticleData(subdivisiondata=facedata, nodes=nodew) + # create forward particle group + fpth = f"{mp_nameb}.sloc" + pgb = flopy.modpath.ParticleGroupNodeTemplate( + particlegroupname="BACKWARD2", particledata=pb, filename=fpth + ) + + # create modpath files + mp = flopy.modpath.Modpath7( + modelname=mp_namea, flowmodel=gwf, exe_name="mp7", model_ws=model_ws + ) + flopy.modpath.Modpath7Bas(mp, porosity=0.1) + flopy.modpath.Modpath7Sim( + mp, + simulationtype="combined", + trackingdirection="backward", + weaksinkoption="pass_through", + weaksourceoption="pass_through", + referencetime=0.0, + stoptimeoption="extend", + timepointdata=[500, 1000.0], + particlegroups=pga, + ) + + # write modpath datasets + mp.write_input() + + # run modpath + success, buff = mp.run_model(silent=True, report=True) + if success: + for line in buff: + print(line) + else: + raise ValueError("Failed to run.") + + # create modpath files + mp = flopy.modpath.Modpath7( + modelname=mp_nameb, flowmodel=gwf, exe_name="mp7", model_ws=model_ws + ) + flopy.modpath.Modpath7Bas(mp, porosity=0.1) + flopy.modpath.Modpath7Sim( + mp, + simulationtype="endpoint", + trackingdirection="backward", + 
weaksinkoption="pass_through", + weaksourceoption="pass_through", + referencetime=0.0, + stoptimeoption="extend", + particlegroups=pgb, + ) + + # write modpath datasets + mp.write_input() + + # run modpath + success, buff = mp.run_model(silent=True, report=True) + assert success, pformat(buff) + + +run_vertex_grid_example(modelpth) # check if model ran properly modelpth = os.path.join(modelpth, "mp7_ex2", "mf6") @@ -681,10 +995,290 @@ # # FloPy's plotting routines can be used with built in styles from the `styles` module. The `styles` module takes advantage of matplotlib's temporary styling routines by reading in pre-built style sheets. Two different types of styles have been built for flopy: `USGSMap()` and `USGSPlot()` styles which can be used to create report quality figures. The styles module also contains a number of methods that can be used for adding axis labels, text, annotations, headings, removing tick lines, and updating the current font. # -# This example will load the Keating groundwater transport model and plot results using `styles` +# This example will run the Keating groundwater transport model and plot results using `styles` # + pycharm={"name": "#%%\n"} -notebook_utils.run_keating_model(modelpth) + +example_name = "ex-gwt-keating" + +# Model units + +length_units = "m" +time_units = "days" + +# Table of model parameters + +nlay = 80 # Number of layers +nrow = 1 # Number of rows +ncol = 400 # Number of columns +delr = 25.0 # Column width ($m$) +delc = 1.0 # Row width ($m$) +delz = 25.0 # Layer thickness ($m$) +top = 2000.0 # Top of model domain ($m$) +bottom = 0.0 # Bottom of model domain ($m$) +hka = 1.0e-12 # Permeability of aquifer ($m^2$) +hkc = 1.0e-18 # Permeability of aquitard ($m^2$) +h1 = 800.0 # Head on left side ($m$) +h2 = 100.0 # Head on right side ($m$) +recharge = 0.5 # Recharge ($kg/s$) +recharge_conc = 1.0 # Normalized recharge concentration (unitless) +alpha_l = 1.0 # Longitudinal dispersivity ($m$) +alpha_th = 1.0 # Transverse horizontal dispersivity ($m$) +alpha_tv = 1.0 # Transverse vertical dispersivity ($m$) +period1 = 730 # Length of first simulation period ($d$) +period2 = 29270.0 # Length of second simulation period ($d$) +porosity = 0.1 # Porosity of mobile domain (unitless) +obs1 = (49, 1, 119) # Layer, row, and column for observation 1 +obs2 = (77, 1, 359) # Layer, row, and column for observation 2 + +obs1 = tuple([i - 1 for i in obs1]) +obs2 = tuple([i - 1 for i in obs2]) +seconds_to_days = 24.0 * 60.0 * 60.0 +permeability_to_conductivity = 1000.0 * 9.81 / 1.0e-3 * seconds_to_days +hka = hka * permeability_to_conductivity +hkc = hkc * permeability_to_conductivity +botm = [top - (k + 1) * delz for k in range(nlay)] +x = np.arange(0, 10000.0, delr) + delr / 2.0 +plotaspect = 1.0 + +# Fill hydraulic conductivity array +hydraulic_conductivity = np.ones((nlay, nrow, ncol), dtype=float) * hka +for k in range(nlay): + if 1000.0 <= botm[k] < 1100.0: + for j in range(ncol): + if 3000.0 <= x[j] <= 6000.0: + hydraulic_conductivity[k, 0, j] = hkc + +# Calculate recharge by converting from kg/s to m/d +rcol = [] +for jcol in range(ncol): + if 4200.0 <= x[jcol] <= 4800.0: + rcol.append(jcol) +number_recharge_cells = len(rcol) +rrate = recharge * seconds_to_days / 1000.0 +cell_area = delr * delc +rrate = rrate / (float(number_recharge_cells) * cell_area) +rchspd = {} +rchspd[0] = [[(0, 0, j), rrate, recharge_conc] for j in rcol] +rchspd[1] = [[(0, 0, j), rrate, 0.0] for j in rcol] + + +def build_mf6gwf(sim_folder): + ws = os.path.join(sim_folder, 
"mf6-gwt-keating") + name = "flow" + sim_ws = os.path.join(ws, "mf6gwf") + sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=sim_ws, exe_name="mf6") + tdis_ds = ((period1, 1, 1.0), (period2, 1, 1.0)) + flopy.mf6.ModflowTdis( + sim, nper=len(tdis_ds), perioddata=tdis_ds, time_units=time_units + ) + flopy.mf6.ModflowIms( + sim, + print_option="summary", + complexity="complex", + no_ptcrecord="all", + outer_dvclose=1.0e-4, + outer_maximum=2000, + under_relaxation="dbd", + linear_acceleration="BICGSTAB", + under_relaxation_theta=0.7, + under_relaxation_kappa=0.08, + under_relaxation_gamma=0.05, + under_relaxation_momentum=0.0, + backtracking_number=20, + backtracking_tolerance=2.0, + backtracking_reduction_factor=0.2, + backtracking_residual_limit=5.0e-4, + inner_dvclose=1.0e-5, + rcloserecord=[0.0001, "relative_rclose"], + inner_maximum=100, + relaxation_factor=0.0, + number_orthogonalizations=2, + preconditioner_levels=8, + preconditioner_drop_tolerance=0.001, + ) + gwf = flopy.mf6.ModflowGwf( + sim, modelname=name, save_flows=True, newtonoptions=["newton"] + ) + flopy.mf6.ModflowGwfdis( + gwf, + length_units=length_units, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=delr, + delc=delc, + top=top, + botm=botm, + ) + flopy.mf6.ModflowGwfnpf( + gwf, + save_specific_discharge=True, + save_saturation=True, + icelltype=1, + k=hydraulic_conductivity, + ) + flopy.mf6.ModflowGwfic(gwf, strt=600.0) + chdspd = [[(k, 0, 0), h1] for k in range(nlay) if botm[k] < h1] + chdspd += [[(k, 0, ncol - 1), h2] for k in range(nlay) if botm[k] < h2] + flopy.mf6.ModflowGwfchd( + gwf, + stress_period_data=chdspd, + print_input=True, + print_flows=True, + save_flows=False, + pname="CHD-1", + ) + flopy.mf6.ModflowGwfrch( + gwf, + stress_period_data=rchspd, + auxiliary=["concentration"], + pname="RCH-1", + ) + + head_filerecord = f"{name}.hds" + budget_filerecord = f"{name}.bud" + flopy.mf6.ModflowGwfoc( + gwf, + head_filerecord=head_filerecord, + budget_filerecord=budget_filerecord, + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + return sim + + +def build_mf6gwt(sim_folder): + ws = os.path.join(sim_folder, "mf6-gwt-keating") + name = "trans" + sim_ws = os.path.join(ws, "mf6gwt") + sim = flopy.mf6.MFSimulation( + sim_name=name, + sim_ws=sim_ws, + exe_name="mf6", + continue_=True, + ) + tdis_ds = ((period1, 73, 1.0), (period2, 2927, 1.0)) + flopy.mf6.ModflowTdis( + sim, nper=len(tdis_ds), perioddata=tdis_ds, time_units=time_units + ) + flopy.mf6.ModflowIms( + sim, + print_option="summary", + outer_dvclose=1.0e-4, + outer_maximum=100, + under_relaxation="none", + linear_acceleration="BICGSTAB", + rcloserecord=[1000.0, "strict"], + inner_maximum=20, + inner_dvclose=1.0e-4, + relaxation_factor=0.0, + number_orthogonalizations=2, + preconditioner_levels=8, + preconditioner_drop_tolerance=0.001, + ) + gwt = flopy.mf6.ModflowGwt(sim, modelname=name, save_flows=True) + flopy.mf6.ModflowGwtdis( + gwt, + length_units=length_units, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=delr, + delc=delc, + top=top, + botm=botm, + ) + flopy.mf6.ModflowGwtic(gwt, strt=0) + flopy.mf6.ModflowGwtmst(gwt, porosity=porosity) + flopy.mf6.ModflowGwtadv(gwt, scheme="upstream") + flopy.mf6.ModflowGwtdsp( + gwt, xt3d_off=True, alh=alpha_l, ath1=alpha_th, atv=alpha_tv + ) + pd = [ + ("GWFHEAD", "../mf6gwf/flow.hds"), + ("GWFBUDGET", "../mf6gwf/flow.bud"), + ] + flopy.mf6.ModflowGwtfmi( + gwt, flow_imbalance_correction=True, packagedata=pd + ) + sourcerecarray = [ + ("RCH-1", "AUX", "CONCENTRATION"), + ] + flopy.mf6.ModflowGwtssm(gwt, 
sources=sourcerecarray) + saverecord = { + 0: [ + ("CONCENTRATION", "STEPS", 10), + ("CONCENTRATION", "LAST"), + ("CONCENTRATION", "FREQUENCY", 10), + ], + 1: [ + ("CONCENTRATION", "STEPS", 27, 227), + ("CONCENTRATION", "LAST"), + ("CONCENTRATION", "FREQUENCY", 10), + ], + } + flopy.mf6.ModflowGwtoc( + gwt, + budget_filerecord=f"{name}.cbc", + concentration_filerecord=f"{name}.ucn", + concentrationprintrecord=[ + ("COLUMNS", ncol, "WIDTH", 15, "DIGITS", 6, "GENERAL") + ], + saverecord=saverecord, + printrecord=[ + ("CONCENTRATION", "LAST"), + ( + "BUDGET", + "ALL", + ), + ], + ) + obs_data = { + f"{name}.obs.csv": [ + ("obs1", "CONCENTRATION", obs1), + ("obs2", "CONCENTRATION", obs2), + ], + } + flopy.mf6.ModflowUtlobs( + gwt, digits=10, print_input=True, continuous=obs_data + ) + return sim + + +def build_model(ws): + sim_mf6gwf = build_mf6gwf(ws) + sim_mf6gwt = build_mf6gwt(ws) + sim_mf2005 = None # build_mf2005(sim_name) + sim_mt3dms = None # build_mt3dms(sim_name, sim_mf2005) + sims = (sim_mf6gwf, sim_mf6gwt, sim_mf2005, sim_mt3dms) + return sims + + +def write_model(sims, silent=True): + sim_mf6gwf, sim_mf6gwt, sim_mf2005, sim_mt3dms = sims + sim_mf6gwf.write_simulation(silent=silent) + sim_mf6gwt.write_simulation(silent=silent) + + +def run_keating_model(ws=example_name, silent=True): + sim = build_model(ws) + write_model(sim, silent=silent) + sim_mf6gwf, sim_mf6gwt, sim_mf2005, sim_mt3dms = sim + + print("Running mf6gwf model...") + success, buff = sim_mf6gwf.run_simulation(silent=silent) + if not success: + print(buff) + + print("Running mf6gwt model...") + success, buff = sim_mf6gwt.run_simulation(silent=silent) + if not success: + print(buff) + + return success + + +run_keating_model(modelpth) # + [markdown] pycharm={"name": "#%% md\n"} # Load the flow and transport models diff --git a/.docs/Notebooks/plot_map_view_example.py b/.docs/Notebooks/plot_map_view_example.py index d52969e4f..bb79a16e9 100644 --- a/.docs/Notebooks/plot_map_view_example.py +++ b/.docs/Notebooks/plot_map_view_example.py @@ -36,9 +36,6 @@ import numpy as np import shapefile -sys.path.append(os.path.join("..", "common")) -import notebook_utils - import flopy print(sys.version) @@ -56,8 +53,7 @@ exe_mp = "mp6" # Set the paths -prj_root = notebook_utils.get_project_root_path() -loadpth = str(prj_root / "examples" / "data" / "freyberg") +loadpth = os.path.join("..", "..", "examples", "data", "freyberg") tempdir = TemporaryDirectory() modelpth = tempdir.name @@ -586,7 +582,7 @@ # + pycharm={"name": "#%%\n"} # load the Freyberg model into mf6-flopy and run the simulation sim_name = "mfsim.nam" -sim_path = str(prj_root / "examples" / "data" / "mf6-freyberg") +sim_path = os.path.join("..", "..", "examples", "data", "mf6-freyberg") sim = flopy.mf6.MFSimulation.load( sim_name=sim_name, version=vmf6, exe_name=exe_name_mf6, sim_ws=sim_path ) @@ -702,7 +698,329 @@ # + pycharm={"name": "#%%\n"} # build and run vertex model grid demo problem -notebook_utils.run(modelpth) + + +def run_vertex_grid_example(ws): + """load and run vertex grid example""" + if not os.path.exists(ws): + os.mkdir(ws) + + from flopy.utils.gridgen import Gridgen + + Lx = 10000.0 + Ly = 10500.0 + nlay = 3 + nrow = 21 + ncol = 20 + delr = Lx / ncol + delc = Ly / nrow + top = 400 + botm = [220, 200, 0] + + ms = flopy.modflow.Modflow() + dis5 = flopy.modflow.ModflowDis( + ms, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=delr, + delc=delc, + top=top, + botm=botm, + ) + + model_name = "mp7p2" + model_ws = os.path.join(ws, "mp7_ex2", "mf6") + gridgen_ws = 
os.path.join(model_ws, "gridgen") + g = Gridgen(ms.modelgrid, model_ws=gridgen_ws) + + rf0shp = os.path.join(gridgen_ws, "rf0") + xmin = 7 * delr + xmax = 12 * delr + ymin = 8 * delc + ymax = 13 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 1, range(nlay)) + + rf1shp = os.path.join(gridgen_ws, "rf1") + xmin = 8 * delr + xmax = 11 * delr + ymin = 9 * delc + ymax = 12 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 2, range(nlay)) + + rf2shp = os.path.join(gridgen_ws, "rf2") + xmin = 9 * delr + xmax = 10 * delr + ymin = 10 * delc + ymax = 11 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 3, range(nlay)) + + g.build(verbose=False) + + gridprops = g.get_gridprops_disv() + ncpl = gridprops["ncpl"] + top = gridprops["top"] + botm = gridprops["botm"] + nvert = gridprops["nvert"] + vertices = gridprops["vertices"] + cell2d = gridprops["cell2d"] + # cellxy = gridprops['cellxy'] + + # create simulation + sim = flopy.mf6.MFSimulation( + sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=model_ws + ) + + # create tdis package + tdis_rc = [(1000.0, 1, 1.0)] + tdis = flopy.mf6.ModflowTdis( + sim, pname="tdis", time_units="DAYS", perioddata=tdis_rc + ) + + # create gwf model + gwf = flopy.mf6.ModflowGwf( + sim, modelname=model_name, model_nam_file=f"{model_name}.nam" + ) + gwf.name_file.save_flows = True + + # create iterative model solution and register the gwf model with it + ims = flopy.mf6.ModflowIms( + sim, + pname="ims", + print_option="SUMMARY", + complexity="SIMPLE", + outer_hclose=1.0e-5, + outer_maximum=100, + under_relaxation="NONE", + inner_maximum=100, + inner_hclose=1.0e-6, + rcloserecord=0.1, + linear_acceleration="BICGSTAB", + scaling_method="NONE", + reordering_method="NONE", + relaxation_factor=0.99, + ) + sim.register_ims_package(ims, [gwf.name]) + + # disv + disv = flopy.mf6.ModflowGwfdisv( + gwf, + nlay=nlay, + ncpl=ncpl, + top=top, + botm=botm, + nvert=nvert, + vertices=vertices, + cell2d=cell2d, + ) + + # initial conditions + ic = flopy.mf6.ModflowGwfic(gwf, pname="ic", strt=320.0) + + # node property flow + npf = flopy.mf6.ModflowGwfnpf( + gwf, + xt3doptions=[("xt3d")], + save_specific_discharge=True, + icelltype=[1, 0, 0], + k=[50.0, 0.01, 200.0], + k33=[10.0, 0.01, 20.0], + ) + + # wel + wellpoints = [(4750.0, 5250.0)] + welcells = g.intersect(wellpoints, "point", 0) + # welspd = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=1, aux_vars=['iface']) + welspd = [[(2, icpl), -150000, 0] for icpl in welcells["nodenumber"]] + wel = flopy.mf6.ModflowGwfwel( + gwf, + print_input=True, + auxiliary=[("iface",)], + stress_period_data=welspd, + ) + + # rch + aux = [np.ones(ncpl, dtype=int) * 6] + rch = flopy.mf6.ModflowGwfrcha( + gwf, recharge=0.005, auxiliary=[("iface",)], aux={0: [6]} + ) + # riv + riverline = [[(Lx - 1.0, Ly), (Lx - 1.0, 0.0)]] + rivcells = g.intersect(riverline, "line", 0) + rivspd = [ + [(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"] + ] + riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd) + + # output control + oc = flopy.mf6.ModflowGwfoc( + gwf, + pname="oc", + budget_filerecord=f"{model_name}.cbb", + head_filerecord=f"{model_name}.hds", + 
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + sim.write_simulation() + success, buff = sim.run_simulation(silent=True, report=True) + if success: + for line in buff: + print(line) + else: + raise ValueError("Failed to run.") + + mp_namea = f"{model_name}a_mp" + mp_nameb = f"{model_name}b_mp" + + pcoord = np.array( + [ + [0.000, 0.125, 0.500], + [0.000, 0.375, 0.500], + [0.000, 0.625, 0.500], + [0.000, 0.875, 0.500], + [1.000, 0.125, 0.500], + [1.000, 0.375, 0.500], + [1.000, 0.625, 0.500], + [1.000, 0.875, 0.500], + [0.125, 0.000, 0.500], + [0.375, 0.000, 0.500], + [0.625, 0.000, 0.500], + [0.875, 0.000, 0.500], + [0.125, 1.000, 0.500], + [0.375, 1.000, 0.500], + [0.625, 1.000, 0.500], + [0.875, 1.000, 0.500], + ] + ) + nodew = gwf.disv.ncpl.array * 2 + welcells["nodenumber"][0] + plocs = [nodew for i in range(pcoord.shape[0])] + + # create particle data + pa = flopy.modpath.ParticleData( + plocs, + structured=False, + localx=pcoord[:, 0], + localy=pcoord[:, 1], + localz=pcoord[:, 2], + drape=0, + ) + + # create backward particle group + fpth = f"{mp_namea}.sloc" + pga = flopy.modpath.ParticleGroup( + particlegroupname="BACKWARD1", particledata=pa, filename=fpth + ) + + facedata = flopy.modpath.FaceDataType( + drape=0, + verticaldivisions1=10, + horizontaldivisions1=10, + verticaldivisions2=10, + horizontaldivisions2=10, + verticaldivisions3=10, + horizontaldivisions3=10, + verticaldivisions4=10, + horizontaldivisions4=10, + rowdivisions5=0, + columndivisions5=0, + rowdivisions6=4, + columndivisions6=4, + ) + pb = flopy.modpath.NodeParticleData(subdivisiondata=facedata, nodes=nodew) + # create forward particle group + fpth = f"{mp_nameb}.sloc" + pgb = flopy.modpath.ParticleGroupNodeTemplate( + particlegroupname="BACKWARD2", particledata=pb, filename=fpth + ) + + # create modpath files + mp = flopy.modpath.Modpath7( + modelname=mp_namea, flowmodel=gwf, exe_name="mp7", model_ws=model_ws + ) + flopy.modpath.Modpath7Bas(mp, porosity=0.1) + flopy.modpath.Modpath7Sim( + mp, + simulationtype="combined", + trackingdirection="backward", + weaksinkoption="pass_through", + weaksourceoption="pass_through", + referencetime=0.0, + stoptimeoption="extend", + timepointdata=[500, 1000.0], + particlegroups=pga, + ) + + # write modpath datasets + mp.write_input() + + # run modpath + success, buff = mp.run_model(silent=True, report=True) + if success: + for line in buff: + print(line) + else: + raise ValueError("Failed to run.") + + # create modpath files + mp = flopy.modpath.Modpath7( + modelname=mp_nameb, flowmodel=gwf, exe_name="mp7", model_ws=model_ws + ) + flopy.modpath.Modpath7Bas(mp, porosity=0.1) + flopy.modpath.Modpath7Sim( + mp, + simulationtype="endpoint", + trackingdirection="backward", + weaksinkoption="pass_through", + weaksourceoption="pass_through", + referencetime=0.0, + stoptimeoption="extend", + particlegroups=pgb, + ) + + # write modpath datasets + mp.write_input() + + # run modpath + success, buff = mp.run_model(silent=True, report=True) + assert success, pformat(buff) + + +run_vertex_grid_example(modelpth) # check if model ran properly modelpth = os.path.join(modelpth, "mp7_ex2", "mf6") @@ -879,7 +1197,7 @@ from flopy.discretization import UnstructuredGrid # this is a folder containing some unstructured grids -datapth = str(prj_root / "examples" / "data" / "unstructured") +datapth = os.path.join("..", "..", "examples", "data", "unstructured") # simple 
functions to load vertices and incidence lists diff --git a/.docs/Notebooks/raster_intersection_example.py b/.docs/Notebooks/raster_intersection_example.py index 4c5eecbcd..447581faa 100644 --- a/.docs/Notebooks/raster_intersection_example.py +++ b/.docs/Notebooks/raster_intersection_example.py @@ -1,11 +1,12 @@ # --- # jupyter: # jupytext: +# notebook_metadata_filter: metadata # text_representation: # extension: .py # format_name: light # format_version: '1.5' -# jupytext_version: 1.14.5 +# jupytext_version: 1.14.4 # kernelspec: # display_name: Python 3 (ipykernel) # language: python @@ -570,6 +571,30 @@ # The `ibound` array and the `top` array can be used to build or edit the BAS and DIS file objects in FloPy +# ## Raster re-projection +# +# The `Raster` class has a built-in `to_crs()` method that allows for raster reprojection. The `to_crs()` method has two parameters that can be used to define the target coordinate system and one additional parameter for in-place reprojection: +# +# - `crs`: accepts many formats of coordinate reference system (WKT string, EPSG code, pyproj.CRS, rasterio.CRS, PROJ4 string, EPSG string, etc.) +# - `epsg`: integer EPSG code +# - `inplace`: bool; `False` (the default) returns a new `Raster` object, while `True` modifies the existing `Raster` object +# +# Here's an example: + +cur_crs = rio.crs +print(cur_crs) +print(rio.transform) + +rio_reproj = rio.to_crs(crs="EPSG:4326") # WGS84 dec. lat lon +print(rio_reproj.crs) +print(rio_reproj.transform) + +# Reproject as an in-place operation + +rio.to_crs(epsg=4326, inplace=True) +print(rio.crs) +print(rio.transform) + # ## Future development # # Potential features that draw on this functionality could include: diff --git a/.docs/Notebooks/seawat_henry_example.py b/.docs/Notebooks/seawat_henry_example.py index 1c8575460..7307f9015 100644 --- a/.docs/Notebooks/seawat_henry_example.py +++ b/.docs/Notebooks/seawat_henry_example.py @@ -10,6 +10,7 @@ # kernelspec: # display_name: Python 3 (ipykernel) # language: python +# section: mt3d # name: python3 # --- diff --git a/.docs/Notebooks/sfrpackage_example.py b/.docs/Notebooks/sfrpackage_example.py index 60fb87757..6ce4273e9 100644 --- a/.docs/Notebooks/sfrpackage_example.py +++ b/.docs/Notebooks/sfrpackage_example.py @@ -33,8 +33,6 @@ import glob import os import shutil - -# + import sys from pprint import pformat from tempfile import TemporaryDirectory @@ -237,7 +235,7 @@ bpth = os.path.join(path, "test1ss.cbc") cbbobj = bf.CellBudgetFile(bpth) -cbbobj.list_records() +cbbobj.headers sfrleak = cbbobj.get_data(text=" STREAM LEAKAGE")[0] sfrleak[sfrleak == 0] = np.nan # remove zero values diff --git a/.docs/Notebooks/uzf_example.py b/.docs/Notebooks/uzf_example.py index 67f934d0a..ab206e0c3 100644 --- a/.docs/Notebooks/uzf_example.py +++ b/.docs/Notebooks/uzf_example.py @@ -238,7 +238,6 @@ avail = os.path.isfile(fpth) if avail: uzfbdobjct = flopy.utils.CellBudgetFile(fpth) - uzfbdobjct.list_records() else: print(f'"{fpth}" is not available') diff --git a/.docs/Notebooks/zonebudget_example.py b/.docs/Notebooks/zonebudget_example.py index 2ea5da4e5..ea9ee0bc0 100644 --- a/.docs/Notebooks/zonebudget_example.py +++ b/.docs/Notebooks/zonebudget_example.py @@ -111,7 +111,7 @@ inyrbud = inyr.get_budget() names = ["FROM_RECHARGE"] -rowidx = np.in1d(cmdbud["name"], names) +rowidx = np.isin(cmdbud["name"], names) colidx = "ZONE_1" print(f"{cmdbud[rowidx][colidx][0]:,.1f} cubic meters/day") diff --git a/.docs/common/groundwater2023_utils.py
b/.docs/common/groundwater2023_utils.py deleted file mode 100644 index 7574611d9..000000000 --- a/.docs/common/groundwater2023_utils.py +++ /dev/null @@ -1,198 +0,0 @@ -import numpy as np -import shapely -from shapely.geometry import Polygon - -from flopy.utils.gridintersect import GridIntersect - -geometries = { - "boundary": """1.868012422360248456e+05 4.695652173913043953e+04 - 1.790372670807453396e+05 5.204968944099379587e+04 - 1.729813664596273447e+05 5.590062111801243009e+04 - 1.672360248447204940e+05 5.987577639751553215e+04 - 1.631987577639751253e+05 6.335403726708075556e+04 - 1.563664596273291972e+05 6.819875776397516893e+04 - 1.509316770186335489e+05 7.229813664596274612e+04 - 1.453416149068323139e+05 7.527950310559007630e+04 - 1.395962732919254631e+05 7.627329192546584818e+04 - 1.357142857142857101e+05 7.664596273291927355e+04 - 1.329192546583850926e+05 7.751552795031057030e+04 - 1.268633540372670832e+05 8.062111801242237561e+04 - 1.218944099378881947e+05 8.285714285714286962e+04 - 1.145962732919254486e+05 8.571428571428572468e+04 - 1.069875776397515583e+05 8.869565217391305487e+04 - 1.023291925465838431e+05 8.931677018633540138e+04 - 9.456521739130433707e+04 9.068322981366459862e+04 - 8.804347826086955320e+04 9.080745341614908830e+04 - 7.950310559006211406e+04 9.267080745341615693e+04 - 7.562111801242236106e+04 9.391304347826087906e+04 - 6.692546583850930620e+04 9.602484472049689793e+04 - 5.667701863354037778e+04 9.763975155279504543e+04 - 4.906832298136646568e+04 9.689440993788820924e+04 - 3.897515527950309479e+04 9.540372670807455142e+04 - 3.167701863354036323e+04 9.304347826086958230e+04 - 2.375776397515527788e+04 8.757763975155279331e+04 - 1.847826086956521613e+04 8.161490683229814749e+04 - 1.164596273291925172e+04 7.739130434782608063e+04 - 6.211180124223596977e+03 7.055900621118013805e+04 - 4.347826086956512881e+03 6.422360248447205959e+04 - 1.863354037267072272e+03 6.037267080745341809e+04 - 2.639751552795024509e+03 5.602484472049689793e+04 - 1.552795031055893560e+03 5.279503105590062478e+04 - 7.763975155279410956e+02 4.186335403726709046e+04 - 2.018633540372667312e+03 3.813664596273292409e+04 - 6.055900621118013078e+03 3.341614906832297856e+04 - 1.335403726708074100e+04 2.782608695652173992e+04 - 2.577639751552794405e+04 2.086956521739130767e+04 - 3.416149068322980747e+04 1.763975155279503815e+04 - 4.642857142857142753e+04 1.440993788819875044e+04 - 5.636645962732918997e+04 1.130434782608694877e+04 - 6.459627329192546313e+04 9.813664596273290954e+03 - 8.555900621118012350e+04 6.832298136645956220e+03 - 9.829192546583850344e+04 5.093167701863346338e+03 - 1.085403726708074391e+05 4.347826086956525614e+03 - 1.200310559006211115e+05 4.223602484472040487e+03 - 1.296583850931677007e+05 4.347826086956525614e+03 - 1.354037267080745369e+05 5.590062111801232277e+03 - 1.467391304347825935e+05 1.267080745341615875e+04 - 1.563664596273291972e+05 1.937888198757762802e+04 - 1.630434782608695677e+05 2.198757763975155467e+04 - 1.694099378881987650e+05 2.434782608695652743e+04 - 1.782608695652173774e+05 2.981366459627329095e+04 - 1.833850931677018234e+05 3.180124223602484562e+04 - 1.868012422360248456e+05 3.577639751552795497e+04""", - "streamseg1": """1.868012422360248456e+05 4.086956521739130403e+04 - 1.824534161490683327e+05 4.086956521739130403e+04 - 1.770186335403726553e+05 4.124223602484472940e+04 - 1.737577639751552779e+05 4.186335403726709046e+04 - 1.703416149068323139e+05 4.310559006211180531e+04 - 1.670807453416148783e+05 4.397515527950310934e+04 - 1.636645962732919143e+05 
4.484472049689441337e+04 - 1.590062111801242281e+05 4.559006211180124228e+04 - 1.555900621118012350e+05 4.559006211180124228e+04 - 1.510869565217391064e+05 4.546583850931677443e+04 - 1.479813664596273156e+05 4.534161490683229931e+04 - 1.453416149068323139e+05 4.496894409937888850e+04 - 1.377329192546583654e+05 4.447204968944099528e+04 - 1.326086956521739194e+05 4.447204968944099528e+04 - 1.285714285714285652e+05 4.434782608695652743e+04 - 1.245341614906832110e+05 4.472049689440993825e+04 - 1.215838509316770069e+05 4.509316770186335634e+04 - 1.161490683229813585e+05 4.509316770186335634e+04 - 1.125776397515527933e+05 4.459627329192547040e+04 - 1.074534161490683036e+05 4.385093167701864149e+04 - 1.018633540372670686e+05 4.347826086956522340e+04 - 9.798136645962731563e+04 4.360248447204969125e+04 - 9.223602484472049400e+04 4.310559006211180531e+04 - 8.602484472049689793e+04 4.198757763975155831e+04 - 7.981366459627327276e+04 4.173913043478261534e+04 - 7.468944099378881219e+04 4.248447204968944425e+04 - 7.034161490683228476e+04 4.385093167701864149e+04 - 6.785714285714285506e+04 4.621118012422360334e+04 - 6.583850931677018525e+04 4.919254658385094081e+04 - 6.319875776397513982e+04 5.192546583850932075e+04 - 6.009316770186335634e+04 5.677018633540373412e+04 - 5.605590062111800216e+04 5.950310559006211406e+04 - 5.279503105590060295e+04 6.124223602484472940e+04 - 4.751552795031056303e+04 6.211180124223603343e+04 - 3.990683229813664366e+04 6.335403726708075556e+04 - 3.276397515527949508e+04 6.409937888198757719e+04 - 2.934782608695651652e+04 6.509316770186336362e+04 - 2.546583850931676716e+04 6.832298136645962950e+04""", - "streamseg2": """7.025161490683228476e+04 4.375093167701864149e+04 - 6.816770186335404287e+04 4.273291925465839449e+04 - 6.490683229813665093e+04 4.211180124223603343e+04 - 6.164596273291925900e+04 4.173913043478262261e+04 - 5.776397515527951327e+04 4.124223602484472940e+04 - 5.450310559006211406e+04 4.049689440993789322e+04 - 4.984472049689442065e+04 3.937888198757764621e+04 - 4.534161490683231386e+04 3.801242236024845624e+04 - 4.114906832298137306e+04 3.664596273291926627e+04 - 3.913043478260868869e+04 3.565217391304348712e+04 - 3.649068322981366509e+04 3.416149068322981475e+04 - 3.322981366459628043e+04 3.242236024844721760e+04 - 3.012422360248447148e+04 3.105590062111801672e+04 - 2.608695652173913550e+04 2.957521739130435890e+04""", - "streamseg3": """1.059006211180124228e+05 4.335403726708074828e+04 - 1.029503105590062187e+05 4.223602484472050128e+04 - 1.004658385093167890e+05 4.024844720496894297e+04 - 9.937888198757765349e+04 3.788819875776398112e+04 - 9.627329192546584818e+04 3.490683229813664366e+04 - 9.285714285714286962e+04 3.316770186335403559e+04 - 8.897515527950311662e+04 3.093167701863354159e+04 - 8.338509316770188161e+04 2.795031055900621504e+04 - 7.872670807453416637e+04 2.670807453416148928e+04 - 7.329192546583851799e+04 2.385093167701863058e+04 - 6.863354037267081731e+04 2.111801242236025064e+04 - 6.304347826086958230e+04 1.863354037267081003e+04""", - "streamseg4": """1.371118012422360480e+05 4.472049689440994553e+04 - 1.321428571428571595e+05 4.720496894409938250e+04 - 1.285714285714285652e+05 4.981366459627330187e+04 - 1.243788819875776535e+05 5.341614906832298584e+04 - 1.189440993788819906e+05 5.540372670807454415e+04 - 1.125776397515527933e+05 5.627329192546584818e+04 - 1.065217391304347839e+05 5.726708074534162733e+04 - 1.020186335403726698e+05 5.913043478260870324e+04 - 9.409937888198759174e+04 6.273291925465840177e+04 - 9.192546583850932075e+04 
6.633540372670808574e+04 - 8.881987577639751544e+04 7.242236024844722124e+04 - 8.586956521739131131e+04 7.552795031055902655e+04 - 8.369565217391305487e+04 7.962732919254660374e+04""", -} - - -def string2geom(geostring, conversion=None): - if conversion is None: - multiplier = 1.0 - else: - multiplier = float(conversion) - res = [] - for line in geostring.split("\n"): - line = line.strip() - line = line.split(" ") - x = float(line[0]) * multiplier - y = float(line[1]) * multiplier - res.append((x, y)) - return res - - -def densify_geometry(line, step, keep_internal_nodes=True): - xy = [] # list of tuple of coordinates - lines_strings = [] - if keep_internal_nodes: - for idx in range(1, len(line)): - lines_strings.append( - shapely.geometry.LineString(line[idx - 1 : idx + 1]) - ) - else: - lines_strings = [shapely.geometry.LineString(line)] - - for line_string in lines_strings: - length_m = line_string.length # get the length - for distance in np.arange(0, length_m + step, step): - point = line_string.interpolate(distance) - xy_tuple = (point.x, point.y) - if xy_tuple not in xy: - xy.append(xy_tuple) - # make sure the end point is in xy - if keep_internal_nodes: - xy_tuple = line_string.coords[-1] - if xy_tuple not in xy: - xy.append(xy_tuple) - - return xy - - -# function to set the active and inactive model area -def set_idomain(grid, boundary): - ix = GridIntersect(grid, method="vertex", rtree=True) - result = ix.intersect(Polygon(boundary)) - idx = [coords for coords in result.cellids] - idx = np.array(idx, dtype=int) - nr = idx.shape[0] - if idx.ndim == 1: - idx = idx.reshape((nr, 1)) - idx = tuple([idx[:, i] for i in range(idx.shape[1])]) - idomain = np.zeros(grid.shape[1:], dtype=int) - idomain[idx] = 1 - idomain = idomain.reshape(grid.shape) - grid.idomain = idomain diff --git a/.docs/common/notebook_utils.py b/.docs/common/notebook_utils.py deleted file mode 100644 index b805d4c92..000000000 --- a/.docs/common/notebook_utils.py +++ /dev/null @@ -1,776 +0,0 @@ -import os -import sys -from pathlib import Path - -import numpy as np - -try: - import flopy -except ImportError: - fpth = os.path.abspath(os.path.join("..", "..", "..")) - sys.path.append(fpth) - import flopy - - -def get_project_root_path() -> Path: - return Path.cwd().parent.parent - - -geometries = { - "boundary": """1.868012422360248456e+05 4.695652173913043953e+04 - 1.790372670807453396e+05 5.204968944099379587e+04 - 1.729813664596273447e+05 5.590062111801243009e+04 - 1.672360248447204940e+05 5.987577639751553215e+04 - 1.631987577639751253e+05 6.335403726708075556e+04 - 1.563664596273291972e+05 6.819875776397516893e+04 - 1.509316770186335489e+05 7.229813664596274612e+04 - 1.453416149068323139e+05 7.527950310559007630e+04 - 1.395962732919254631e+05 7.627329192546584818e+04 - 1.357142857142857101e+05 7.664596273291927355e+04 - 1.329192546583850926e+05 7.751552795031057030e+04 - 1.268633540372670832e+05 8.062111801242237561e+04 - 1.218944099378881947e+05 8.285714285714286962e+04 - 1.145962732919254486e+05 8.571428571428572468e+04 - 1.069875776397515583e+05 8.869565217391305487e+04 - 1.023291925465838431e+05 8.931677018633540138e+04 - 9.456521739130433707e+04 9.068322981366459862e+04 - 8.804347826086955320e+04 9.080745341614908830e+04 - 7.950310559006211406e+04 9.267080745341615693e+04 - 7.562111801242236106e+04 9.391304347826087906e+04 - 6.692546583850930620e+04 9.602484472049689793e+04 - 5.667701863354037778e+04 9.763975155279504543e+04 - 4.906832298136646568e+04 9.689440993788820924e+04 - 3.897515527950309479e+04 
9.540372670807455142e+04 - 3.167701863354036323e+04 9.304347826086958230e+04 - 2.375776397515527788e+04 8.757763975155279331e+04 - 1.847826086956521613e+04 8.161490683229814749e+04 - 1.164596273291925172e+04 7.739130434782608063e+04 - 6.211180124223596977e+03 7.055900621118013805e+04 - 4.347826086956512881e+03 6.422360248447205959e+04 - 1.863354037267072272e+03 6.037267080745341809e+04 - 2.639751552795024509e+03 5.602484472049689793e+04 - 1.552795031055893560e+03 5.279503105590062478e+04 - 7.763975155279410956e+02 4.186335403726709046e+04 - 2.018633540372667312e+03 3.813664596273292409e+04 - 6.055900621118013078e+03 3.341614906832297856e+04 - 1.335403726708074100e+04 2.782608695652173992e+04 - 2.577639751552794405e+04 2.086956521739130767e+04 - 3.416149068322980747e+04 1.763975155279503815e+04 - 4.642857142857142753e+04 1.440993788819875044e+04 - 5.636645962732918997e+04 1.130434782608694877e+04 - 6.459627329192546313e+04 9.813664596273290954e+03 - 8.555900621118012350e+04 6.832298136645956220e+03 - 9.829192546583850344e+04 5.093167701863346338e+03 - 1.085403726708074391e+05 4.347826086956525614e+03 - 1.200310559006211115e+05 4.223602484472040487e+03 - 1.296583850931677007e+05 4.347826086956525614e+03 - 1.354037267080745369e+05 5.590062111801232277e+03 - 1.467391304347825935e+05 1.267080745341615875e+04 - 1.563664596273291972e+05 1.937888198757762802e+04 - 1.630434782608695677e+05 2.198757763975155467e+04 - 1.694099378881987650e+05 2.434782608695652743e+04 - 1.782608695652173774e+05 2.981366459627329095e+04 - 1.833850931677018234e+05 3.180124223602484562e+04 - 1.868012422360248456e+05 3.577639751552795497e+04""", - "streamseg1": """1.868012422360248456e+05 4.086956521739130403e+04 - 1.824534161490683327e+05 4.086956521739130403e+04 - 1.770186335403726553e+05 4.124223602484472940e+04 - 1.737577639751552779e+05 4.186335403726709046e+04 - 1.703416149068323139e+05 4.310559006211180531e+04 - 1.670807453416148783e+05 4.397515527950310934e+04 - 1.636645962732919143e+05 4.484472049689441337e+04 - 1.590062111801242281e+05 4.559006211180124228e+04 - 1.555900621118012350e+05 4.559006211180124228e+04 - 1.510869565217391064e+05 4.546583850931677443e+04 - 1.479813664596273156e+05 4.534161490683229931e+04 - 1.453416149068323139e+05 4.496894409937888850e+04 - 1.377329192546583654e+05 4.447204968944099528e+04 - 1.326086956521739194e+05 4.447204968944099528e+04 - 1.285714285714285652e+05 4.434782608695652743e+04 - 1.245341614906832110e+05 4.472049689440993825e+04 - 1.215838509316770069e+05 4.509316770186335634e+04 - 1.161490683229813585e+05 4.509316770186335634e+04 - 1.125776397515527933e+05 4.459627329192547040e+04 - 1.074534161490683036e+05 4.385093167701864149e+04 - 1.018633540372670686e+05 4.347826086956522340e+04 - 9.798136645962731563e+04 4.360248447204969125e+04 - 9.223602484472049400e+04 4.310559006211180531e+04 - 8.602484472049689793e+04 4.198757763975155831e+04 - 7.981366459627327276e+04 4.173913043478261534e+04 - 7.468944099378881219e+04 4.248447204968944425e+04 - 7.034161490683228476e+04 4.385093167701864149e+04 - 6.785714285714285506e+04 4.621118012422360334e+04 - 6.583850931677018525e+04 4.919254658385094081e+04 - 6.319875776397513982e+04 5.192546583850932075e+04 - 6.009316770186335634e+04 5.677018633540373412e+04 - 5.605590062111800216e+04 5.950310559006211406e+04 - 5.279503105590060295e+04 6.124223602484472940e+04 - 4.751552795031056303e+04 6.211180124223603343e+04 - 3.990683229813664366e+04 6.335403726708075556e+04 - 3.276397515527949508e+04 6.409937888198757719e+04 - 
2.934782608695651652e+04 6.509316770186336362e+04 - 2.546583850931676716e+04 6.832298136645962950e+04""", - "streamseg2": """7.025161490683228476e+04 4.375093167701864149e+04 - 6.816770186335404287e+04 4.273291925465839449e+04 - 6.490683229813665093e+04 4.211180124223603343e+04 - 6.164596273291925900e+04 4.173913043478262261e+04 - 5.776397515527951327e+04 4.124223602484472940e+04 - 5.450310559006211406e+04 4.049689440993789322e+04 - 4.984472049689442065e+04 3.937888198757764621e+04 - 4.534161490683231386e+04 3.801242236024845624e+04 - 4.114906832298137306e+04 3.664596273291926627e+04 - 3.913043478260868869e+04 3.565217391304348712e+04 - 3.649068322981366509e+04 3.416149068322981475e+04 - 3.322981366459628043e+04 3.242236024844721760e+04 - 3.012422360248447148e+04 3.105590062111801672e+04 - 2.608695652173913550e+04 2.957521739130435890e+04""", - "streamseg3": """1.059006211180124228e+05 4.335403726708074828e+04 - 1.029503105590062187e+05 4.223602484472050128e+04 - 1.004658385093167890e+05 4.024844720496894297e+04 - 9.937888198757765349e+04 3.788819875776398112e+04 - 9.627329192546584818e+04 3.490683229813664366e+04 - 9.285714285714286962e+04 3.316770186335403559e+04 - 8.897515527950311662e+04 3.093167701863354159e+04 - 8.338509316770188161e+04 2.795031055900621504e+04 - 7.872670807453416637e+04 2.670807453416148928e+04 - 7.329192546583851799e+04 2.385093167701863058e+04 - 6.863354037267081731e+04 2.111801242236025064e+04 - 6.304347826086958230e+04 1.863354037267081003e+04""", - "streamseg4": """1.371118012422360480e+05 4.472049689440994553e+04 - 1.321428571428571595e+05 4.720496894409938250e+04 - 1.285714285714285652e+05 4.981366459627330187e+04 - 1.243788819875776535e+05 5.341614906832298584e+04 - 1.189440993788819906e+05 5.540372670807454415e+04 - 1.125776397515527933e+05 5.627329192546584818e+04 - 1.065217391304347839e+05 5.726708074534162733e+04 - 1.020186335403726698e+05 5.913043478260870324e+04 - 9.409937888198759174e+04 6.273291925465840177e+04 - 9.192546583850932075e+04 6.633540372670808574e+04 - 8.881987577639751544e+04 7.242236024844722124e+04 - 8.586956521739131131e+04 7.552795031055902655e+04 - 8.369565217391305487e+04 7.962732919254660374e+04""", -} - - -def string2geom(geostring, conversion=None): - if conversion is None: - multiplier = 1.0 - else: - multiplier = float(conversion) - res = [] - for line in geostring.split("\n"): - line = line.strip() - line = line.split(" ") - x = float(line[0]) * multiplier - y = float(line[1]) * multiplier - res.append((x, y)) - return res - - -def run(ws): - ## load and run vertex grid example - # run installed version of flopy or add local path - if not os.path.exists(ws): - os.mkdir(ws) - - from flopy.utils.gridgen import Gridgen - - Lx = 10000.0 - Ly = 10500.0 - nlay = 3 - nrow = 21 - ncol = 20 - delr = Lx / ncol - delc = Ly / nrow - top = 400 - botm = [220, 200, 0] - - ms = flopy.modflow.Modflow() - dis5 = flopy.modflow.ModflowDis( - ms, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - top=top, - botm=botm, - ) - - model_name = "mp7p2" - model_ws = os.path.join(ws, "mp7_ex2", "mf6") - gridgen_ws = os.path.join(model_ws, "gridgen") - g = Gridgen(ms.modelgrid, model_ws=gridgen_ws) - - rf0shp = os.path.join(gridgen_ws, "rf0") - xmin = 7 * delr - xmax = 12 * delr - ymin = 8 * delc - ymax = 13 * delc - rfpoly = [ - [ - [ - (xmin, ymin), - (xmax, ymin), - (xmax, ymax), - (xmin, ymax), - (xmin, ymin), - ] - ] - ] - g.add_refinement_features(rfpoly, "polygon", 1, range(nlay)) - - rf1shp = os.path.join(gridgen_ws, "rf1") - xmin 
= 8 * delr - xmax = 11 * delr - ymin = 9 * delc - ymax = 12 * delc - rfpoly = [ - [ - [ - (xmin, ymin), - (xmax, ymin), - (xmax, ymax), - (xmin, ymax), - (xmin, ymin), - ] - ] - ] - g.add_refinement_features(rfpoly, "polygon", 2, range(nlay)) - - rf2shp = os.path.join(gridgen_ws, "rf2") - xmin = 9 * delr - xmax = 10 * delr - ymin = 10 * delc - ymax = 11 * delc - rfpoly = [ - [ - [ - (xmin, ymin), - (xmax, ymin), - (xmax, ymax), - (xmin, ymax), - (xmin, ymin), - ] - ] - ] - g.add_refinement_features(rfpoly, "polygon", 3, range(nlay)) - - g.build(verbose=False) - - gridprops = g.get_gridprops_disv() - ncpl = gridprops["ncpl"] - top = gridprops["top"] - botm = gridprops["botm"] - nvert = gridprops["nvert"] - vertices = gridprops["vertices"] - cell2d = gridprops["cell2d"] - # cellxy = gridprops['cellxy'] - - # create simulation - sim = flopy.mf6.MFSimulation( - sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=model_ws - ) - - # create tdis package - tdis_rc = [(1000.0, 1, 1.0)] - tdis = flopy.mf6.ModflowTdis( - sim, pname="tdis", time_units="DAYS", perioddata=tdis_rc - ) - - # create gwf model - gwf = flopy.mf6.ModflowGwf( - sim, modelname=model_name, model_nam_file=f"{model_name}.nam" - ) - gwf.name_file.save_flows = True - - # create iterative model solution and register the gwf model with it - ims = flopy.mf6.ModflowIms( - sim, - pname="ims", - print_option="SUMMARY", - complexity="SIMPLE", - outer_hclose=1.0e-5, - outer_maximum=100, - under_relaxation="NONE", - inner_maximum=100, - inner_hclose=1.0e-6, - rcloserecord=0.1, - linear_acceleration="BICGSTAB", - scaling_method="NONE", - reordering_method="NONE", - relaxation_factor=0.99, - ) - sim.register_ims_package(ims, [gwf.name]) - - # disv - disv = flopy.mf6.ModflowGwfdisv( - gwf, - nlay=nlay, - ncpl=ncpl, - top=top, - botm=botm, - nvert=nvert, - vertices=vertices, - cell2d=cell2d, - ) - - # initial conditions - ic = flopy.mf6.ModflowGwfic(gwf, pname="ic", strt=320.0) - - # node property flow - npf = flopy.mf6.ModflowGwfnpf( - gwf, - xt3doptions=[("xt3d")], - save_specific_discharge=True, - icelltype=[1, 0, 0], - k=[50.0, 0.01, 200.0], - k33=[10.0, 0.01, 20.0], - ) - - # wel - wellpoints = [(4750.0, 5250.0)] - welcells = g.intersect(wellpoints, "point", 0) - # welspd = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=1, aux_vars=['iface']) - welspd = [[(2, icpl), -150000, 0] for icpl in welcells["nodenumber"]] - wel = flopy.mf6.ModflowGwfwel( - gwf, - print_input=True, - auxiliary=[("iface",)], - stress_period_data=welspd, - ) - - # rch - aux = [np.ones(ncpl, dtype=int) * 6] - rch = flopy.mf6.ModflowGwfrcha( - gwf, recharge=0.005, auxiliary=[("iface",)], aux={0: [6]} - ) - # riv - riverline = [[(Lx - 1.0, Ly), (Lx - 1.0, 0.0)]] - rivcells = g.intersect(riverline, "line", 0) - rivspd = [ - [(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"] - ] - riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd) - - # output control - oc = flopy.mf6.ModflowGwfoc( - gwf, - pname="oc", - budget_filerecord=f"{model_name}.cbb", - head_filerecord=f"{model_name}.hds", - headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], - saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - ) - - sim.write_simulation() - success, buff = sim.run_simulation(silent=True, report=True) - if success: - for line in buff: - print(line) - else: - raise ValueError("Failed to run.") - - mp_namea = f"{model_name}a_mp" - mp_nameb = f"{model_name}b_mp" - - pcoord = 
np.array( - [ - [0.000, 0.125, 0.500], - [0.000, 0.375, 0.500], - [0.000, 0.625, 0.500], - [0.000, 0.875, 0.500], - [1.000, 0.125, 0.500], - [1.000, 0.375, 0.500], - [1.000, 0.625, 0.500], - [1.000, 0.875, 0.500], - [0.125, 0.000, 0.500], - [0.375, 0.000, 0.500], - [0.625, 0.000, 0.500], - [0.875, 0.000, 0.500], - [0.125, 1.000, 0.500], - [0.375, 1.000, 0.500], - [0.625, 1.000, 0.500], - [0.875, 1.000, 0.500], - ] - ) - nodew = gwf.disv.ncpl.array * 2 + welcells["nodenumber"][0] - plocs = [nodew for i in range(pcoord.shape[0])] - - # create particle data - pa = flopy.modpath.ParticleData( - plocs, - structured=False, - localx=pcoord[:, 0], - localy=pcoord[:, 1], - localz=pcoord[:, 2], - drape=0, - ) - - # create backward particle group - fpth = f"{mp_namea}.sloc" - pga = flopy.modpath.ParticleGroup( - particlegroupname="BACKWARD1", particledata=pa, filename=fpth - ) - - facedata = flopy.modpath.FaceDataType( - drape=0, - verticaldivisions1=10, - horizontaldivisions1=10, - verticaldivisions2=10, - horizontaldivisions2=10, - verticaldivisions3=10, - horizontaldivisions3=10, - verticaldivisions4=10, - horizontaldivisions4=10, - rowdivisions5=0, - columndivisions5=0, - rowdivisions6=4, - columndivisions6=4, - ) - pb = flopy.modpath.NodeParticleData(subdivisiondata=facedata, nodes=nodew) - # create forward particle group - fpth = f"{mp_nameb}.sloc" - pgb = flopy.modpath.ParticleGroupNodeTemplate( - particlegroupname="BACKWARD2", particledata=pb, filename=fpth - ) - - # create modpath files - mp = flopy.modpath.Modpath7( - modelname=mp_namea, flowmodel=gwf, exe_name="mp7", model_ws=model_ws - ) - flopy.modpath.Modpath7Bas(mp, porosity=0.1) - flopy.modpath.Modpath7Sim( - mp, - simulationtype="combined", - trackingdirection="backward", - weaksinkoption="pass_through", - weaksourceoption="pass_through", - referencetime=0.0, - stoptimeoption="extend", - timepointdata=[500, 1000.0], - particlegroups=pga, - ) - - # write modpath datasets - mp.write_input() - - # run modpath - success, buff = mp.run_model(silent=True, report=True) - if success: - for line in buff: - print(line) - else: - raise ValueError("Failed to run.") - - # create modpath files - mp = flopy.modpath.Modpath7( - modelname=mp_nameb, flowmodel=gwf, exe_name="mp7", model_ws=model_ws - ) - flopy.modpath.Modpath7Bas(mp, porosity=0.1) - flopy.modpath.Modpath7Sim( - mp, - simulationtype="endpoint", - trackingdirection="backward", - weaksinkoption="pass_through", - weaksourceoption="pass_through", - referencetime=0.0, - stoptimeoption="extend", - particlegroups=pgb, - ) - - # write modpath datasets - mp.write_input() - - # run modpath - success, buff = mp.run_model(silent=True, report=True) - if success: - for line in buff: - print(line) - else: - raise ValueError("Failed to run.") - - -example_name = "ex-gwt-keating" - -# Model units - -length_units = "m" -time_units = "days" - -# Table of model parameters - -nlay = 80 # Number of layers -nrow = 1 # Number of rows -ncol = 400 # Number of columns -delr = 25.0 # Column width ($m$) -delc = 1.0 # Row width ($m$) -delz = 25.0 # Layer thickness ($m$) -top = 2000.0 # Top of model domain ($m$) -bottom = 0.0 # Bottom of model domain ($m$) -hka = 1.0e-12 # Permeability of aquifer ($m^2$) -hkc = 1.0e-18 # Permeability of aquitard ($m^2$) -h1 = 800.0 # Head on left side ($m$) -h2 = 100.0 # Head on right side ($m$) -recharge = 0.5 # Recharge ($kg/s$) -recharge_conc = 1.0 # Normalized recharge concentration (unitless) -alpha_l = 1.0 # Longitudinal dispersivity ($m$) -alpha_th = 1.0 # Transverse 
horizontal dispersivity ($m$) -alpha_tv = 1.0 # Transverse vertical dispersivity ($m$) -period1 = 730 # Length of first simulation period ($d$) -period2 = 29270.0 # Length of second simulation period ($d$) -porosity = 0.1 # Porosity of mobile domain (unitless) -obs1 = (49, 1, 119) # Layer, row, and column for observation 1 -obs2 = (77, 1, 359) # Layer, row, and column for observation 2 - -obs1 = tuple([i - 1 for i in obs1]) -obs2 = tuple([i - 1 for i in obs2]) -seconds_to_days = 24.0 * 60.0 * 60.0 -permeability_to_conductivity = 1000.0 * 9.81 / 1.0e-3 * seconds_to_days -hka = hka * permeability_to_conductivity -hkc = hkc * permeability_to_conductivity -botm = [top - (k + 1) * delz for k in range(nlay)] -x = np.arange(0, 10000.0, delr) + delr / 2.0 -plotaspect = 1.0 - -# Fill hydraulic conductivity array -hydraulic_conductivity = np.ones((nlay, nrow, ncol), dtype=float) * hka -for k in range(nlay): - if 1000.0 <= botm[k] < 1100.0: - for j in range(ncol): - if 3000.0 <= x[j] <= 6000.0: - hydraulic_conductivity[k, 0, j] = hkc - -# Calculate recharge by converting from kg/s to m/d -rcol = [] -for jcol in range(ncol): - if 4200.0 <= x[jcol] <= 4800.0: - rcol.append(jcol) -number_recharge_cells = len(rcol) -rrate = recharge * seconds_to_days / 1000.0 -cell_area = delr * delc -rrate = rrate / (float(number_recharge_cells) * cell_area) -rchspd = {} -rchspd[0] = [[(0, 0, j), rrate, recharge_conc] for j in rcol] -rchspd[1] = [[(0, 0, j), rrate, 0.0] for j in rcol] - - -def build_mf6gwf(sim_folder): - ws = os.path.join(sim_folder, "mf6-gwt-keating") - name = "flow" - sim_ws = os.path.join(ws, "mf6gwf") - sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=sim_ws, exe_name="mf6") - tdis_ds = ((period1, 1, 1.0), (period2, 1, 1.0)) - flopy.mf6.ModflowTdis( - sim, nper=len(tdis_ds), perioddata=tdis_ds, time_units=time_units - ) - flopy.mf6.ModflowIms( - sim, - print_option="summary", - complexity="complex", - no_ptcrecord="all", - outer_dvclose=1.0e-4, - outer_maximum=2000, - under_relaxation="dbd", - linear_acceleration="BICGSTAB", - under_relaxation_theta=0.7, - under_relaxation_kappa=0.08, - under_relaxation_gamma=0.05, - under_relaxation_momentum=0.0, - backtracking_number=20, - backtracking_tolerance=2.0, - backtracking_reduction_factor=0.2, - backtracking_residual_limit=5.0e-4, - inner_dvclose=1.0e-5, - rcloserecord=[0.0001, "relative_rclose"], - inner_maximum=100, - relaxation_factor=0.0, - number_orthogonalizations=2, - preconditioner_levels=8, - preconditioner_drop_tolerance=0.001, - ) - gwf = flopy.mf6.ModflowGwf( - sim, modelname=name, save_flows=True, newtonoptions=["newton"] - ) - flopy.mf6.ModflowGwfdis( - gwf, - length_units=length_units, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - top=top, - botm=botm, - ) - flopy.mf6.ModflowGwfnpf( - gwf, - save_specific_discharge=True, - save_saturation=True, - icelltype=1, - k=hydraulic_conductivity, - ) - flopy.mf6.ModflowGwfic(gwf, strt=600.0) - chdspd = [[(k, 0, 0), h1] for k in range(nlay) if botm[k] < h1] - chdspd += [[(k, 0, ncol - 1), h2] for k in range(nlay) if botm[k] < h2] - flopy.mf6.ModflowGwfchd( - gwf, - stress_period_data=chdspd, - print_input=True, - print_flows=True, - save_flows=False, - pname="CHD-1", - ) - flopy.mf6.ModflowGwfrch( - gwf, - stress_period_data=rchspd, - auxiliary=["concentration"], - pname="RCH-1", - ) - - head_filerecord = f"{name}.hds" - budget_filerecord = f"{name}.bud" - flopy.mf6.ModflowGwfoc( - gwf, - head_filerecord=head_filerecord, - budget_filerecord=budget_filerecord, - 
saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - ) - return sim - - -def build_mf6gwt(sim_folder): - ws = os.path.join(sim_folder, "mf6-gwt-keating") - name = "trans" - sim_ws = os.path.join(ws, "mf6gwt") - sim = flopy.mf6.MFSimulation( - sim_name=name, - sim_ws=sim_ws, - exe_name="mf6", - continue_=True, - ) - tdis_ds = ((period1, 73, 1.0), (period2, 2927, 1.0)) - flopy.mf6.ModflowTdis( - sim, nper=len(tdis_ds), perioddata=tdis_ds, time_units=time_units - ) - flopy.mf6.ModflowIms( - sim, - print_option="summary", - outer_dvclose=1.0e-4, - outer_maximum=100, - under_relaxation="none", - linear_acceleration="BICGSTAB", - rcloserecord=[1000.0, "strict"], - inner_maximum=20, - inner_dvclose=1.0e-4, - relaxation_factor=0.0, - number_orthogonalizations=2, - preconditioner_levels=8, - preconditioner_drop_tolerance=0.001, - ) - gwt = flopy.mf6.ModflowGwt(sim, modelname=name, save_flows=True) - flopy.mf6.ModflowGwtdis( - gwt, - length_units=length_units, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - top=top, - botm=botm, - ) - flopy.mf6.ModflowGwtic(gwt, strt=0) - flopy.mf6.ModflowGwtmst(gwt, porosity=porosity) - flopy.mf6.ModflowGwtadv(gwt, scheme="upstream") - flopy.mf6.ModflowGwtdsp( - gwt, xt3d_off=True, alh=alpha_l, ath1=alpha_th, atv=alpha_tv - ) - pd = [ - ("GWFHEAD", "../mf6gwf/flow.hds"), - ("GWFBUDGET", "../mf6gwf/flow.bud"), - ] - flopy.mf6.ModflowGwtfmi( - gwt, flow_imbalance_correction=True, packagedata=pd - ) - sourcerecarray = [ - ("RCH-1", "AUX", "CONCENTRATION"), - ] - flopy.mf6.ModflowGwtssm(gwt, sources=sourcerecarray) - saverecord = { - 0: [ - ("CONCENTRATION", "STEPS", 10), - ("CONCENTRATION", "LAST"), - ("CONCENTRATION", "FREQUENCY", 10), - ], - 1: [ - ("CONCENTRATION", "STEPS", 27, 227), - ("CONCENTRATION", "LAST"), - ("CONCENTRATION", "FREQUENCY", 10), - ], - } - flopy.mf6.ModflowGwtoc( - gwt, - budget_filerecord=f"{name}.cbc", - concentration_filerecord=f"{name}.ucn", - concentrationprintrecord=[ - ("COLUMNS", ncol, "WIDTH", 15, "DIGITS", 6, "GENERAL") - ], - saverecord=saverecord, - printrecord=[ - ("CONCENTRATION", "LAST"), - ( - "BUDGET", - "ALL", - ), - ], - ) - obs_data = { - f"{name}.obs.csv": [ - ("obs1", "CONCENTRATION", obs1), - ("obs2", "CONCENTRATION", obs2), - ], - } - flopy.mf6.ModflowUtlobs( - gwt, digits=10, print_input=True, continuous=obs_data - ) - return sim - - -def build_model(ws): - sim_mf6gwf = build_mf6gwf(ws) - sim_mf6gwt = build_mf6gwt(ws) - sim_mf2005 = None # build_mf2005(sim_name) - sim_mt3dms = None # build_mt3dms(sim_name, sim_mf2005) - sims = (sim_mf6gwf, sim_mf6gwt, sim_mf2005, sim_mt3dms) - return sims - - -def write_model(sims, silent=True): - sim_mf6gwf, sim_mf6gwt, sim_mf2005, sim_mt3dms = sims - sim_mf6gwf.write_simulation(silent=silent) - sim_mf6gwt.write_simulation(silent=silent) - - -def run_keating_model(ws=example_name, silent=True): - sim = build_model(ws) - write_model(sim, silent=silent) - sim_mf6gwf, sim_mf6gwt, sim_mf2005, sim_mt3dms = sim - - print("Running mf6gwf model...") - success, buff = sim_mf6gwf.run_simulation(silent=silent) - if not success: - print(buff) - - print("Running mf6gwt model...") - success, buff = sim_mf6gwt.run_simulation(silent=silent) - if not success: - print(buff) - - return success - - -if __name__ == "__main__": - run() - run_keating_model() diff --git a/.docs/groundwater_paper/scripts/uspb_capture_par.py b/.docs/groundwater_paper/scripts/uspb_capture_par.py index bce3dadcd..1803ba5fe 100644 --- a/.docs/groundwater_paper/scripts/uspb_capture_par.py +++ 
b/.docs/groundwater_paper/scripts/uspb_capture_par.py
@@ -311,17 +311,12 @@ def doit():
             # increment icnt
             icnt += 1
 
-    ## test cg_model function
-    # t = cf_model(models[0], klay, cells[0][0], cells[0][1], Qcf, baseQ)
-    # sys.stdout.write(t)
-
     # create multiprocessing pool
     pool = mp.Pool(processes=nproc)
     args = [
         (cf_model, idx, len(cells), klay, i, j, Qcf, baseQ, ml.lpf.hdry)
         for idx, (i, j) in enumerate(cells)
     ]
-    # sys.stdout.write(args)
     output = pool.map(unpack_args, args, nproc)
     pool.close()
     pool.join()
diff --git a/.docs/md/get_modflow.md b/.docs/md/get_modflow.md
index 5e9cd3bcf..f1581376a 100644
--- a/.docs/md/get_modflow.md
+++ b/.docs/md/get_modflow.md
@@ -71,6 +71,8 @@ Other auto-select options are only available if the current user can write files
 - `:system` - use `/usr/local/bin`
 - `:windowsapps` - use `%LOCALAPPDATA%\Microsoft\WindowsApps`
 
+**Note:** on macOS, the Python bin directory for a freshly created `venv` environment may not be detected until the environment is deactivated and reactivated.
+
 ## Selecting a distribution
 
 By default the distribution from the [`MODFLOW-USGS/executables` repository](https://github.com/MODFLOW-USGS/executables) is installed. This includes the MODFLOW 6 binary `mf6` and over 20 other related programs. The utility can also install from the main [MODFLOW 6 repo](https://github.com/MODFLOW-USGS/modflow6) or the [nightly build](https://github.com/MODFLOW-USGS/modflow6-nightly-build) distributions, which contain only:
diff --git a/.docs/md/version_changes.md b/.docs/md/version_changes.md
index d3a2a1e26..68d22ebe1 100644
--- a/.docs/md/version_changes.md
+++ b/.docs/md/version_changes.md
@@ -1,4 +1,43 @@
 # Changelog
+### Version 3.8.0
+
+#### New features
+
+* [feat(datafile)](https://github.com/modflowpy/flopy/commit/d36bb78c3b7a12ab6f77bbe31e3572915753c86b): Add .headers property with data frame (#2221). Committed by Mike Taves on 2024-06-11.
+* [feat(lgr-disv)](https://github.com/modflowpy/flopy/commit/7dec7c52db7c7bf3f8bca61de4d4a953ac1317d2): Add to_disv_gridprops() method to lgr object (#2271). Committed by langevin-usgs on 2024-07-26.
+
+#### Bug fixes
+
+* [fix(docs)](https://github.com/modflowpy/flopy/commit/4a26cab4e0af4f49775fd0dc327c8f5ff51843f6): Section underline matches section title (#2208). Committed by Mike Taves on 2024-06-06.
+* [fix(vtk)](https://github.com/modflowpy/flopy/commit/d81d7c089f0688173f25c1f6d1e860e08c3a17ba): Fix __transient_vector access (#2209). Committed by mickey-tsai on 2024-06-06.
+* [fix(swt)](https://github.com/modflowpy/flopy/commit/667774231a3c3e40fb68067331ead4b8a576cbee): Pass load_only down to Mt3dms.load() (#2222). Committed by wpbonelli on 2024-06-11.
+* [fix(ParticleTrackFile)](https://github.com/modflowpy/flopy/commit/f15caaa0554f306eb5839588e4c75f9e14ef9641): Fix particle filtering in get_alldata (#2223). Committed by martclanor on 2024-06-11.
+* [fix(regression)](https://github.com/modflowpy/flopy/commit/c69990ac37ce5d6828472af1eadab4dc6687c1e8): Corrections to test_create_tests_transport (#2228). Committed by Mike Taves on 2024-06-13.
+* [fix(binaryread)](https://github.com/modflowpy/flopy/commit/e2a85a38640656d5795f8859defb0de14cf668e6): Raise/handle EOFError, deprecate vartype=str (#2226). Committed by Mike Taves on 2024-06-13.
+* [fix(pandas warnings)](https://github.com/modflowpy/flopy/commit/5cdd609748cc70d93859192519d87d34194aec40): Catch pandas warnings and display them in a more useful way (#2229). Committed by scottrp on 2024-06-14.
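Of the 3.8.0 feature entries above, the `feat(datafile)` addition (#2221) is the one most visible in user code: flopy's binary output file objects gain a `.headers` property backed by a pandas DataFrame. A minimal sketch of the assumed usage; the file name is illustrative, not part of this changelog:

import flopy

# any binary head file produced by a MODFLOW run; path is illustrative
hds = flopy.utils.HeadFile("freyberg.hds")

# new in 3.8.0 (#2221): record metadata as a pandas DataFrame, one row
# per record (columns follow the binary header fields, e.g. kstp, kper,
# totim)
print(hds.headers.head())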
+* [fix](https://github.com/modflowpy/flopy/commit/d9ebd81903bb6aa03864e156a0488128867286ef): Test_uzf_negative_iuzfopt (#2236). Committed by Mike Taves on 2024-06-17.
+* [fix(PlotMapView)](https://github.com/modflowpy/flopy/commit/678bb61346bc226831ae5b66615bc9a00c355cc5): Default to all layers in plot_pathline() (#2242). Committed by wpbonelli on 2024-06-19.
+* [fix(Raster)](https://github.com/modflowpy/flopy/commit/a2a159f1758781fc633710f68af5441eb1e4dafb): Reclassify np.float64 correctly (#2235). Committed by martclanor on 2024-06-24.
+* [fix(HeadFile)](https://github.com/modflowpy/flopy/commit/9db562a3b1d18af3801036b1d79d74668c0f71c6): Fix dis reversal, expand tests (#2247). Committed by wpbonelli on 2024-06-25.
+* [fix(mfmodel)](https://github.com/modflowpy/flopy/commit/576cefe5e9826a53a5085d7e3aee9ce7765be22f): Fix get_ims_package (#2272). Committed by martclanor on 2024-08-06.
+* [fix(modelgrid)](https://github.com/modflowpy/flopy/commit/b64f2bdae803830936da89cf1c8e97ab4f660981): Fix missing coord info if disv (#2284). Committed by martclanor on 2024-08-07.
+* [fix(examples)](https://github.com/modflowpy/flopy/commit/2eace7843409b78497bc941d49eab68394833bfb): Restore example notebooks skipped after #2264 (#2286). Committed by wpbonelli on 2024-08-08.
+
+#### Refactoring
+
+* [refactor(expired deprecation)](https://github.com/modflowpy/flopy/commit/31955a7536b1f53d2a572580e05ff282a933716e): Raise AttributeError with to_shapefile (#2200). Committed by Mike Taves on 2024-05-30.
+* [refactor](https://github.com/modflowpy/flopy/commit/bbabf86c0292ed2b237f89371afba01140050592): Deprecate unused flopy.utils.binaryfile.binaryread_struct (#2201). Committed by Mike Taves on 2024-05-31.
+* [refactor(exceptions)](https://github.com/modflowpy/flopy/commit/0d9947eb8301561569676d4e3bdbc28a869e5bad): Raise NotImplementedError where appropriate (#2213). Committed by Mike Taves on 2024-06-07.
+* [refactor(datafile)](https://github.com/modflowpy/flopy/commit/e2d16df5cc1a27a43e274a5b16eee7d91d5decfa): Use len(obj) rather than obj.get_nrecords() (#2215). Committed by Mike Taves on 2024-06-11.
+* [refactor(binarygrid_util)](https://github.com/modflowpy/flopy/commit/ae388ef5a2f40abc950c05ca5b156f7e42337983): Refactor get_iverts to be general and not dependent on grid type (#2230). Committed by langevin-usgs on 2024-06-14.
+* [refactor(datafile)](https://github.com/modflowpy/flopy/commit/cfdedbcb35c2f812e2b7efd78706d4eaa8cdc8f5): Deprecate list_records() and other list_ methods (#2232). Committed by Mike Taves on 2024-06-14.
+* [refactor](https://github.com/modflowpy/flopy/commit/1e44b3fd57bfad1602a06247e44878a7237e0e3a): Fixes for numpy-2.0 deprecation warnings, require numpy>=1.20.3 (#2237). Committed by Mike Taves on 2024-06-17.
+* [refactor](https://github.com/modflowpy/flopy/commit/59040d0948337245d6527671960b56446d39d4d3): Np.where(cond) -> np.asarray(cond).nonzero() (#2238). Committed by wpbonelli on 2024-06-17.
+* [refactor(dependencies)](https://github.com/modflowpy/flopy/commit/e48198c661d8b10d1c1120a88a6cd0c7987d7b22): Support numpy 2 (#2241). Committed by wpbonelli on 2024-06-19.
+* [refactor(get-modflow)](https://github.com/modflowpy/flopy/commit/baf8dff95ae3cc55adee54ec3e141437ae153b9c): Support ARM macs by default (previously opt-in) (#2225). Committed by wpbonelli on 2024-06-21.
+* [refactor(Raster)](https://github.com/modflowpy/flopy/commit/bad483b3910218dc828c993863d540793111090d): Add new methods and checks (#2267). Committed by Joshua Larsen on 2024-07-17.
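The other 3.8.0 feature listed above, `to_disv_gridprops()` on the LGR utility object (#2271), converts a structured parent/child refinement into DISV grid properties. A hedged sketch under assumed dimensions (a single-layer 5x5 parent with a 3x3-per-cell child window; all values are illustrative):

import numpy as np
from flopy.utils.lgrutil import Lgr

# zeros in the parent idomain mark the rectangular region to be
# replaced by the refined child grid
idomain = np.ones((1, 5, 5), dtype=int)
idomain[0, 1:4, 1:4] = 0

lgr = Lgr(
    1, 5, 5,                     # nlayp, nrowp, ncolp
    delrp=100.0, delcp=100.0,    # parent cell spacing
    topp=np.full((5, 5), 10.0),  # parent top
    botmp=np.zeros((1, 5, 5)),   # parent bottoms
    idomainp=idomain,
    ncpp=3,                      # 3x3 child cells per parent cell
)

# new in 3.8.0 (#2271): grid properties (ncpl, vertices, cell2d, ...)
# in a dict ready to hand to a DISV discretization
gridprops = lgr.to_disv_gridprops()
print(sorted(gridprops))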
+* [refactor(resample_to_grid)](https://github.com/modflowpy/flopy/commit/bd7f0a578b9093697948255eb9ecc164d5574f6e): Filter raster nan values from scipy resampling routines (#2285). Committed by Joshua Larsen on 2024-08-08.
+
 ### Version 3.7.0
 
 #### New features
 
diff --git a/.docs/tutorials.rst b/.docs/tutorials.rst
index b1aaf3c17..7b2525504 100644
--- a/.docs/tutorials.rst
+++ b/.docs/tutorials.rst
@@ -35,6 +35,7 @@ MODFLOW 6
     Notebooks/mf6_output_tutorial01
     Notebooks/mf6_sfr_tutorial01
     Notebooks/mf6_tutorial01
+    Notebooks/mf6_lgr_tutorial01
 
 
 MODFLOW-2005
@@ -50,15 +51,6 @@ MODFLOW-2005
     Notebooks/mf_tutorial02
 
 
-MODFLOW-LGR
------------
-
-.. toctree::
-    :maxdepth: 2
-
-    Notebooks/lgr_tutorial01
-
-
 MODFLOW-NWT
 -----------
 
diff --git a/.flake8 b/.flake8
deleted file mode 100644
index d7d2fc9dd..000000000
--- a/.flake8
+++ /dev/null
@@ -1,29 +0,0 @@
-[flake8]
-exclude =
-    .git
-    __pycache__
-    build
-    dist
-    examples
-    autotest
-ignore =
-    # https://flake8.pycqa.org/en/latest/user/error-codes.html
-    F401,
-    # https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes
-    # Indentation
-    E121, E122, E126, E127, E128,
-    # Whitespace
-    E203, E221, E222, E226, E231, E241,
-    # Import
-    E402,
-    # Line length
-    E501, E502,
-    # Statement
-    E722, E741,
-    # Whitespace warning
-    W291, W292, W293,
-    # Blank line warning
-    W391,
-    # Line break warning
-    W503, W504
-statistics = True
diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml
index 12fd54a1f..a3de02b4d 100644
--- a/.github/workflows/benchmark.yml
+++ b/.github/workflows/benchmark.yml
@@ -13,10 +13,6 @@ jobs:
       matrix:
         os: [ ubuntu-latest, macos-latest, windows-latest ]
         python-version: [ 3.8, 3.9, "3.10", "3.11", "3.12" ]
-        exclude:
-          # avoid shutil.copytree infinite recursion bug
-          # https://github.com/python/cpython/pull/17098
-          - python-version: '3.8.0'
     defaults:
       run:
         shell: bash -l {0}
@@ -44,6 +40,14 @@ jobs:
       - name: Install Modflow executables
         uses: modflowpy/install-modflow-action@v1
 
+      - name: Install triangle (macOS workaround)
+        if: runner.os == 'macOS'
+        uses: modflowpy/install-modflow-action@v1
+        with:
+          repo: executables
+          ostag: mac
+          subset: triangle
+
       - name: Run benchmarks
         working-directory: autotest
         run: |
diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml
index 29da17835..07e7b73bc 100644
--- a/.github/workflows/commit.yml
+++ b/.github/workflows/commit.yml
@@ -5,6 +5,9 @@ on:
     branches:
       - master
       - develop
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
 jobs:
   build:
     name: Build
@@ -20,7 +23,7 @@ jobs:
       - name: Setup Python
         uses: actions/setup-python@v5
         with:
-          python-version: 3.8
+          python-version: 3.9
           cache: 'pip'
           cache-dependency-path: pyproject.toml
@@ -52,7 +55,7 @@ jobs:
       - name: Setup Python
         uses: actions/setup-python@v5
         with:
-          python-version: 3.8
+          python-version: 3.9
           cache: 'pip'
           cache-dependency-path: pyproject.toml
@@ -81,7 +84,7 @@ jobs:
         shell: bash
     timeout-minutes: 10
     env:
-      PYTHON_VERSION: 3.8
+      PYTHON_VERSION: 3.9
     steps:
       - name: Checkout repo
@@ -131,10 +134,6 @@ jobs:
       matrix:
         os: [ ubuntu-latest, macos-latest, windows-latest ]
         python-version: [ 3.8, 3.9, "3.10", "3.11", "3.12" ]
-        exclude:
-          # avoid shutil.copytree infinite recursion bug
-          # https://github.com/python/cpython/pull/17098
-          - python-version: '3.8.0'
     defaults:
       run:
         shell: bash -l {0}
@@ -167,13 +166,21 @@ jobs:
         with:
           repo: modflow6-nightly-build
 
+      - name: Install triangle (macOS workaround)
+        if: runner.os == 'macOS'
+        uses: modflowpy/install-modflow-action@v1
+
with: + repo: executables + ostag: mac + subset: triangle + - name: Update package classes run: python -m flopy.mf6.utils.generate_classes --ref develop --no-backup - name: Run tests working-directory: autotest run: | - pytest -v -m="not example and not regression" -n=auto --cov=flopy --cov-append --cov-report=xml --durations=0 --keep-failed=.failed --dist loadfile + pytest -v -m="not example" -n=auto --cov=flopy --cov-append --cov-report=xml --durations=0 --keep-failed=.failed --dist loadfile coverage report env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index d021a66a5..590a1578f 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -13,10 +13,6 @@ jobs: matrix: os: [ ubuntu-latest, macos-latest, windows-latest ] python-version: [ 3.8, 3.9, "3.10", "3.11", "3.12" ] - exclude: - # avoid shutil.copytree infinite recursion bug - # https://github.com/python/cpython/pull/17098 - - python-version: '3.8.0' defaults: run: shell: bash -l {0} @@ -64,6 +60,14 @@ jobs: with: repo: modflow6-nightly-build + - name: Install triangle (macOS workaround) + if: runner.os == 'macOS' + uses: modflowpy/install-modflow-action@v1 + with: + repo: executables + ostag: mac + subset: triangle + - name: Update FloPy packages run: python -m flopy.mf6.utils.generate_classes --ref develop --no-backup diff --git a/.github/workflows/mf6.yml b/.github/workflows/mf6.yml index 4489c41b2..72011cdfc 100644 --- a/.github/workflows/mf6.yml +++ b/.github/workflows/mf6.yml @@ -8,14 +8,14 @@ on: branches: - master - develop - +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true jobs: - test: + test_mf6: name: Modflow6 FloPy tests runs-on: ubuntu-latest - strategy: - fail-fast: false defaults: run: shell: bash @@ -38,6 +38,7 @@ jobs: pip install https://github.com/Deltares/xmipy/zipball/develop pip install https://github.com/MODFLOW-USGS/modflowapi/zipball/develop pip install .[test,optional] + pip install meson ninja - name: Setup GNU Fortran uses: fortran-lang/setup-fortran@v1 @@ -51,46 +52,30 @@ jobs: repository: MODFLOW-USGS/modflow6 path: modflow6 - - name: Update flopy MODFLOW 6 classes - working-directory: modflow6/autotest - run: | - python update_flopy.py - - - name: Install meson - run: | - pip3 install meson ninja - - - name: Setup modflow + - name: Build and install MF6 working-directory: modflow6 run: | meson setup builddir --buildtype=debugoptimized --prefix=$(pwd) --libdir=bin - - - name: Build modflow - working-directory: modflow6 - run: | - meson compile -C builddir - - - name: Install modflow - working-directory: modflow6 - run: | meson install -C builddir + meson test --verbose --no-rebuild -C builddir - - name: Get executables + - name: Update package classes + working-directory: modflow6/autotest + run: python update_flopy.py + + - name: Install executables working-directory: modflow6/autotest env: GITHUB_TOKEN: ${{ github.token }} - run: | - pytest -v --durations=0 get_exes.py + run: pytest -v --durations=0 get_exes.py - name: Run tests working-directory: modflow6/autotest - run: | - pytest -v --cov=flopy --cov-report=xml --durations=0 -n auto -m "not repo and not regression" + run: pytest -v --cov=flopy --cov-report=xml --cov-append --durations=0 -n auto -m "not repo and not regression" - name: Print coverage report before upload working-directory: ./modflow6/autotest - run: | - coverage report + run: coverage report - name: Upload coverage to Codecov if: @@ -98,3 
+83,68 @@ jobs: uses: codecov/codecov-action@v3 with: files: ./modflow6/autotest/coverage.xml + + test_mf6_examples: + name: MF6 examples FloPy tests + runs-on: ubuntu-latest + defaults: + run: + shell: bash + steps: + + - name: Checkout flopy repo + uses: actions/checkout@v4 + + - name: Checkout MODFLOW 6 + uses: actions/checkout@v4 + with: + repository: MODFLOW-USGS/modflow6 + path: modflow6 + + - name: Checkout MF6 examples + uses: actions/checkout@v4 + with: + repository: MODFLOW-USGS/modflow6-examples + path: modflow6-examples + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.9 + cache: 'pip' + cache-dependency-path: pyproject.toml + + - name: Install Python dependencies + run: | + pip install --upgrade pip + pip install https://github.com/modflowpy/pymake/zipball/master + pip install https://github.com/Deltares/xmipy/zipball/develop + pip install https://github.com/MODFLOW-USGS/modflowapi/zipball/develop + pip install .[test,optional] + pip install meson ninja + pip install -r modflow6-examples/etc/requirements.pip.txt + + - name: Setup GNU Fortran + uses: fortran-lang/setup-fortran@v1 + with: + compiler: gcc + version: 13 + + - name: Install executables + uses: modflowpy/install-modflow-action@v1 + + - name: Build and install MF6 + working-directory: modflow6 + run: | + meson setup builddir --buildtype=debugoptimized --prefix=$(pwd) --libdir=bin + meson install -C builddir + meson test --verbose --no-rebuild -C builddir + cp bin/* ~/.local/bin/modflow/ + + - name: Update package classes + working-directory: modflow6/autotest + run: python update_flopy.py + + - name: Test MF6 examples + working-directory: modflow6-examples/autotest + run: pytest -v -n=auto --durations=0 test_scripts.py diff --git a/.github/workflows/optional.yml b/.github/workflows/optional.yml index a84940cd9..c1962380a 100644 --- a/.github/workflows/optional.yml +++ b/.github/workflows/optional.yml @@ -14,7 +14,7 @@ jobs: shell: bash timeout-minutes: 10 env: - PYTHON_VERSION: 3.8 + PYTHON_VERSION: 3.9 strategy: fail-fast: false matrix: diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml deleted file mode 100644 index 7bbc672fe..000000000 --- a/.github/workflows/regression.yml +++ /dev/null @@ -1,65 +0,0 @@ -name: FloPy regression tests - -on: - schedule: - - cron: '0 8 * * *' # run at 8 AM UTC (12 am PST) - -jobs: - regression: - name: Regression tests - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [ ubuntu-latest, macos-latest, windows-latest ] - python-version: [ 3.8, 3.9, "3.10", "3.11", "3.12" ] - exclude: - # avoid shutil.copytree infinite recursion bug - # https://github.com/python/cpython/pull/17098 - - python-version: '3.8.0' - defaults: - run: - shell: bash -l {0} - timeout-minutes: 90 - steps: - - name: Checkout repo - uses: actions/checkout@v4 - - - name: Setup Micromamba - uses: mamba-org/setup-micromamba@v1 - with: - environment-file: etc/environment.yml - cache-environment: true - cache-downloads: true - create-args: >- - python=${{ matrix.python-version }} - init-shell: >- - bash - powershell - - - name: Install FloPy - run: pip install . 
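This deleted workflow selected tests with `pytest -m "regression"` on a nightly cron; per the commit.yml hunk earlier, those tests now ride along in the commit workflow once the `and not regression` filter is dropped. A hedged sketch of how such a marker-gated test looks (the test body is a placeholder, not a real flopy test):

import pytest

@pytest.mark.regression
def test_against_reference_output():
    # placeholder body; real regression tests rerun a model and
    # compare heads/budgets against stored reference output
    assert 1 + 1 == 2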
- - - name: Install Modflow-related executables - uses: modflowpy/install-modflow-action@v1 - - - name: Install Modflow dev build executables - uses: modflowpy/install-modflow-action@v1 - with: - repo: modflow6-nightly-build - - - name: Update FloPy packages - run: python -m flopy.mf6.utils.generate_classes --ref develop --no-backup - - - name: Run regression tests - working-directory: autotest - run: pytest -v -m="regression" -n=auto --durations=0 --keep-failed=.failed - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Upload failed test outputs - uses: actions/upload-artifact@v4 - if: failure() - with: - name: failed-regression-${{ matrix.os }}-${{ matrix.python-version }} - path: autotest/.failed/** diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index df9c1b1bb..f3a5717b0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -27,7 +27,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v5 with: - python-version: 3.8 + python-version: 3.9 cache: 'pip' cache-dependency-path: pyproject.toml @@ -56,7 +56,7 @@ jobs: if [[ "$ver" == *"rc"* ]]; then python scripts/update_version.py -v "${ver%"rc"}" else - python scripts/update_version.py -v "$ver" --approve + python scripts/update_version.py -v "$ver" fi # show version and set output @@ -173,7 +173,7 @@ jobs: # actions/download-artifact won't look at previous workflow runs but we need to in order to get changelog - name: Download artifacts - uses: dawidd6/action-download-artifact@v3 + uses: dawidd6/action-download-artifact@v6 - name: Draft release env: @@ -211,7 +211,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v5 with: - python-version: 3.8 + python-version: 3.9 cache: 'pip' cache-dependency-path: pyproject.toml diff --git a/.github/workflows/rtd.yml b/.github/workflows/rtd.yml index 4192123a9..b12988912 100644 --- a/.github/workflows/rtd.yml +++ b/.github/workflows/rtd.yml @@ -1,16 +1,61 @@ name: FloPy documentation - on: push: pull_request: branches: - master - develop - + workflow_dispatch: + inputs: + ref: + description: 'The tag, branch or commit hash to trigger an RTD build for. Branches and tags must be fully formed, e.g. refs/heads/ or refs/tags/ respectively.' 
+ required: false + type: string + default: 'refs/heads/develop' +concurrency: + group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event_name }} + cancel-in-progress: true jobs: + set_options: + name: Set release options + runs-on: ubuntu-22.04 + outputs: + ref: ${{ steps.set_ref.outputs.ref }} + sha: ${{ steps.set_sha.outputs.sha }} + steps: + + - name: Set ref + id: set_ref + run: | + # if ref was provided explicitly via workflow_dispatch, use it + if [[ ("${{ github.event_name }}" == "workflow_dispatch") && (-n "${{ inputs.ref }}") ]]; then + ref="${{ inputs.ref }}" + echo "using ref $ref from workflow_dispatch" + else + # otherwise use the current branch + ref="${{ github.ref }}" + echo "using current ref $ref" + fi + echo "ref=$ref" >> $GITHUB_OUTPUT + + - name: Checkout flopy repo + uses: actions/checkout@v4 + with: + ref: ${{ steps.set_ref.outputs.ref }} + + - name: Set sha + id: set_sha + run: | + if [[ ("${{ github.event_name }}" == "workflow_dispatch") && (-n "${{ inputs.ref }}") ]]; then + sha=$(git rev-parse ${{ steps.set_ref.outputs.ref }}) + else + sha="${{ github.sha }}" + fi + echo "sha=$sha" >> $GITHUB_OUTPUT rtd_build: name: Prepare and test notebooks + needs: set_options runs-on: ${{ matrix.os }} strategy: fail-fast: false @@ -24,6 +69,8 @@ jobs: steps: - name: Checkout flopy repo uses: actions/checkout@v4 + with: + ref: ${{ needs.set_options.outputs.ref }} - name: Output repo information run: | @@ -72,32 +119,56 @@ jobs: $PSDefaultParameterValues['*:ErrorAction']='Stop' powershell .github/install_opengl.ps1 - - name: Install MODFLOW executables + - name: Install Modflow-related executables uses: modflowpy/install-modflow-action@v1 + - name: Install Modflow dev build executables + uses: modflowpy/install-modflow-action@v1 + with: + repo: modflow6-nightly-build + + - name: Install triangle (macOS workaround) + if: runner.os == 'macOS' + uses: modflowpy/install-modflow-action@v1 + with: + repo: executables + ostag: mac + subset: triangle + - name: Run tutorial and example notebooks working-directory: autotest run: pytest -v -n auto test_notebooks.py - name: Upload notebooks artifact for ReadtheDocs - if: github.repository_owner == 'modflowpy' && github.event_name == 'push' && runner.os == 'Linux' + if: | + github.repository_owner == 'modflowpy' && + runner.os == 'Linux' && + ( + github.event_name == 'push' || + github.event_name == 'workflow_dispatch' + ) uses: actions/upload-artifact@v4 with: - name: notebooks-for-${{ github.sha }} + name: notebooks-for-${{ needs.set_options.outputs.sha }} path: .docs/Notebooks/*.ipynb # trigger rtd if previous job was successful rtd: name: Read the Docs trigger - needs: rtd_build + needs: + - rtd_build + - set_options runs-on: ubuntu-latest - if: - github.repository_owner == 'modflowpy' && github.event_name == 'push' + github.repository_owner == 'modflowpy' && + ( + github.event_name == 'push' || + github.event_name == 'workflow_dispatch' + ) steps: - - name: Trigger RTDs build on master and develop branches + - name: Trigger RTDs build uses: dfm/rtds-action@v1 with: webhook_url: ${{ secrets.RTDS_WEBHOOK_URL }} webhook_token: ${{ secrets.RTDS_WEBHOOK_TOKEN }} - commit_ref: ${{ github.ref }} + commit_ref: ${{ needs.set_options.outputs.ref }} diff --git a/.pylintrc b/.pylintrc deleted file mode 100644 index a41a6deb5..000000000 --- a/.pylintrc +++ /dev/null @@ -1,586 +0,0 @@ -[MASTER] - -# A comma-separated list of package or module names from where C extensions may -# be loaded. 
Extensions are loading into the active Python interpreter and may -# run arbitrary code. -extension-pkg-whitelist= - -# Add files or directories to the blacklist. They should be base names, not -# paths. -ignore=CVS - -# Add files or directories matching the regex patterns to the blacklist. The -# regex matches against base names, not paths. -ignore-patterns= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the -# number of processors available to use. -jobs=1 - -# Control the amount of potential inferred values when inferring a single -# object. This can help the performance when dealing with large functions or -# complex, nested conditions. -limit-inference-results=100 - -# List of plugins (as comma separated values of python module names) to load, -# usually to register additional checkers. -load-plugins= - -# Pickle collected data for later comparisons. -persistent=yes - -# Specify a configuration file. -#rcfile= - -# When enabled, pylint would attempt to guess common misconfiguration and emit -# user-friendly hints instead of false-positive error messages. -suggestion-mode=yes - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. -confidence= - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once). You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use "--disable=all --enable=classes -# --disable=W". 
-disable=print-statement, - parameter-unpacking, - unpacking-in-except, - old-raise-syntax, - backtick, - long-suffix, - old-ne-operator, - old-octal-literal, - import-star-module-level, - non-ascii-bytes-literal, - raw-checker-failed, - bad-inline-option, - locally-disabled, - file-ignored, - suppressed-message, - useless-suppression, - deprecated-pragma, - use-symbolic-message-instead, - apply-builtin, - basestring-builtin, - buffer-builtin, - cmp-builtin, - coerce-builtin, - execfile-builtin, - file-builtin, - long-builtin, - raw_input-builtin, - reduce-builtin, - standarderror-builtin, - unicode-builtin, - xrange-builtin, - coerce-method, - delslice-method, - getslice-method, - setslice-method, - no-absolute-import, - old-division, - dict-iter-method, - dict-view-method, - next-method-called, - metaclass-assignment, - indexing-exception, - raising-string, - reload-builtin, - oct-method, - hex-method, - nonzero-method, - cmp-method, - input-builtin, - round-builtin, - intern-builtin, - unichr-builtin, - map-builtin-not-iterating, - zip-builtin-not-iterating, - range-builtin-not-iterating, - filter-builtin-not-iterating, - using-cmp-argument, - eq-without-hash, - div-method, - idiv-method, - rdiv-method, - exception-message-attribute, - invalid-str-codec, - sys-max-int, - bad-python3-import, - deprecated-string-function, - deprecated-str-translate-call, - deprecated-itertools-function, - deprecated-types-field, - next-method-defined, - dict-items-not-iterating, - dict-keys-not-iterating, - dict-values-not-iterating, - deprecated-operator-function, - deprecated-urllib-function, - xreadlines-attribute, - deprecated-sys-function, - exception-escape, - comprehension-escape, - C0330 - - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time (only on the command line, not in the configuration file where -# it should appear only once). See also the "--disable" option for examples. -enable=c-extension-no-member - - -[REPORTS] - -# Python expression which should return a score less than or equal to 10. You -# have access to the variables 'error', 'warning', 'refactor', and 'convention' -# which contain the number of messages in each category, as well as 'statement' -# which is the total number of statements analyzed. This score is used by the -# global evaluation report (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details. -#msg-template= - -# Set the output format. Available formats are text, parseable, colorized, json -# and msvs (visual studio). You can also give a reporter class, e.g. -# mypackage.mymodule.MyReporterClass. -output-format=text - -# Tells whether to display a full report or only the messages. -reports=no - -# Activate the evaluation score. -score=yes - - -[REFACTORING] - -# Maximum number of nested blocks for function / method body -max-nested-blocks=5 - -# Complete name of functions that never returns. When checking for -# inconsistent-return-statements if a never returning function is called then -# it will be considered as an explicit return statement and no message will be -# printed. -never-returning-functions=sys.exit - - -[LOGGING] - -# Format style used to check logging format string. 
`old` means using % -# formatting, `new` is for `{}` formatting,and `fstr` is for f-strings. -logging-format-style=old - -# Logging modules to check that the string format arguments are in logging -# function parameter format. -logging-modules=logging - - -[SPELLING] - -# Limits count of emitted suggestions for spelling mistakes. -max-spelling-suggestions=4 - -# Spelling dictionary name. Available dictionaries: none. To make it work, -# install the python-enchant package. -spelling-dict= - -# List of comma separated words that should not be checked. -spelling-ignore-words= - -# A path to a file that contains the private dictionary; one word per line. -spelling-private-dict-file= - -# Tells whether to store unknown words to the private dictionary (see the -# --spelling-private-dict-file option) instead of raising a message. -spelling-store-unknown-words=no - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME, - XXX, - TODO - - -[TYPECHECK] - -# List of decorators that produce context managers, such as -# contextlib.contextmanager. Add to this list to register other decorators that -# produce valid context managers. -contextmanager-decorators=contextlib.contextmanager - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members=graph.*,requests.* - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# Tells whether to warn about missing members when the owner of the attribute -# is inferred to be None. -ignore-none=yes - -# This flag controls whether pylint should warn about no-member and similar -# checks whenever an opaque object is returned when inferring. The inference -# can return multiple potential results while evaluating a Python object, but -# some branches might not be evaluated, which results in partial inference. In -# that case, it might be useful to still emit no-member and other checks for -# the rest of the inferred objects. -ignore-on-opaque-inference=yes - -# List of class names for which member attributes should not be checked (useful -# for classes with dynamically set attributes). This supports the use of -# qualified names. -ignored-classes=optparse.Values,thread._local,_thread._local - -# List of module names for which member attributes should not be checked -# (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis). It -# supports qualified module names, as well as Unix pattern matching. -ignored-modules= - -# Show a hint with possible names when a member name was not found. The aspect -# of finding the hint is based on edit distance. -missing-member-hint=yes - -# The minimum edit distance a name should have in order to be considered a -# similar match for a missing member name. -missing-member-hint-distance=1 - -# The total number of similar names that should be taken in consideration when -# showing a hint for a missing member. -missing-member-max-choices=1 - -# List of decorators that change the signature of a decorated function. -signature-mutators= - - -[VARIABLES] - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid defining new builtins when possible. 
-additional-builtins= - -# Tells whether unused global variables should be treated as a violation. -allow-global-unused-variables=yes - -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_, - _cb - -# A regular expression matching the name of dummy variables (i.e. expected to -# not be used). -dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore. -ignored-argument-names=_.*|^ignored_|^unused_ - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# List of qualified module names which can have objects that can redefine -# builtins. -redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io - - -[FORMAT] - -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - -# Maximum number of characters on a single line. -max-line-length=100 - -# Maximum number of lines in a module. -max-module-lines=1000 - -# List of optional constructs for which whitespace checking is disabled. `dict- -# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. -# `trailing-comma` allows a space between comma and closing bracket: (a, ). -# `empty-line` allows space-only lines. -no-space-check=trailing-comma, - dict-separator - -# Allow the body of a class to be on the same line as the declaration if body -# contains single statement. -single-line-class-stmt=no - -# Allow the body of an if to be on the same line as the test if there is no -# else. -single-line-if-stmt=no - -# Good variable names which should always be accepted, separated by a comma -good-names=kv,nr,nc,sy,ss,iu,hy,hk - - -[SIMILARITIES] - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=no - -# Minimum lines number of a similarity. -min-similarity-lines=4 - - -[BASIC] - -# Naming style matching correct argument names. -argument-naming-style=snake_case - -# Regular expression matching correct argument names. Overrides argument- -# naming-style. -#argument-rgx= - -# Naming style matching correct attribute names. -attr-naming-style=snake_case - -# Regular expression matching correct attribute names. Overrides attr-naming- -# style. -#attr-rgx= - -# Bad variable names which should always be refused, separated by a comma. -bad-names=foo, - bar, - baz, - toto, - tutu, - tata - -# Naming style matching correct class attribute names. -class-attribute-naming-style=any - -# Regular expression matching correct class attribute names. Overrides class- -# attribute-naming-style. -#class-attribute-rgx= - -# Naming style matching correct class names. -class-naming-style=PascalCase - -# Regular expression matching correct class names. Overrides class-naming- -# style. -#class-rgx= - -# Naming style matching correct constant names. -const-naming-style=UPPER_CASE - -# Regular expression matching correct constant names. Overrides const-naming- -# style. 
-#const-rgx= - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=-1 - -# Naming style matching correct function names. -function-naming-style=snake_case - -# Regular expression matching correct function names. Overrides function- -# naming-style. -#function-rgx= - -# Good variable names which should always be accepted, separated by a comma. -good-names=i, - j, - k, - ex, - Run, - _ - -# Include a hint for the correct naming format with invalid-name. -include-naming-hint=no - -# Naming style matching correct inline iteration names. -inlinevar-naming-style=any - -# Regular expression matching correct inline iteration names. Overrides -# inlinevar-naming-style. -#inlinevar-rgx= - -# Naming style matching correct method names. -method-naming-style=snake_case - -# Regular expression matching correct method names. Overrides method-naming- -# style. -#method-rgx= - -# Naming style matching correct module names. -module-naming-style=snake_case - -# Regular expression matching correct module names. Overrides module-naming- -# style. -#module-rgx= - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ - -# List of decorators that produce properties, such as abc.abstractproperty. Add -# to this list to register other decorators that produce valid properties. -# These decorators are taken in consideration only for invalid-name. -property-classes=abc.abstractproperty - -# Naming style matching correct variable names. -variable-naming-style=snake_case - -# Regular expression matching correct variable names. Overrides variable- -# naming-style. -#variable-rgx= - - -[STRING] - -# This flag controls whether the implicit-str-concat-in-sequence should -# generate a warning on implicit string concatenation in sequences defined over -# several lines. -check-str-concat-over-line-jumps=no - - -[IMPORTS] - -# List of modules that can be imported at any level, not just the top level -# one. -allow-any-import-level= - -# Allow wildcard imports from modules that define __all__. -allow-wildcard-with-all=no - -# Analyse import fallback blocks. This can be used to support both Python 2 and -# 3 compatible code, which means that the block might have code that exists -# only in one or another interpreter, leading to false positives when analysed. -analyse-fallback-blocks=no - -# Deprecated modules which should not be used, separated by a comma. -deprecated-modules=optparse,tkinter.tix - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled). -ext-import-graph= - -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled). -import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled). -int-import-graph= - -# Force import order to recognize a module as part of the standard -# compatibility libraries. -known-standard-library= - -# Force import order to recognize a module as part of a third party library. -known-third-party=enchant - -# Couples of modules and preferred modules, separated by a comma. -preferred-modules= - - -[CLASSES] - -# List of method names used to declare (i.e. assign) instance attributes. 
-defining-attr-methods=__init__, - __new__, - setUp, - __post_init__ - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict, - _fields, - _replace, - _source, - _make - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=cls - - -[DESIGN] - -# Maximum number of arguments for function / method. -max-args=5 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Maximum number of boolean expressions in an if statement (see R0916). -max-bool-expr=5 - -# Maximum number of branch for function / method body. -max-branches=12 - -# Maximum number of locals for function / method body. -max-locals=15 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - -# Maximum number of return / yield for function / method body. -max-returns=6 - -# Maximum number of statements in function / method body. -max-statements=50 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. Defaults to -# "BaseException, Exception". -overgeneral-exceptions=BaseException, - Exception diff --git a/.readthedocs.yml b/.readthedocs.yml index 7e3afd5ea..43dde5ed0 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -25,8 +25,9 @@ build: sphinx: configuration: .docs/conf.py -# Build docs in additional formats such as PDF and ePub -formats: all +# Build only html and htmlzip (skip others, including PDF and EPUB) +formats: + - htmlzip # Set the Python version and requirements python: diff --git a/CITATION.cff b/CITATION.cff index 08c4554c4..b9cf5acb6 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -3,8 +3,8 @@ message: If you use this software, please cite both the article from preferred-c references, and the software itself. type: software title: FloPy -version: 3.7.0 -date-released: '2024-05-23' +version: 3.8.0 +date-released: '2024-08-08' doi: 10.5066/F7BK19FH abstract: A Python package to create, run, and post-process MODFLOW-based models. repository-artifact: https://pypi.org/project/flopy diff --git a/DISCLAIMER.md b/DISCLAIMER.md deleted file mode 100644 index c3b346b8d..000000000 --- a/DISCLAIMER.md +++ /dev/null @@ -1,11 +0,0 @@ -Disclaimer ----------- - -This software is preliminary or provisional and is subject to revision. It is -being provided to meet the need for timely best science. This software is -provided "as is" and "as-available", and makes no representations or warranties -of any kind concerning the software, whether express, implied, statutory, or -other. This includes, without limitation, warranties of title, -merchantability, fitness for a particular purpose, non-infringement, absence -of latent or other defects, accuracy, or the presence or absence of errors, -whether or not known or discoverable. 
\ No newline at end of file diff --git a/README.md b/README.md index 646964b7f..3c8bf2008 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ flopy3 -### Version 3.7.0 +### Version 3.8.0 [![flopy continuous integration](https://github.com/modflowpy/flopy/actions/workflows/commit.yml/badge.svg?branch=develop)](https://github.com/modflowpy/flopy/actions/workflows/commit.yml) [![Read the Docs](https://github.com/modflowpy/flopy/actions/workflows/rtd.yml/badge.svg?branch=develop)](https://github.com/modflowpy/flopy/actions/workflows/rtd.yml) @@ -35,7 +35,7 @@ Installation FloPy requires **Python** 3.8+ with: ``` -numpy >=1.15.0,<2.0.0 +numpy >=1.20.3 matplotlib >=1.4.0 pandas >=2.0.0 ``` @@ -150,7 +150,7 @@ How to Cite ##### ***Software/Code citation for FloPy:*** -[Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.7.0: U.S. Geological Survey Software Release, 23 May 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH) +[Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.8.0: U.S. Geological Survey Software Release, 08 August 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH) Additional FloPy Related Publications @@ -170,15 +170,3 @@ MODFLOW Resources + [Online guide for MODFLOW-2000](https://water.usgs.gov/nrp/gwsoftware/modflow2000/Guide/) + [Online guide for MODFLOW-2005](https://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/) + [Online guide for MODFLOW-NWT](https://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/) - - -Disclaimer ----------- - -This software is provided "as is" and "as-available", and makes no -representations or warranties of any kind concerning the software, whether -express, implied, statutory, or other. This includes, without limitation, -warranties of title, merchantability, fitness for a particular purpose, -non-infringement, absence of latent or other defects, accuracy, or the -presence or absence of errors, whether or not known or discoverable. 
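The README's new `numpy >=1.20.3` floor pairs with the numpy-2 refactors in the changelog above (#2237, #2238, #2241). The idiom swap named in #2238 can be sketched as follows (array values illustrative):

import numpy as np

arr = np.array([0.0, 3.5, 0.0, 7.2])

idx_old = np.where(arr > 0)              # one-argument form, discouraged
idx_new = np.asarray(arr > 0).nonzero()  # numpy-2-friendly spelling

assert (idx_old[0] == idx_new[0]).all()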
- diff --git a/_config.yml b/_config.yml deleted file mode 100644 index 2f7efbeab..000000000 --- a/_config.yml +++ /dev/null @@ -1 +0,0 @@ -theme: jekyll-theme-minimal \ No newline at end of file diff --git a/autotest/__snapshots__/test_mp7/test_mp7_output[mf2005].npy b/autotest/__snapshots__/test_mp7/test_mp7_output[mf2005].npy new file mode 100644 index 000000000..34a4cbcfa Binary files /dev/null and b/autotest/__snapshots__/test_mp7/test_mp7_output[mf2005].npy differ diff --git a/autotest/__snapshots__/test_mp7/test_mp7_output[mf6].npy b/autotest/__snapshots__/test_mp7/test_mp7_output[mf6].npy new file mode 100644 index 000000000..2db8df7ae Binary files /dev/null and b/autotest/__snapshots__/test_mp7/test_mp7_output[mf6].npy differ diff --git a/autotest/conftest.py b/autotest/conftest.py index 6f770ef54..13e25a530 100644 --- a/autotest/conftest.py +++ b/autotest/conftest.py @@ -1,12 +1,10 @@ import re from importlib import metadata -from io import BytesIO, StringIO from pathlib import Path from platform import system -from typing import List, Optional +from typing import List import matplotlib.pyplot as plt -import numpy as np import pytest from modflow_devtools.misc import is_in_ci diff --git a/autotest/regression/conftest.py b/autotest/regression/conftest.py deleted file mode 100644 index 66c064645..000000000 --- a/autotest/regression/conftest.py +++ /dev/null @@ -1,97 +0,0 @@ -from itertools import groupby -from os import linesep -from pathlib import Path -from tempfile import gettempdir - -import pytest -from filelock import FileLock -from modflow_devtools.download import download_and_unzip - -__mf6_examples = "mf6_examples" -__mf6_examples_path = Path(gettempdir()) / __mf6_examples -__mf6_examples_lock = FileLock(Path(gettempdir()) / f"{__mf6_examples}.lock") - - -def get_mf6_examples_path() -> Path: - # use file lock so mf6 distribution is downloaded once, - # even when tests are run in parallel with pytest-xdist - __mf6_examples_lock.acquire() - try: - if __mf6_examples_path.is_dir() and any(__mf6_examples_path.glob("*")): - print("Example models already exist") - else: - __mf6_examples_path.mkdir(exist_ok=True) - print("Downloading example models") - download_and_unzip( - url="https://github.com/MODFLOW-USGS/modflow6-examples/releases/download/current/modflow6-examples.zip", - path=str(__mf6_examples_path), - verbose=True, - ) - return __mf6_examples_path - finally: - __mf6_examples_lock.release() - - -def is_nested(namfile) -> bool: - p = Path(namfile) - if not p.is_file() or not p.name.endswith(".nam"): - raise ValueError(f"Expected a namfile path, got {p}") - - return p.parent.parent.name != __mf6_examples - - -def pytest_generate_tests(metafunc): - # examples to skip: - # - ex-gwtgwt-mt3dms-p10: https://github.com/MODFLOW-USGS/modflow6/pull/1008 - exclude = ["ex-gwt-gwtgwt-mt3dms-p10"] - namfiles = [ - str(p) - for p in get_mf6_examples_path().rglob("mfsim.nam") - if not any(e in str(p) for e in exclude) - ] - - # parametrization by model - # - single namfile per test case - # - no coupling (only first model in each simulation subdir is used) - key = "mf6_example_namfile" - if key in metafunc.fixturenames: - metafunc.parametrize(key, sorted(namfiles)) - - # parametrization by simulation - # - each test case gets an ordered list of 1+ namfiles - # - models can be coupled (run in order provided, sharing workspace) - key = "mf6_example_namfiles" - if key in metafunc.fixturenames: - simulations = [] - - def simulation_name_from_model_path(p): - p = Path(p) - return 
p.parent.parent.name if is_nested(p) else p.parent.name - - for model_name, model_namfiles in groupby( - namfiles, key=simulation_name_from_model_path - ): - models = sorted( - list(model_namfiles) - ) # sort in alphabetical order (gwf < gwt) - simulations.append(models) - print( - f"Simulation {model_name} has {len(models)} model(s):\n" - f"{linesep.join(model_namfiles)}" - ) - - def simulation_name_from_model_namfiles(mnams): - try: - namfile = next(iter(mnams), None) - except TypeError: - namfile = None - if namfile is None: - pytest.skip("No namfiles (expected ordered collection)") - namfile = Path(namfile) - return ( - namfile.parent.parent if is_nested(namfile) else namfile.parent - ).name - - metafunc.parametrize( - key, simulations, ids=simulation_name_from_model_namfiles - ) diff --git a/autotest/regression/test_mf6.py b/autotest/regression/test_mf6.py index 17824801a..e1c585088 100644 --- a/autotest/regression/test_mf6.py +++ b/autotest/regression/test_mf6.py @@ -49,7 +49,7 @@ from flopy.mf6.mfbase import FlopyException, MFDataException from flopy.mf6.utils import testutils from flopy.utils import CellBudgetFile -from flopy.utils.compare import compare_concentrations, compare_heads +from flopy.utils.compare import compare_heads from flopy.utils.datautil import PyListUtil pytestmark = pytest.mark.mf6 @@ -2317,7 +2317,7 @@ def test035_create_tests_fhb(function_tmpdir, example_data_path): @requires_exe("mf6") -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) @pytest.mark.regression def test006_create_tests_gwf3_disv(function_tmpdir, example_data_path): # init paths @@ -2853,6 +2853,11 @@ def test006_create_tests_2models_gnc(function_tmpdir, example_data_path): ) sim.remove_package(exg_package.package_type) + exg_data = { + "filename": "exg_data.txt", + "data": exgrecarray, + "binary": True, + } exg_package = ModflowGwfgwf( sim, print_input=True, @@ -2860,7 +2865,7 @@ def test006_create_tests_2models_gnc(function_tmpdir, example_data_path): save_flows=True, auxiliary="testaux", nexg=36, - exchangedata=exgrecarray, + exchangedata=exg_data, exgtype="gwf6-gwf6", exgmnamea=model_name_1, exgmnameb=model_name_2, @@ -2881,6 +2886,7 @@ def test006_create_tests_2models_gnc(function_tmpdir, example_data_path): # change folder to save simulation sim.set_sim_path(function_tmpdir) + exg_package.exchangedata.set_record(exg_data) # write simulation to new location sim.write_simulation() @@ -3393,7 +3399,7 @@ def test_create_tests_transport(function_tmpdir, example_data_path): pth = example_data_path / "mf6" / "create_tests" / test_ex_name expected_output_folder = pth / "expected_output" expected_head_file = expected_output_folder / "gwf_mst03.hds" - expected_conc_file = expected_output_folder / "gwt_mst03.unc" + expected_conc_file = expected_output_folder / "gwt_mst03.ucn" laytyp = [1] ss = [1.0e-10] @@ -3609,12 +3615,13 @@ def test_create_tests_transport(function_tmpdir, example_data_path): outfile=outfile, ) conc_new = function_tmpdir / "gwt_mst03.ucn" - assert compare_concentrations( + assert compare_heads( None, None, files1=expected_conc_file, files2=conc_new, outfile=outfile, + text="concentration", ) # clean up @@ -3680,7 +3687,6 @@ def test001a_tharmonic(function_tmpdir, example_data_path): # get expected results budget_obj = CellBudgetFile(expected_cbc_file_a, precision="auto") - budget_obj.list_records() budget_frf_valid = np.array( budget_obj.get_data(text=" FLOW JA FACE", full3D=True) ) @@ -4039,6 +4045,11 @@ def 
test006_2models_different_dis(function_tmpdir, example_data_path): exgrecarray = testutils.read_exchangedata( os.path.join(pth, "exg.txt"), 3, 2 ) + exg_data = { + "filename": "exg_data.bin", + "data": exgrecarray, + "binary": True, + } # build obs dictionary gwf_obs = { @@ -4055,7 +4066,7 @@ def test006_2models_different_dis(function_tmpdir, example_data_path): save_flows=True, auxiliary="testaux", nexg=9, - exchangedata=exgrecarray, + exchangedata=exg_data, exgtype="gwf6-gwf6", exgmnamea=model_name_1, exgmnameb=model_name_2, @@ -4077,6 +4088,7 @@ def test006_2models_different_dis(function_tmpdir, example_data_path): # change folder to save simulation sim.set_sim_path(function_tmpdir) + exg_package.exchangedata.set_record(exg_data) # write simulation to new location sim.write_simulation() @@ -4451,7 +4463,6 @@ def test006_2models_mvr(function_tmpdir, example_data_path): expected_cbc_file_a, precision="double", ) - budget_obj.list_records() # test getting models model_dict = sim.model_dict diff --git a/autotest/regression/test_mf6_examples.py b/autotest/regression/test_mf6_examples.py deleted file mode 100644 index 0b652dbf5..000000000 --- a/autotest/regression/test_mf6_examples.py +++ /dev/null @@ -1,93 +0,0 @@ -from pathlib import Path -from shutil import copytree - -import pytest -from modflow_devtools.markers import requires_exe, requires_pkg - -from autotest.regression.conftest import is_nested -from flopy.mf6 import MFSimulation -from flopy.utils.compare import compare_heads - -pytestmark = pytest.mark.mf6 - - -@requires_exe("mf6") -@pytest.mark.slow -@pytest.mark.regression -def test_mf6_example_simulations(function_tmpdir, mf6_example_namfiles): - # MF6 examples parametrized by simulation. `mf6_example_namfiles` is a list - # of models to run in order provided. Coupled models share the same tempdir - # - # Parameters - # ---------- - # function_tmpdir: function-scoped temporary directory fixture - # mf6_example_namfiles: ordered list of namfiles for 1+ coupled models - - # make sure we have at least 1 name file - if len(mf6_example_namfiles) == 0: - pytest.skip("No namfiles (expected ordered collection)") - namfile = Path(mf6_example_namfiles[0]) # pull the first model's namfile - - # coupled models have nested dirs (e.g., 'mf6gwf' and 'mf6gwt') under model directory - # TODO: are there multiple types of couplings? e.g. besides GWF-GWT, mt3dms? - nested = is_nested(namfile) - function_tmpdir = Path( - function_tmpdir / "workspace" - ) # working directory (must not exist for copytree) - cmpdir = function_tmpdir / "compare" # comparison directory - - # copy model files into working directory - copytree( - src=namfile.parent.parent if nested else namfile.parent, - dst=function_tmpdir, - ) - - def run_models(): - # run models in order received (should be alphabetical, so gwf precedes gwt) - for namfile in mf6_example_namfiles: - namfile_path = Path(namfile).resolve() - namfile_name = namfile_path.name - model_path = namfile_path.parent - - # working directory must be named according to the name file's parent (e.g. 
- # 'mf6gwf') because coupled models refer to each other with relative paths - wrkdir = ( - Path(function_tmpdir / model_path.name) - if nested - else function_tmpdir - ) - - # load simulation - sim = MFSimulation.load( - namfile_name, version="mf6", exe_name="mf6", sim_ws=wrkdir - ) - assert isinstance(sim, MFSimulation) - - # run simulation - success, buff = sim.run_simulation(report=True) - assert success - - # change to comparison workspace - sim.simulation_data.mfpath.set_sim_path(cmpdir) - - # write simulation files and rerun - sim.write_simulation() - success, _ = sim.run_simulation() - assert success - - # get head file outputs - headfiles1 = [p for p in wrkdir.glob("*.hds")] - headfiles2 = [p for p in cmpdir.glob("*.hds")] - - # compare heads - assert compare_heads( - None, - None, - precision="double", - text="head", - files1=[str(p) for p in headfiles1], - files2=[str(p) for p in headfiles2], - outfile=cmpdir / "head_compare.dat", - ) - - run_models() diff --git a/autotest/regression/test_mf6_pandas.py b/autotest/regression/test_mf6_pandas.py index 183ed2ff0..7ef875ada 100644 --- a/autotest/regression/test_mf6_pandas.py +++ b/autotest/regression/test_mf6_pandas.py @@ -182,13 +182,13 @@ def test_pandas_001(function_tmpdir, example_data_path): assert well_data_pd.iloc[0, 1] == 0 assert well_data_pd.iloc[0, 2] == 4 assert well_data_pd.iloc[0, 3] == -2000.0 - assert well_data_pd["layer"][0] == 0 - assert well_data_pd["row"][0] == 0 - assert well_data_pd["column"][0] == 4 + assert well_data_pd["cellid_layer"][0] == 0 + assert well_data_pd["cellid_row"][0] == 0 + assert well_data_pd["cellid_column"][0] == 4 assert well_data_pd["q"][0] == -2000.0 - assert well_data_pd["layer"][1] == 0 - assert well_data_pd["row"][1] == 0 - assert well_data_pd["column"][1] == 7 + assert well_data_pd["cellid_layer"][1] == 0 + assert well_data_pd["cellid_row"][1] == 0 + assert well_data_pd["cellid_column"][1] == 7 assert well_data_pd["q"][1] == -2.0 well_data_rec = wel_package.stress_period_data.get_data(0) @@ -284,13 +284,13 @@ def test_pandas_001(function_tmpdir, example_data_path): assert well_data_pd_0.iloc[0, 1] == 0 assert well_data_pd_0.iloc[0, 2] == 4 assert well_data_pd_0.iloc[0, 3] == -2000.0 - assert well_data_pd_0["layer"][0] == 0 - assert well_data_pd_0["row"][0] == 0 - assert well_data_pd_0["column"][0] == 4 + assert well_data_pd_0["cellid_layer"][0] == 0 + assert well_data_pd_0["cellid_row"][0] == 0 + assert well_data_pd_0["cellid_column"][0] == 4 assert well_data_pd_0["q"][0] == -2000.0 - assert well_data_pd_0["layer"][1] == 0 - assert well_data_pd_0["row"][1] == 0 - assert well_data_pd_0["column"][1] == 7 + assert well_data_pd_0["cellid_layer"][1] == 0 + assert well_data_pd_0["cellid_row"][1] == 0 + assert well_data_pd_0["cellid_column"][1] == 7 assert well_data_pd_0["q"][1] == -2.0 well_data_pd = test_wel.stress_period_data.get_dataframe(1) assert isinstance(well_data_pd, pd.DataFrame) @@ -298,13 +298,13 @@ def test_pandas_001(function_tmpdir, example_data_path): assert well_data_pd.iloc[0, 1] == 0 assert well_data_pd.iloc[0, 2] == 4 assert well_data_pd.iloc[0, 3] == -1000.0 - assert well_data_pd["layer"][0] == 0 - assert well_data_pd["row"][0] == 0 - assert well_data_pd["column"][0] == 4 + assert well_data_pd["cellid_layer"][0] == 0 + assert well_data_pd["cellid_row"][0] == 0 + assert well_data_pd["cellid_column"][0] == 4 assert well_data_pd["q"][0] == -1000.0 - assert well_data_pd["layer"][1] == 0 - assert well_data_pd["row"][1] == 0 - assert well_data_pd["column"][1] == 7 + assert 
well_data_pd["cellid_layer"][1] == 0 + assert well_data_pd["cellid_row"][1] == 0 + assert well_data_pd["cellid_column"][1] == 7 assert well_data_pd["q"][1] == -20.0 test_riv = test_mod.get_package("riv") riv_data_pd = test_riv.stress_period_data.get_dataframe(0) diff --git a/autotest/test_binaryfile.py b/autotest/test_binaryfile.py index a4e5a400e..09351f4bf 100644 --- a/autotest/test_binaryfile.py +++ b/autotest/test_binaryfile.py @@ -1,19 +1,26 @@ +"""Test flopy.utils.binaryfile module. + +See also test_cellbudgetfile.py for similar tests. +""" + from itertools import repeat +from pprint import pformat import numpy as np +import pandas as pd import pytest from matplotlib import pyplot as plt from matplotlib.axes import Axes -from modflow_devtools.markers import requires_exe +from modflow_devtools.markers import requires_exe, requires_pkg import flopy -from autotest.conftest import get_example_data_path from flopy.modflow import Modflow from flopy.utils import ( BinaryHeader, CellBudgetFile, HeadFile, HeadUFile, + UcnFile, Util2d, ) from flopy.utils.binaryfile import ( @@ -21,7 +28,7 @@ write_budget, write_head, ) -from flopy.utils.gridutil import uniform_flow_field +from flopy.utils.gridutil import get_disv_kwargs, uniform_flow_field @pytest.fixture @@ -39,6 +46,170 @@ def zonbud_model_path(example_data_path): return example_data_path / "zonbud_examples" +def test_binaryread(example_data_path): + # test low-level binaryread() method + pth = example_data_path / "freyberg" / "freyberg.githds" + with open(pth, "rb") as fp: + res = flopy.utils.binaryfile.binaryread(fp, np.int32, 2) + np.testing.assert_array_equal(res, np.array([1, 1], np.int32)) + res = flopy.utils.binaryfile.binaryread(fp, np.float32, 2) + np.testing.assert_array_equal(res, np.array([10, 10], np.float32)) + res = flopy.utils.binaryfile.binaryread(fp, bytes) + assert res == b" HEAD" + res = flopy.utils.binaryfile.binaryread(fp, np.int32) + assert res == 20 + + +def test_binaryread_misc(tmp_path): + # Check deprecated warning + file = tmp_path / "data.file" + file.write_bytes(b" data") + with file.open("rb") as fp: + with pytest.deprecated_call(match="vartype=str is deprecated"): + res = flopy.utils.binaryfile.binaryread(fp, str, charlen=5) + assert res == b" data" + # Test exceptions with a small file with 1 byte + file.write_bytes(b"\x00") + with file.open("rb") as fp: + with pytest.raises(EOFError): + flopy.utils.binaryfile.binaryread(fp, bytes, charlen=6) + with file.open("rb") as fp: + with pytest.raises(EOFError): + flopy.utils.binaryfile.binaryread(fp, np.int32) + + +def test_deprecated_binaryread_struct(example_data_path): + # similar to test_binaryread(), but check the calls are deprecated + pth = example_data_path / "freyberg" / "freyberg.githds" + with open(pth, "rb") as fp: + with pytest.deprecated_call(): + res = flopy.utils.binaryfile.binaryread_struct(fp, np.int32, 2) + np.testing.assert_array_equal(res, np.array([1, 1], np.int32)) + with pytest.deprecated_call(): + res = flopy.utils.binaryfile.binaryread_struct(fp, np.float32, 2) + np.testing.assert_array_equal(res, np.array([10, 10], np.float32)) + with pytest.deprecated_call(): + res = flopy.utils.binaryfile.binaryread_struct(fp, str) + assert res == b" HEAD" + with pytest.deprecated_call(): + res = flopy.utils.binaryfile.binaryread_struct(fp, np.int32) + assert res == 20 + + +def test_headfile_build_index(example_data_path): + # test low-level BinaryLayerFile._build_index() method + pth = example_data_path / "freyberg_multilayer_transient" / "freyberg.hds" 
+ with HeadFile(pth) as hds: + pass + assert hds.nrow == 40 + assert hds.ncol == 20 + assert hds.nlay == 3 + assert not hasattr(hds, "nper") + assert hds.totalbytes == 10_676_004 + assert len(hds.recordarray) == 3291 + assert type(hds.recordarray) == np.ndarray + assert hds.recordarray.dtype == np.dtype( + [ + ("kstp", "i4"), + ("kper", "i4"), + ("pertim", "f4"), + ("totim", "f4"), + ("text", "S16"), + ("ncol", "i4"), + ("nrow", "i4"), + ("ilay", "i4"), + ] + ) + # check first and last recorddict + list_recordarray = hds.recordarray.tolist() + assert list_recordarray[0] == ( + (1, 1, 1.0, 1.0, b" HEAD", 20, 40, 1) + ) + assert list_recordarray[-1] == ( + (1, 1097, 1.0, 1097.0, b" HEAD", 20, 40, 3) + ) + assert hds.times == list((np.arange(1097) + 1).astype(np.float32)) + assert hds.kstpkper == [(1, kper + 1) for kper in range(1097)] + np.testing.assert_array_equal(hds.iposarray, np.arange(3291) * 3244 + 44) + assert hds.iposarray.dtype == np.int64 + with pytest.deprecated_call(match="use headers instead"): + assert hds.list_records() is None + # check first and last row of data frame + pd.testing.assert_frame_equal( + hds.headers.iloc[[0, -1]], + pd.DataFrame( + { + "kstp": np.array([1, 1], np.int32), + "kper": np.array([1, 1097], np.int32), + "pertim": np.array([1.0, 1.0], np.float32), + "totim": np.array([1.0, 1097.0], np.float32), + "text": ["HEAD", "HEAD"], + "ncol": np.array([20, 20], np.int32), + "nrow": np.array([40, 40], np.int32), + "ilay": np.array([1, 3], np.int32), + }, + index=[44, 10672804], + ), + ) + + +def test_concentration_build_index(example_data_path): + # test low-level BinaryLayerFile._build_index() method with UCN file + pth = example_data_path / "mt3d_test/mf2005mt3d/P07/MT3D001.UCN" + with UcnFile(pth) as ucn: + pass + assert ucn.nrow == 15 + assert ucn.ncol == 21 + assert ucn.nlay == 8 + assert not hasattr(ucn, "nper") + assert ucn.totalbytes == 10_432 + assert len(ucn.recordarray) == 8 + assert type(ucn.recordarray) == np.ndarray + assert ucn.recordarray.dtype == np.dtype( + [ + ("ntrans", "i4"), + ("kstp", "i4"), + ("kper", "i4"), + ("totim", "f4"), + ("text", "S16"), + ("ncol", "i4"), + ("nrow", "i4"), + ("ilay", "i4"), + ] + ) + # check first and last recorddict + list_recordarray = ucn.recordarray.tolist() + assert list_recordarray[0] == ( + (29, 1, 1, 100.0, b"CONCENTRATION ", 21, 15, 1) + ) + assert list_recordarray[-1] == ( + (29, 1, 1, 100.0, b"CONCENTRATION ", 21, 15, 8) + ) + assert ucn.times == [np.float32(100.0)] + assert ucn.kstpkper == [(1, 1)] + np.testing.assert_array_equal(ucn.iposarray, np.arange(8) * 1304 + 44) + assert ucn.iposarray.dtype == np.int64 + with pytest.deprecated_call(match="use headers instead"): + assert ucn.list_records() is None + # check first and last row of data frame + pd.testing.assert_frame_equal( + ucn.headers.iloc[[0, -1]], + pd.DataFrame( + { + "ntrans": np.array([29, 29], np.int32), + "kstp": np.array([1, 1], np.int32), + "kper": np.array([1, 1], np.int32), + "totim": np.array([100.0, 100.0], np.float32), + "text": ["CONCENTRATION", "CONCENTRATION"], + "ncol": np.array([21, 21], np.int32), + "nrow": np.array([15, 15], np.int32), + "ilay": np.array([1, 8], np.int32), + }, + index=[44, 9172], + ), + ) + + def test_binaryfile_writeread(function_tmpdir, nwt_model_path): model = "Pr3_MFNWT_lower.nam" ml = flopy.modflow.Modflow.load( @@ -111,18 +282,6 @@ def test_binaryfile_writeread(function_tmpdir, nwt_model_path): assert np.allclose(b, br), errmsg -def test_load_cell_budget_file_timeseries(example_data_path): - cbf = 
CellBudgetFile( - example_data_path / "mf2005_test" / "swiex1.gitzta", - precision="single", - ) - ts = cbf.get_ts(text="ZETASRF 1", idx=(0, 0, 24)) - assert ts.shape == ( - 4, - 2, - ), f"shape of zeta timeseries is {ts.shape} not (4, 2)" - - def test_load_binary_head_file(example_data_path): mpath = example_data_path / "freyberg" hf = HeadFile(mpath / "freyberg.githds") @@ -171,9 +330,15 @@ def test_headu_file_data(function_tmpdir, example_data_path): @pytest.mark.slow def test_headufile_get_ts(example_data_path): heads = HeadUFile(example_data_path / "unstructured" / "headu.githds") - nnodes = 19479 + + # check number of records (headers) + assert len(heads) == 15 + with pytest.deprecated_call(): + assert heads.get_nrecords() == 15 + assert not hasattr(heads, "nrecords") # make sure timeseries can be retrieved for each node + nnodes = 19479 for i in range(0, nnodes, 100): heads.get_ts(idx=i) with pytest.raises(IndexError): @@ -190,6 +355,7 @@ def test_headufile_get_ts(example_data_path): / "output" / "flow.hds" ) + assert len(heads) == 1 nnodes = 121 for i in range(nnodes): heads.get_ts(idx=i) @@ -217,41 +383,6 @@ def test_get_headfile_precision(example_data_path): assert precision == "double" -_example_data_path = get_example_data_path() - - -@pytest.mark.parametrize( - "path", - [ - _example_data_path / "mf2005_test" / "swiex1.gitzta", - _example_data_path / "mp6" / "EXAMPLE.BUD", - _example_data_path - / "mfusg_test" - / "01A_nestedgrid_nognc" - / "output" - / "flow.cbc", - ], -) -def test_budgetfile_detect_precision_single(path): - file = CellBudgetFile(path, precision="auto") - assert file.realtype == np.float32 - - -@pytest.mark.parametrize( - "path", - [ - _example_data_path - / "mf6" - / "test006_gwf3" - / "expected_output" - / "flow_adj.cbc", - ], -) -def test_budgetfile_detect_precision_double(path): - file = CellBudgetFile(path, precision="auto") - assert file.realtype == np.float64 - - def test_write_head(function_tmpdir): file_path = function_tmpdir / "headfile" head_data = np.random.random((10, 10)) @@ -293,6 +424,12 @@ def test_binaryfile_read(function_tmpdir, freyberg_model_path): h = HeadFile(freyberg_model_path / "freyberg.githds") assert isinstance(h, HeadFile) + # check number of records (headers) + assert len(h) == 1 + with pytest.deprecated_call(): + assert h.get_nrecords() == 1 + assert not hasattr(h, "nrecords") + times = h.get_times() assert np.isclose(times[0], 10.0), f"times[0] != {times[0]}" @@ -339,51 +476,185 @@ def test_binaryfile_read_context(freyberg_model_path): assert str(e.value) == "seek of closed file", str(e.value) -def test_headfile_reverse_mf6(example_data_path, function_tmpdir): +def test_binaryfile_reverse_mf6_dis(function_tmpdir): + name = "reverse_dis" + sim = flopy.mf6.MFSimulation( + sim_name=name, sim_ws=function_tmpdir, exe_name="mf6" + ) + tdis_rc = [(1, 1, 1.0), (1, 1, 1.0)] + nper = len(tdis_rc) + tdis = flopy.mf6.ModflowTdis(sim, nper=nper, perioddata=tdis_rc) + ims = flopy.mf6.ModflowIms(sim) + gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True) + dis = flopy.mf6.ModflowGwfdis(gwf, nrow=10, ncol=10) + dis = gwf.get_package("DIS") + nlay = 2 + botm = [1 - (k + 1) for k in range(nlay)] + botm_data = np.array([list(repeat(b, 10 * 10)) for b in botm]).reshape( + (nlay, 10, 10) + ) + dis.nlay = nlay + dis.botm.set_data(botm_data) + ic = flopy.mf6.ModflowGwfic(gwf) + npf = flopy.mf6.ModflowGwfnpf(gwf, save_specific_discharge=True) + chd = flopy.mf6.ModflowGwfchd( + gwf, stress_period_data=[[(0, 0, 0), 1.0], [(0, 9, 9), 0.0]] + 
) + budget_file = name + ".bud" + head_file = name + ".hds" + oc = flopy.mf6.ModflowGwfoc( + gwf, + budget_filerecord=budget_file, + head_filerecord=head_file, + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + sim.write_simulation(silent=True) + success, buff = sim.run_simulation(silent=True, report=True) + assert success, pformat(buff) + + # reverse head file in place and check reversal + head_file = flopy.utils.HeadFile(function_tmpdir / head_file, tdis=tdis) + heads = head_file.get_alldata() + assert heads.shape == (nper, 2, 10, 10) + head_file.reverse() + heads_rev = head_file.get_alldata() + assert heads_rev.shape == (nper, 2, 10, 10) + + # reverse budget and write to separate file + budget_file_rev_path = function_tmpdir / f"{budget_file}_rev" + budget_file = flopy.utils.CellBudgetFile( + function_tmpdir / budget_file, tdis=tdis + ) + budget_file.reverse(budget_file_rev_path) + budget_file_rev = flopy.utils.CellBudgetFile( + budget_file_rev_path, tdis=tdis + ) + + for kper in range(nper): + assert np.allclose(heads[kper], heads_rev[-kper + 1]) + budget = budget_file.get_data(text="FLOW-JA-FACE", totim=kper)[0] + budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=kper)[ + 0 + ] + assert budget.shape == budget_rev.shape + assert np.allclose(budget, -budget_rev) + + +@requires_pkg("shapely") +def test_binaryfile_reverse_mf6_disv(function_tmpdir): + name = "reverse_disv" + sim = flopy.mf6.MFSimulation( + sim_name=name, sim_ws=function_tmpdir, exe_name="mf6" + ) + tdis_rc = [(1, 1, 1.0), (1, 1, 1.0)] + nper = len(tdis_rc) + tdis = flopy.mf6.ModflowTdis(sim, nper=nper, perioddata=tdis_rc) + ims = flopy.mf6.ModflowIms(sim) + gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True) + dis = flopy.mf6.ModflowGwfdisv( + gwf, **get_disv_kwargs(2, 10, 10, 1.0, 1.0, 25.0, [20.0, 15.0]) + ) + ic = flopy.mf6.ModflowGwfic(gwf) + npf = flopy.mf6.ModflowGwfnpf(gwf, save_specific_discharge=True) + chd = flopy.mf6.ModflowGwfchd( + gwf, stress_period_data=[[(0, 0, 0), 1.0], [(0, 9, 9), 0.0]] + ) + budget_file = name + ".bud" + head_file = name + ".hds" + oc = flopy.mf6.ModflowGwfoc( + gwf, + budget_filerecord=budget_file, + head_filerecord=head_file, + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + sim.write_simulation(silent=True) + success, buff = sim.run_simulation(silent=True) + assert success, pformat(buff) + + # reverse head file in place and check reversal + head_file = flopy.utils.HeadFile(function_tmpdir / head_file, tdis=tdis) + heads = head_file.get_alldata() + assert heads.shape == (nper, 2, 1, 100) + head_file.reverse() + heads_rev = head_file.get_alldata() + assert heads_rev.shape == (nper, 2, 1, 100) + + # reverse budget and write to separate file + budget_file_rev_path = function_tmpdir / f"{budget_file}_rev" + budget_file = flopy.utils.CellBudgetFile( + function_tmpdir / budget_file, tdis=tdis + ) + budget_file.reverse(budget_file_rev_path) + budget_file_rev = flopy.utils.CellBudgetFile( + budget_file_rev_path, tdis=tdis + ) + + for kper in range(nper): + assert np.allclose(heads[kper], heads_rev[-kper + 1]) + budget = budget_file.get_data(text="FLOW-JA-FACE", totim=kper)[0] + budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=kper)[ + 0 + ] + assert budget.shape == budget_rev.shape + assert np.allclose(budget, -budget_rev) + + +def test_binaryfile_reverse_mf6_disu(example_data_path, function_tmpdir): # load simulation and extract tdis sim_name = "test006_gwf3" sim = flopy.mf6.MFSimulation.load( sim_name=sim_name, 
sim_ws=example_data_path / "mf6" / sim_name ) - tdis = sim.get_package("tdis") + tdis_rc = [(1, 1, 1.0), (1, 1, 1.0)] + nper = len(tdis_rc) + tdis = flopy.mf6.ModflowTdis( + sim, time_units="DAYS", nper=nper, perioddata=tdis_rc + ) + sim.set_sim_path(function_tmpdir) + sim.write_simulation() + sim.run_simulation() + + # load head file, providing tdis as kwarg + file_path = function_tmpdir / "flow.hds" + head_file = HeadFile(file_path, tdis=tdis) + + # reverse and write to a separate file + head_file_rev_path = function_tmpdir / "flow_rev.hds" + head_file.reverse(filename=head_file_rev_path) + head_file_rev = HeadFile(head_file_rev_path, tdis=tdis) - # load cell budget file, providing tdis as kwarg - model_path = example_data_path / "mf6" / sim_name - file_stem = "flow_adj" - file_path = model_path / "expected_output" / f"{file_stem}.hds" - f = HeadFile(file_path, tdis=tdis) - assert isinstance(f, HeadFile) + # load budget file + file_path = function_tmpdir / "flow.cbc" + budget_file = CellBudgetFile(file_path, tdis=tdis) - # reverse the file - rf_name = f"{file_stem}_rev.hds" - f.reverse(filename=function_tmpdir / rf_name) - rf = HeadFile(function_tmpdir / rf_name) - assert isinstance(rf, HeadFile) + # reverse and write to a separate file + budget_file_rev_path = function_tmpdir / "flow_rev.cbc" + budget_file.reverse(filename=budget_file_rev_path) + budget_file_rev = CellBudgetFile(budget_file_rev_path, tdis=tdis) # check that data from both files have the same shape - f_data = f.get_alldata() - f_shape = f_data.shape - rf_data = rf.get_alldata() - rf_shape = rf_data.shape - assert f_shape == rf_shape + assert head_file.get_alldata().shape == (nper, 1, 1, 121) + assert head_file_rev.get_alldata().shape == (nper, 1, 1, 121) # check number of records - nrecords = f.get_nrecords() - assert nrecords == rf.get_nrecords() + assert len(head_file) == nper + assert len(head_file_rev) == nper + assert len(budget_file) == nper * 2 + assert len(budget_file_rev) == nper * 2 # check that the data are reversed + nrecords = len(head_file) for idx in range(nrecords - 1, -1, -1): - # check headers - f_header = list(f.recordarray[nrecords - idx - 1]) - rf_header = list(rf.recordarray[idx]) - f_totim = f_header.pop(9) # todo check totim - rf_totim = rf_header.pop(9) - assert f_header == rf_header - assert f_header == rf_header - - # check data - f_data = f.get_data(idx=idx)[0] - rf_data = rf.get_data(idx=nrecords - idx - 1)[0] + # check headfile headers + f_header = list(head_file.recordarray[nrecords - idx - 1]) + rf_header = list(head_file_rev.recordarray[idx]) + assert f_header != rf_header + + # check headfile data + f_data = head_file.get_data(idx=idx)[0] + rf_data = head_file_rev.get_data(idx=nrecords - idx - 1)[0] assert f_data.shape == rf_data.shape if f_data.ndim == 1: for row in range(len(f_data)): @@ -393,6 +664,13 @@ def test_headfile_reverse_mf6(example_data_path, function_tmpdir): else: assert np.array_equal(f_data[0][0], rf_data[0][0]) + budget = budget_file.get_data(text="FLOW-JA-FACE", totim=idx)[0] + budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=idx)[ + 0 + ] + assert budget.shape == budget_rev.shape + assert np.allclose(budget, -budget_rev) + @pytest.fixture @pytest.mark.mf6 @@ -559,22 +837,3 @@ def test_read_mf2005_freyberg(example_data_path, function_tmpdir, compact): assert len(cbb_data) == len(cbb_data_kstpkper) for i in range(len(cbb_data)): assert np.array_equal(cbb_data[i], cbb_data_kstpkper[i]) - - -def test_read_mf6_budgetfile(example_data_path): - cbb_file = ( - 
example_data_path - / "mf6" - / "test005_advgw_tidal" - / "expected_output" - / "AdvGW_tidal.cbc" - ) - cbb = CellBudgetFile(cbb_file) - rch_zone_1 = cbb.get_data(paknam2="rch-zone_1".upper()) - rch_zone_2 = cbb.get_data(paknam2="rch-zone_2".upper()) - rch_zone_3 = cbb.get_data(paknam2="rch-zone_3".upper()) - - # ensure there is a record for each time step - assert len(rch_zone_1) == 120 * 3 + 1 - assert len(rch_zone_2) == 120 * 3 + 1 - assert len(rch_zone_3) == 120 * 3 + 1 diff --git a/autotest/test_cbc_full3D.py b/autotest/test_cbc_full3D.py index e347a70fa..54bad1064 100644 --- a/autotest/test_cbc_full3D.py +++ b/autotest/test_cbc_full3D.py @@ -31,7 +31,6 @@ def load_mf2005(path, ws_out): ) # change work space - # ws_out = os.path.join(baseDir, name) ml.change_model_ws(ws_out) # save all budget data to a cell-by-cell file @@ -87,7 +86,7 @@ def cbc_eval_size(cbcobj, nnodes, shape3d): def cbc_eval_data(cbcobj, shape3d): cbc_pth = cbcobj.filename print(f"{cbc_pth}:\n") - cbcobj.list_unique_records() + print(cbcobj.headers[["text", "imeth"]].drop_duplicates()) names = cbcobj.get_unique_record_names(decode=True) times = cbcobj.get_times() diff --git a/autotest/test_cellbudgetfile.py b/autotest/test_cellbudgetfile.py index 175df6037..ebcaf15a9 100644 --- a/autotest/test_cellbudgetfile.py +++ b/autotest/test_cellbudgetfile.py @@ -1,17 +1,368 @@ import os import numpy as np +import pandas as pd import pytest +from autotest.conftest import get_example_data_path from flopy.mf6.modflow.mfsimulation import MFSimulation from flopy.utils.binaryfile import CellBudgetFile +# test low-level CellBudgetFile._build_index() method + + +def test_cellbudgetfile_build_index_classic(example_data_path): + """Test reading "classic" budget file, without "COMPACT BUDGET" option.""" + pth = example_data_path / "mt3d_test/mf2kmt3d/mnw/t5.cbc" + with CellBudgetFile(pth) as cbc: + pass + assert cbc.nrow == 101 + assert cbc.ncol == 101 + assert cbc.nlay == 3 + assert cbc.nper == 1 + assert cbc.totalbytes == 122_448 + assert len(cbc.recordarray) == 1 + assert type(cbc.recordarray) == np.ndarray + assert cbc.recordarray.dtype == np.dtype( + [ + ("kstp", "i4"), + ("kper", "i4"), + ("text", "S16"), + ("ncol", "i4"), + ("nrow", "i4"), + ("nlay", "i4"), + ("imeth", "i4"), + ("delt", "f4"), + ("pertim", "f4"), + ("totim", "f4"), + ("modelnam", "S16"), + ("paknam", "S16"), + ("modelnam2", "S16"), + ("paknam2", "S16"), + ] + ) + assert len(cbc.recorddict) == 1 + list_recorddict = list(cbc.recorddict.items()) + # fmt: off + assert list_recorddict == [( + (1, 1, b" MNW", 101, 101, 3, 0, 0.0, 0.0, -1.0, b"", b"", b"", b""), + 36) + ] + # fmt: on + assert cbc.times == [] + assert cbc.kstpkper == [(1, 1)] + np.testing.assert_array_equal(cbc.iposheader, np.array([0])) + assert cbc.iposheader.dtype == np.int64 + np.testing.assert_array_equal(cbc.iposarray, np.array([36])) + assert cbc.iposarray.dtype == np.int64 + assert cbc.textlist == [b" MNW"] + assert cbc.imethlist == [0] + assert cbc.paknamlist_from == [b""] + assert cbc.paknamlist_to == [b""] + pd.testing.assert_frame_equal( + cbc.headers, + pd.DataFrame( + { + "kstp": np.array([1], np.int32), + "kper": np.array([1], np.int32), + "text": ["MNW"], + "ncol": np.array([101], np.int32), + "nrow": np.array([101], np.int32), + "nlay": np.array([3], np.int32), + }, + index=[36], + ), + ) + + +def test_cellbudgetfile_build_index_compact(example_data_path): + """Test reading mfnwt budget file, with "COMPACT BUDGET" option.""" + pth = example_data_path / "freyberg_multilayer_transient" /
"freyberg.cbc" + with CellBudgetFile(pth) as cbc: + pass + assert cbc.nrow == 40 + assert cbc.ncol == 20 + assert cbc.nlay == 3 + assert cbc.nper == 1097 + assert cbc.totalbytes == 42_658_384 + assert len(cbc.recordarray) == 5483 + assert type(cbc.recordarray) == np.ndarray + assert cbc.recordarray.dtype == np.dtype( + [ + ("kstp", "i4"), + ("kper", "i4"), + ("text", "S16"), + ("ncol", "i4"), + ("nrow", "i4"), + ("nlay", "i4"), + ("imeth", "i4"), + ("delt", "f4"), + ("pertim", "f4"), + ("totim", "f4"), + ("modelnam", "S16"), + ("paknam", "S16"), + ("modelnam2", "S16"), + ("paknam2", "S16"), + ] + ) + assert len(cbc.recorddict) == 5483 + # check first and last recorddict + list_recorddict = list(cbc.recorddict.items()) + # fmt: off + assert list_recorddict[0] == ( + (1, 1, b" CONSTANT HEAD", 20, 40, -3, 2, 1.0, 1.0, 1.0, b"", b"", b"", b""), + 52, + ) + assert list_recorddict[-1] == ( + (1, 1097, b"FLOW LOWER FACE ", 20, 40, -3, 1, 1.0, 1.0, 1097.0, b"", b"", b"", b""), + 42648784, + ) + # fmt: on + assert cbc.times == list((np.arange(1097) + 1).astype(np.float32)) + assert cbc.kstpkper == [(1, kper + 1) for kper in range(1097)] + # fmt: off + expected_iposheader = np.cumsum([0] + + ([296] + [9652] * 4) * 1095 + + [296] + [9652] * 3 + + [296] + [9652] * 2) + # fmt: on + np.testing.assert_array_equal(cbc.iposheader, expected_iposheader) + assert cbc.iposheader.dtype == np.int64 + np.testing.assert_array_equal(cbc.iposarray, expected_iposheader + 52) + assert cbc.iposarray.dtype == np.int64 + assert cbc.textlist == [ + b" CONSTANT HEAD", + b"FLOW RIGHT FACE ", + b"FLOW FRONT FACE ", + b"FLOW LOWER FACE ", + b" STORAGE", + ] + assert cbc.imethlist == [2, 1, 1, 1, 1] + assert cbc.paknamlist_from == [b""] + assert cbc.paknamlist_to == [b""] + # check first and last row of data frame + pd.testing.assert_frame_equal( + cbc.headers.iloc[[0, -1]], + pd.DataFrame( + { + "kstp": np.array([1, 1], np.int32), + "kper": np.array([1, 1097], np.int32), + "text": ["CONSTANT HEAD", "FLOW LOWER FACE"], + "ncol": np.array([20, 20], np.int32), + "nrow": np.array([40, 40], np.int32), + "nlay": np.array([-3, -3], np.int32), + "imeth": np.array([2, 1], np.int32), + "delt": np.array([1.0, 1.0], np.float32), + "pertim": np.array([1.0, 1.0], np.float32), + "totim": np.array([1.0, 1097.0], np.float32), + }, + index=[52, 42648784], + ), + ) + + +def test_cellbudgetfile_build_index_mf6(example_data_path): + cbb_file = ( + example_data_path + / "mf6" + / "test005_advgw_tidal" + / "expected_output" + / "AdvGW_tidal.cbc" + ) + with CellBudgetFile(cbb_file) as cbb: + pass + assert cbb.nrow == 15 + assert cbb.ncol == 10 + assert cbb.nlay == 3 + assert cbb.nper == 4 + assert cbb.totalbytes == 13_416_552 + assert len(cbb.recordarray) == 3610 + assert type(cbb.recordarray) == np.ndarray + assert cbb.recordarray.dtype == np.dtype( + [ + ("kstp", "i4"), + ("kper", "i4"), + ("text", "S16"), + ("ncol", "i4"), + ("nrow", "i4"), + ("nlay", "i4"), + ("imeth", "i4"), + ("delt", "f8"), + ("pertim", "f8"), + ("totim", "f8"), + ("modelnam", "S16"), + ("paknam", "S16"), + ("modelnam2", "S16"), + ("paknam2", "S16"), + ] + ) + assert len(cbb.recorddict) == 3610 + # check first and last recorddict + list_recorddict = list(cbb.recorddict.items()) + # fmt: off + assert list_recorddict[0] == ( + (1, 1, b" STO-SS", 10, 15, -3, 1, + 1.0, 1.0, 1.0, + b"", b"", b"", b""), + 64, + ) + assert list_recorddict[-1] == ( + (120, 4, b" EVT", 10, 15, -3, 6, + 0.08333333333333333, 10.000000000000002, 30.99999999999983, + b"GWF_1 ", b"GWF_1 ", b"GWF_1 ", b"EVT 
"), + 13414144, + ) + # fmt: on + assert isinstance(cbb.times, list) + np.testing.assert_allclose(cbb.times, np.linspace(1.0, 31, 361)) + # fmt: off + assert cbb.kstpkper == ( + [(1, 1)] + + [(kstp + 1, 2) for kstp in range(120)] + + [(kstp + 1, 3) for kstp in range(120)] + + [(kstp + 1, 4) for kstp in range(120)] + ) + # fmt: on + # this file has a complex structure, so just look at unique ipos spacings + assert set(np.diff(cbb.iposheader)) == ( + {184, 264, 304, 384, 456, 616, 632, 1448, 2168, 2536, 3664, 21664} + ) + assert cbb.iposheader[0] == 0 + assert cbb.iposheader.dtype == np.int64 + assert set(np.diff(cbb.iposarray)) == ( + {184, 264, 304, 384, 456, 616, 632, 1448, 2168, 2472, 3664, 21728} + ) + assert cbb.iposarray[0] == 64 + assert cbb.iposarray.dtype == np.int64 + # variable size headers depending on imeth + header_sizes = np.full(3610, 64) + header_sizes[cbb.recordarray["imeth"] == 6] = 128 + np.testing.assert_array_equal(cbb.iposheader + header_sizes, cbb.iposarray) + assert cbb.textlist == [ + b" STO-SS", + b" STO-SY", + b" FLOW-JA-FACE", + b" WEL", + b" RIV", + b" GHB", + b" RCH", + b" EVT", + ] + assert cbb.imethlist == [1, 1, 1, 6, 6, 6, 6, 6] + assert cbb.paknamlist_from == [b"", b"GWF_1 "] + assert cbb.paknamlist_to == [ + b"", + b"WEL ", + b"RIV ", + b"GHB-TIDAL ", + b"RCH-ZONE_1 ", + b"RCH-ZONE_2 ", + b"RCH-ZONE_3 ", + b"EVT ", + ] + # check first and last row of data frame + pd.testing.assert_frame_equal( + cbb.headers.iloc[[0, -1]], + pd.DataFrame( + { + "kstp": np.array([1, 120], np.int32), + "kper": np.array([1, 4], np.int32), + "text": ["STO-SS", "EVT"], + "ncol": np.array([10, 10], np.int32), + "nrow": np.array([15, 15], np.int32), + "nlay": np.array([-3, -3], np.int32), + "imeth": np.array([1, 6], np.int32), + "delt": [1.0, 0.08333333333333333], + "pertim": [1.0, 10.0], + "totim": [1.0, 31.0], + "modelnam": ["", "GWF_1"], + "paknam": ["", "GWF_1"], + "modelnam2": ["", "GWF_1"], + "paknam2": ["", "EVT"], + }, + index=[64, 13414144], + ), + ) + + +def test_cellbudgetfile_imeth_5(example_data_path): + pth = example_data_path / "preserve_unitnums/testsfr2.ghb.cbc" + with CellBudgetFile(pth) as cbc: + pass + # check a few components + pd.testing.assert_index_equal( + cbc.headers.index, pd.Index(np.arange(12, dtype=np.int64) * 156 + 64) + ) + assert cbc.headers.text.unique().tolist() == ["HEAD DEP BOUNDS"] + assert cbc.headers.imeth.unique().tolist() == [5] + @pytest.fixture def zonbud_model_path(example_data_path): return example_data_path / "zonbud_examples" +def test_cellbudgetfile_get_indices_nrecords(example_data_path): + pth = example_data_path / "freyberg_multilayer_transient" / "freyberg.cbc" + with CellBudgetFile(pth) as cbc: + pass + assert cbc.get_indices() is None + idxs = cbc.get_indices("constant head") + assert type(idxs) == np.ndarray + assert idxs.dtype == np.int64 + np.testing.assert_array_equal(idxs, list(range(0, 5476, 5)) + [5479]) + idxs = cbc.get_indices(b" STORAGE") + np.testing.assert_array_equal(idxs, list(range(4, 5475, 5))) + + assert len(cbc) == 5483 + with pytest.deprecated_call(): + assert cbc.nrecords == 5483 + with pytest.deprecated_call(): + assert cbc.get_nrecords() == 5483 + + +def test_load_cell_budget_file_timeseries(example_data_path): + pth = example_data_path / "mf2005_test" / "swiex1.gitzta" + cbf = CellBudgetFile(pth, precision="single") + ts = cbf.get_ts(text="ZETASRF 1", idx=(0, 0, 24)) + assert ts.shape == (4, 2) + + +_example_data_path = get_example_data_path() + + +@pytest.mark.parametrize( + "path", + [ + 
_example_data_path / "mf2005_test" / "swiex1.gitzta", + _example_data_path / "mp6" / "EXAMPLE.BUD", + _example_data_path + / "mfusg_test" + / "01A_nestedgrid_nognc" + / "output" + / "flow.cbc", + ], +) +def test_budgetfile_detect_precision_single(path): + file = CellBudgetFile(path, precision="auto") + assert file.realtype == np.float32 + + +@pytest.mark.parametrize( + "path", + [ + _example_data_path + / "mf6" + / "test006_gwf3" + / "expected_output" + / "flow_adj.cbc", + ], +) +def test_budgetfile_detect_precision_double(path): + file = CellBudgetFile(path, precision="auto") + assert file.realtype == np.float64 + + def test_cellbudgetfile_position(function_tmpdir, zonbud_model_path): fpth = zonbud_model_path / "freyberg.gitcbc" v = CellBudgetFile(fpth) @@ -28,7 +379,7 @@ def test_cellbudgetfile_position(function_tmpdir, zonbud_model_path): assert ipos == ival, f"position of index 8767 header != {ival}" cbcd = [] - for i in range(idx, v.get_nrecords()): + for i in range(idx, len(v)): cbcd.append(v.get_data(i)[0]) v.close() @@ -49,15 +400,19 @@ def test_cellbudgetfile_position(function_tmpdir, zonbud_model_path): v2 = CellBudgetFile(opth, verbose=True) - try: - v2.list_records() - except: - assert False, f"could not list records on {opth}" + with pytest.deprecated_call(match="use headers instead"): + assert v2.list_records() is None + with pytest.deprecated_call(match=r"drop_duplicates\(\) instead"): + assert v2.list_unique_records() is None + with pytest.deprecated_call(match=r"drop_duplicates\(\) instead"): + assert v2.list_unique_packages(True) is None + with pytest.deprecated_call(match=r"drop_duplicates\(\) instead"): + assert v2.list_unique_packages(False) is None names = v2.get_unique_record_names(decode=True) cbcd2 = [] - for i in range(0, v2.get_nrecords()): + for i in range(len(v2)): cbcd2.append(v2.get_data(i)[0]) v2.close() @@ -280,10 +635,11 @@ def test_cellbudgetfile_reverse_mf6(example_data_path, function_tmpdir): assert isinstance(rf, CellBudgetFile) # check that both files have the same number of records - nrecords = f.get_nrecords() - assert nrecords == rf.get_nrecords() + assert len(f) == 2 + assert len(rf) == 2 # check data were reversed + nrecords = len(f) for idx in range(nrecords - 1, -1, -1): # check headers f_header = list(f.recordarray[nrecords - idx - 1]) @@ -306,3 +662,22 @@ def test_cellbudgetfile_reverse_mf6(example_data_path, function_tmpdir): else: # flows should be negated assert np.array_equal(f_data[0][0], -rf_data[0][0]) + + +def test_read_mf6_budgetfile(example_data_path): + cbb_file = ( + example_data_path + / "mf6" + / "test005_advgw_tidal" + / "expected_output" + / "AdvGW_tidal.cbc" + ) + cbb = CellBudgetFile(cbb_file) + rch_zone_1 = cbb.get_data(paknam2="rch-zone_1".upper()) + rch_zone_2 = cbb.get_data(paknam2="rch-zone_2".upper()) + rch_zone_3 = cbb.get_data(paknam2="rch-zone_3".upper()) + + # ensure there is a record for each time step + assert len(rch_zone_1) == 120 * 3 + 1 + assert len(rch_zone_2) == 120 * 3 + 1 + assert len(rch_zone_3) == 120 * 3 + 1 diff --git a/autotest/test_export.py b/autotest/test_export.py index fbfac0937..0028ada34 100644 --- a/autotest/test_export.py +++ b/autotest/test_export.py @@ -177,7 +177,7 @@ def unstructured_grid(example_data_path): ) -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) @pytest.mark.parametrize("pathlike", (True, False)) def test_output_helper_shapefile_export( pathlike, function_tmpdir, example_data_path @@ -202,7 +202,7 @@ def 
test_output_helper_shapefile_export( ) -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) @pytest.mark.slow def test_freyberg_export(function_tmpdir, example_data_path): # steady state @@ -296,7 +296,7 @@ def test_freyberg_export(function_tmpdir, example_data_path): assert part.read_text() == wkt -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) @pytest.mark.parametrize("missing_arrays", [True, False]) @pytest.mark.slow def test_disu_export(function_tmpdir, missing_arrays): @@ -353,7 +353,7 @@ def test_export_output(crs, function_tmpdir, example_data_path): assert read_crs == get_authority_crs(4326) -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) def test_write_gridlines_shapefile(function_tmpdir): import shapefile @@ -379,7 +379,7 @@ def test_write_gridlines_shapefile(function_tmpdir): assert len(sf) == 22 -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) def test_export_shapefile_polygon_closed(function_tmpdir): from shapefile import Reader @@ -410,7 +410,7 @@ def test_export_shapefile_polygon_closed(function_tmpdir): @excludes_platform("Windows") -@requires_pkg("rasterio", "shapefile", "scipy") +@requires_pkg("rasterio", "pyshp", "scipy", name_map={"pyshp": "shapefile"}) def test_export_array(function_tmpdir, example_data_path): import rasterio from scipy.ndimage import rotate @@ -443,12 +443,10 @@ def test_export_array(function_tmpdir, example_data_path): assert np.abs(val - m.modelgrid.extent[0]) < 1e-6 # ascii grid origin will differ if it was unrotated # without scipy.rotate - # assert np.abs(val - m.modelgrid.xoffset) < 1e-6 if "yllcorner" in line.lower(): val = float(line.strip().split()[-1]) assert np.abs(val - m.modelgrid.extent[2]) < 1e-6 # without scipy.rotate - # assert np.abs(val - m.modelgrid.yoffset) < 1e-6 if "cellsize" in line.lower(): val = float(line.strip().split()[-1]) rot_cellsize = ( @@ -501,7 +499,7 @@ def test_netcdf_classmethods(function_tmpdir, example_data_path): new_f.nc.close() -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) def test_shapefile_ibound(function_tmpdir, example_data_path): from shapefile import Reader @@ -524,7 +522,7 @@ def test_shapefile_ibound(function_tmpdir, example_data_path): shape.close() -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) @pytest.mark.slow @pytest.mark.parametrize("namfile", namfiles()) def test_shapefile(function_tmpdir, namfile): @@ -540,8 +538,6 @@ def test_shapefile(function_tmpdir, namfile): fnc_name = function_tmpdir / f"{model.name}.shp" fnc = model.export(fnc_name) - # fnc2 = m.export(fnc_name, package_names=None) - # fnc3 = m.export(fnc_name, package_names=['DIS']) s = Reader(fnc_name) assert ( @@ -549,7 +545,7 @@ def test_shapefile(function_tmpdir, namfile): ), f"wrong number of records in shapefile {fnc_name}" -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) @pytest.mark.slow @pytest.mark.parametrize("namfile", namfiles()) def test_shapefile_export_modelgrid_override(function_tmpdir, namfile): @@ -616,7 +612,7 @@ def test_export_netcdf(function_tmpdir, namfile): nc.close() -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) def test_export_array2(function_tmpdir): nrow = 7 ncol = 11 @@ -650,7 +646,7 @@ def test_export_array2(function_tmpdir): assert os.path.isfile(filename), "did not create array shapefile" 
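A note on the recurring marker change in this file: `pyshp` is the package's distribution name on PyPI, while `shapefile` is the module it actually installs, so `requires_pkg` now takes the distribution name plus a `name_map` to resolve the import. A minimal self-contained sketch of the pattern these hunks adopt (the test body itself is hypothetical):

```python
from modflow_devtools.markers import requires_pkg


@requires_pkg("pyshp", name_map={"pyshp": "shapefile"})
def test_shapefile_available():
    # the import name differs from the PyPI distribution name
    import shapefile

    assert hasattr(shapefile, "Reader")
```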
-@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_export_array_contours_structured(function_tmpdir): nrow = 7 ncol = 11 @@ -686,7 +682,7 @@ def test_export_array_contours_structured(function_tmpdir): assert os.path.isfile(filename), "did not create contour shapefile" -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_export_array_contours_unstructured( function_tmpdir, unstructured_grid ): @@ -712,7 +708,7 @@ def test_export_array_contours_unstructured( from autotest.test_gridgen import sim_disu_diff_layers -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_export_array_contours_unstructured_diff_layers( function_tmpdir, sim_disu_diff_layers ): @@ -741,7 +737,7 @@ def test_export_array_contours_unstructured_diff_layers( # plt.show() -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_export_contourf(function_tmpdir, example_data_path): from shapefile import Reader @@ -784,7 +780,7 @@ def test_export_contourf(function_tmpdir, example_data_path): @pytest.mark.mf6 -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_export_contours(function_tmpdir, example_data_path): from shapefile import Reader @@ -899,7 +895,6 @@ def cellid(k, i, j, nrow, ncol): spd6 = flopy.mf6.ModflowGwfriv.stress_period_data.empty( gwf, maxbound=len(spd) ) - # spd6[0]['cellid'] = cellid(spd.k, spd.i, spd.j, m.nrow, m.ncol) spd6[0]["cellid"] = list(zip(spd.k, spd.i, spd.j)) for c in spd.dtype.names: if c in spd6[0].dtype.names: @@ -907,7 +902,6 @@ def cellid(k, i, j, nrow, ncol): # MFTransient list apparently requires entries for additional stress periods, # even if they are the same spd6[1] = spd6[0] - # irch = np.zeros((nrow, ncol)) riv6 = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=spd6) rch6 = flopy.mf6.ModflowGwfrcha(gwf, recharge=rech) @@ -922,7 +916,6 @@ def cellid(k, i, j, nrow, ncol): if not has_pkg("shapefile"): return - # rch6.export('{}/mf6.shp'.format(baseDir)) m.export(function_tmpdir / "mfnwt.shp") gwf.export(function_tmpdir / "mf6.shp") @@ -952,7 +945,7 @@ def cellid(k, i, j, nrow, ncol): assert np.abs(it - it6) < 1e-6 -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) @pytest.mark.slow def test_export_huge_shapefile(function_tmpdir): nlay = 2 @@ -963,7 +956,6 @@ def test_export_huge_shapefile(function_tmpdir): perlen = 1 nstp = 1 tsmult = 1 - # perioddata = [[perlen, nstp, tsmult]] * 2 botm = np.zeros((nlay, nrow, ncol)) m = flopy.modflow.Modflow( @@ -1236,8 +1228,6 @@ def test_vtk_add_packages(function_tmpdir, example_data_path): # todo: pakbase.export() for vtk!!!! 
m.dis.export(ws, fmt="vtk", xml=True, binary=False) filetocheck = function_tmpdir / "DIS.vtk" - # totalbytes = os.path.getsize(filetocheck) - # assert(totalbytes==1019857) assert count_lines_in_file(filetocheck) == 27239 # upw with point scalar output @@ -1288,8 +1278,6 @@ def test_vtk_mf6(function_tmpdir, example_data_path): # check one filetocheck = function_tmpdir / "twrihfb2015_000000.vtk" - # totalbytes = os.path.getsize(filetocheck) - # assert(totalbytes==21609) assert count_lines_in_file(filetocheck) == 9537 @@ -1498,7 +1486,6 @@ def test_vtk_vertex(function_tmpdir, example_data_path): # disv test workspace = example_data_path / "mf6" / "test003_gwfs_disv" - # outfile = os.path.join("vtk_transient_test", "vtk_pacakages") sim = MFSimulation.load(sim_ws=workspace) gwf = sim.get_model("gwf_1") @@ -1731,51 +1718,51 @@ def test_vtk_export_disv1_model(function_tmpdir): idomain=np.ones((nlay, nrow, ncol)), ) - from flopy.utils.cvfdutil import gridlist_to_disv_gridprops - - gridprops = gridlist_to_disv_gridprops([mg]) - gridprops["top"] = 0 - gridprops["botm"] = np.zeros((nlay, nrow * ncol), dtype=float) - 1 - gridprops["nlay"] = nlay - - disv = ModflowGwfdisv(gwf, **gridprops) - ic = ModflowGwfic(gwf, strt=10) - npf = ModflowGwfnpf(gwf) - - # Export model without specifying packages_names parameter - # create the vtk output - gwf = sim.get_model() - vtkobj = Vtk(gwf, binary=False) - vtkobj.add_model(gwf) - f = function_tmpdir / "gwf.vtk" - vtkobj.write(f) - - # load the output using the vtk standard library - gridreader = vtkUnstructuredGridReader() - gridreader.SetFileName(str(f)) - gridreader.Update() - grid = gridreader.GetOutput() - - # get the points - vtk_points = grid.GetPoints() - vtk_points = vtk_points.GetData() - vtk_points = vtk_to_numpy(vtk_points) - # print(vtk_points) - - # get cell locations (ia format of point to cell relationship) - cell_locations = vtk_to_numpy(grid.GetCellLocationsArray()) - cell_locations_answer = np.array([0, 8, 16, 24, 32, 40, 48, 56, 64]) - print(f"Found cell locations {cell_locations} in vtk file.") - print(f"Expecting cell locations {cell_locations_answer}") - errmsg = "vtk cell locations do not match expected result." - assert np.allclose(cell_locations, cell_locations_answer), errmsg - - cell_types = vtk_to_numpy(grid.GetCellTypesArray()) - cell_types_answer = np.array(9 * [42]) - print(f"Found cell types {cell_types} in vtk file.") - print(f"Expecting cell types {cell_types_answer}") - errmsg = "vtk cell types do not match expected result." 
- assert np.allclose(cell_types, cell_types_answer), errmsg + with pytest.deprecated_call(): + from flopy.utils.cvfdutil import gridlist_to_disv_gridprops + + gridprops = gridlist_to_disv_gridprops([mg]) + gridprops["top"] = 0 + gridprops["botm"] = np.zeros((nlay, nrow * ncol), dtype=float) - 1 + gridprops["nlay"] = nlay + + disv = ModflowGwfdisv(gwf, **gridprops) + ic = ModflowGwfic(gwf, strt=10) + npf = ModflowGwfnpf(gwf) + + # Export model without specifying packages_names parameter + # create the vtk output + gwf = sim.get_model() + vtkobj = Vtk(gwf, binary=False) + vtkobj.add_model(gwf) + f = function_tmpdir / "gwf.vtk" + vtkobj.write(f) + + # load the output using the vtk standard library + gridreader = vtkUnstructuredGridReader() + gridreader.SetFileName(str(f)) + gridreader.Update() + grid = gridreader.GetOutput() + + # get the points + vtk_points = grid.GetPoints() + vtk_points = vtk_points.GetData() + vtk_points = vtk_to_numpy(vtk_points) + + # get cell locations (ia format of point to cell relationship) + cell_locations = vtk_to_numpy(grid.GetCellLocationsArray()) + cell_locations_answer = np.array([0, 8, 16, 24, 32, 40, 48, 56, 64]) + print(f"Found cell locations {cell_locations} in vtk file.") + print(f"Expecting cell locations {cell_locations_answer}") + errmsg = "vtk cell locations do not match expected result." + assert np.allclose(cell_locations, cell_locations_answer), errmsg + + cell_types = vtk_to_numpy(grid.GetCellTypesArray()) + cell_types_answer = np.array(9 * [42]) + print(f"Found cell types {cell_types} in vtk file.") + print(f"Expecting cell types {cell_types_answer}") + errmsg = "vtk cell types do not match expected result." + assert np.allclose(cell_types, cell_types_answer), errmsg @pytest.mark.mf6 @@ -1825,7 +1812,6 @@ def test_vtk_export_disv2_model(function_tmpdir): vtk_points = grid.GetPoints() vtk_points = vtk_points.GetData() vtk_points = vtk_to_numpy(vtk_points) - # print(vtk_points) # get cell locations (ia format of point to cell relationship) cell_locations = vtk_to_numpy(grid.GetCellLocationsArray()) @@ -2007,7 +1993,7 @@ def test_vtk_export_disu2_grid(function_tmpdir, example_data_path): @pytest.mark.mf6 @requires_exe("mf6", "gridgen") -@requires_pkg("vtk", "shapefile", "shapely") +@requires_pkg("vtk", "pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_vtk_export_disu_model(function_tmpdir): from vtkmodules.util.numpy_support import vtk_to_numpy @@ -2035,7 +2021,6 @@ def test_vtk_export_disu_model(function_tmpdir): vtk_points = grid.GetPoints() vtk_points = vtk_points.GetData() vtk_points = vtk_to_numpy(vtk_points) - # print(vtk_points) # get cell locations (ia format of point to cell relationship) cell_locations = vtk_to_numpy(grid.GetCellLocationsArray())[0:9] @@ -2061,3 +2046,21 @@ def test_vtk_export_disu_model(function_tmpdir): strt_vtk = vtk_to_numpy(grid.GetCellData().GetArray("strt")) if not np.allclose(gwf.ic.strt.array, strt_vtk): raise AssertionError("'strt' array not written in proper node order") + + +def test_to_shapefile_raises_attributeerror(): + # deprecated 3.2.4, changed to raise AttributeError version 3.8 + # these attributes and this test may eventually be removed + m = flopy.modflow.Modflow() + assert isinstance(m, flopy.mbase.BaseModel) + with pytest.raises(AttributeError, match="was removed"): + m.to_shapefile("nope.shp") + dis = flopy.modflow.ModflowDis(m) + assert isinstance(dis, flopy.pakbase.Package) + with pytest.raises(AttributeError, match="was removed"): + dis.to_shapefile("nope.shp") + wel = 
flopy.modflow.ModflowWel(m) + spd = wel.stress_period_data + assert isinstance(spd, flopy.utils.MfList) + with pytest.raises(AttributeError, match="was removed"): + spd.to_shapefile("nope.shp", kper=1) diff --git a/autotest/test_formattedfile.py b/autotest/test_formattedfile.py index 84cd88b5e..a7146398d 100644 --- a/autotest/test_formattedfile.py +++ b/autotest/test_formattedfile.py @@ -1,5 +1,6 @@ import matplotlib.pyplot as plt import numpy as np +import pandas as pd import pytest from matplotlib.axes import Axes @@ -11,6 +12,60 @@ def freyberg_model_path(example_data_path): return example_data_path / "freyberg" +def test_headfile_build_index(example_data_path): + # test low-level FormattedLayerFile._build_index() method + pth = example_data_path / "mf2005_test" / "test1tr.githds" + with FormattedHeadFile(pth) as hds: + pass + assert hds.nrow == 15 + assert hds.ncol == 10 + assert hds.nlay == 1 + assert not hasattr(hds, "nper") + assert hds.totalbytes == 1613 + assert len(hds.recordarray) == 1 + assert type(hds.recordarray) == np.ndarray + assert hds.recordarray.dtype == np.dtype( + [ + ("kstp", "i4"), + ("kper", "i4"), + ("pertim", "f4"), + ("totim", "f4"), + ("text", "S16"), + ("ncol", "i4"), + ("nrow", "i4"), + ("ilay", "i4"), + ] + ) + flt32time = np.float32(1577880000.0) + assert hds.recordarray.tolist() == [ + (50, 1, float(flt32time), float(flt32time), b"HEAD", 10, 15, 1) + ] + assert hds.times == [flt32time] + assert hds.kstpkper == [(50, 1)] + np.testing.assert_array_equal(hds.iposarray, [98]) + assert hds.iposarray.dtype == np.int64 + with pytest.deprecated_call(match="use headers instead"): + assert hds.list_records() is None + pd.testing.assert_frame_equal( + hds.headers, + pd.DataFrame( + [ + { + "kstp": np.int32(50), + "kper": np.int32(1), + "pertim": flt32time, + "totim": flt32time, + "text": "HEAD", + "ncol": np.int32(10), + "nrow": np.int32(15), + "ilay": np.int32(1), + } + ], + index=[98], + ), + ) + + def test_formattedfile_reference(example_data_path): h = FormattedHeadFile(example_data_path / "mf2005_test" / "test1tr.githds") assert isinstance(h, FormattedHeadFile) @@ -25,6 +80,12 @@ def test_formattedfile_read(function_tmpdir, example_data_path): h = FormattedHeadFile(mf2005_model_path / "test1tr.githds") assert isinstance(h, FormattedHeadFile) + # check number of records + assert len(h) == 1 + with pytest.deprecated_call(): + assert h.get_nrecords() == 1 + assert not hasattr(h, "nrecords") + times = h.get_times() assert np.isclose(times[0], 1577880064.0) diff --git a/autotest/test_geospatial_util.py b/autotest/test_geospatial_util.py index 3783fde2b..9132e1d1b 100644 --- a/autotest/test_geospatial_util.py +++ b/autotest/test_geospatial_util.py @@ -153,7 +153,7 @@ def test_import_geospatial_utils(): ) -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_geospatial_collection_load_shpfile(example_data_path): # with Path shp = example_data_path / "freyberg" / "gis" / "bedrock_outcrop_hole.shp" diff --git a/autotest/test_get_modflow.py b/autotest/test_get_modflow.py index 3a93aa7ec..d5dc4d679 100644 --- a/autotest/test_get_modflow.py +++ b/autotest/test_get_modflow.py @@ -116,18 +116,16 @@ def test_get_release(repo): tag = "latest" release = get_release(repo=repo, tag=tag) assets = release["assets"] - - expected_assets = ["linux.zip", "mac.zip", "win64.zip"] + expected_assets = ["linux.zip", "mac.zip", "macarm.zip", "win64.zip"] expected_ostags = [a.replace(".zip", "") for a in expected_assets] 
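
Two pytest idioms recur across the updated tests: `pytest.deprecated_call()` asserts that a call emits a `DeprecationWarning`, and `pytest.raises(AttributeError, match=...)` pins down the message of a removed API. A self-contained sketch follows, with hypothetical stub functions standing in for the flopy methods exercised above:

import warnings

import pytest


def deprecated_api():
    # hypothetical stand-in for e.g. FormattedHeadFile.list_records()
    warnings.warn("use headers instead", DeprecationWarning)


def removed_api():
    # hypothetical stand-in for e.g. BaseModel.to_shapefile()
    raise AttributeError("to_shapefile() was removed")


def test_deprecated_then_removed():
    # fails unless a (Pending)DeprecationWarning matching the text is raised
    with pytest.deprecated_call(match="use headers instead"):
        deprecated_api()
    # fails unless AttributeError is raised and its message matches
    with pytest.raises(AttributeError, match="was removed"):
        removed_api()
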
actual_assets = [asset["name"] for asset in assets] if repo == "modflow6": - # can remove if modflow6 releases follow asset name conventions followed in executables and nightly build repos + # can remove if modflow6 releases follow the same asset name + # convention used in the executables and nightly build repos assert {a.rpartition("_")[2] for a in actual_assets} >= { a for a in expected_assets if not a.startswith("win") } - elif repo == "modflow6-nightly-build": - expected_assets.append("macarm.zip") else: for ostag in expected_ostags: assert any( @@ -142,15 +140,13 @@ def test_select_bindir(bindir, function_tmpdir): pytest.skip(f"{expected_path} is not writable") selected = select_bindir(f":{bindir}") + # For some reason sys.prefix can return different python + # installs when invoked here and get_modflow.py on macOS. + # Work around by just comparing the end of the bin path, + # should be .../Python.framework/Versions//bin if system() != "Darwin": assert selected == expected_path else: - # for some reason sys.prefix can return different python - # installs when invoked here and get_modflow.py on macOS - # https://github.com/modflowpy/flopy/actions/runs/3331965840/jobs/5512345032#step:8:1835 - # - # work around by just comparing the end of the bin path - # should be .../Python.framework/Versions//bin assert selected.parts[-4:] == expected_path.parts[-4:] diff --git a/autotest/test_grid.py b/autotest/test_grid.py index d27337f37..142fed140 100644 --- a/autotest/test_grid.py +++ b/autotest/test_grid.py @@ -136,7 +136,6 @@ def test_get_vertices(): xgrid = mg.xvertices ygrid = mg.yvertices - # a1 = np.array(mg.xyvertices) a1 = np.array( [ [xgrid[0, 0], ygrid[0, 0]], @@ -225,9 +224,7 @@ def test_get_rc_from_node_coordinates(): delr = [0.5] * 5 + [2.0] * 5 nrow = 10 ncol = 10 - mfdis = ModflowDis( - mf, nrow=nrow, ncol=ncol, delr=delr, delc=delc - ) # , xul=50, yul=1000) + mfdis = ModflowDis(mf, nrow=nrow, ncol=ncol, delr=delr, delc=delc) ygrid, xgrid, zgrid = mfdis.get_node_coordinates() for i in range(nrow): for j in range(ncol): @@ -946,25 +943,27 @@ def test_tocvfd3(): yoff=200, idomain=idomain, ) - gridprops = gridlist_to_disv_gridprops([sg1, sg2]) - assert "ncpl" in gridprops - assert "nvert" in gridprops - assert "vertices" in gridprops - assert "cell2d" in gridprops - - ncpl = gridprops["ncpl"] - nvert = gridprops["nvert"] - vertices = gridprops["vertices"] - cell2d = gridprops["cell2d"] - assert ncpl == 121 - assert nvert == 148 - assert len(vertices) == nvert - assert len(cell2d) == 121 - - # spot check information for cell 28 (zero based) - answer = [28, 250.0, 150.0, 7, 38, 142, 143, 45, 46, 44, 38] - for i, j in zip(cell2d[28], answer): - assert i == j, f"{i} not equal {j}" + + with pytest.deprecated_call(): + gridprops = gridlist_to_disv_gridprops([sg1, sg2]) + assert "ncpl" in gridprops + assert "nvert" in gridprops + assert "vertices" in gridprops + assert "cell2d" in gridprops + + ncpl = gridprops["ncpl"] + nvert = gridprops["nvert"] + vertices = gridprops["vertices"] + cell2d = gridprops["cell2d"] + assert ncpl == 121 + assert nvert == 148 + assert len(vertices) == nvert + assert len(cell2d) == 121 + + # spot check information for cell 28 (zero based) + answer = [28, 250.0, 150.0, 7, 38, 142, 143, 45, 46, 44, 38] + for i, j in zip(cell2d[28], answer): + assert i == j, f"{i} not equal {j}" @requires_pkg("shapely") diff --git a/autotest/test_grid_cases.py b/autotest/test_grid_cases.py index 00c778a29..5f3e748e2 100644 --- a/autotest/test_grid_cases.py +++ 
b/autotest/test_grid_cases.py @@ -240,7 +240,6 @@ def voronoi_polygon(): [1330.11116, 1809.788273], [399.1804436, 2998.515188], [914.7728404, 5132.494831], - # [1831.381546, 6335.543757], ] poly = np.array(domain) max_area = 100.0**2 @@ -393,7 +392,6 @@ def voronoi_many_polygons(): y = radius * np.sin(theta) + 20.0 circle_poly1 = [(x, y) for x, y in zip(x, y)] tri.add_polygon(circle_poly1) - # tri.add_hole((70, 20)) # add line through domain to force conforming cells line = [(x, x) for x in np.linspace(11, 89, 100)] diff --git a/autotest/test_gridgen.py b/autotest/test_gridgen.py index 064bd7064..66a8a3a98 100644 --- a/autotest/test_gridgen.py +++ b/autotest/test_gridgen.py @@ -59,98 +59,88 @@ def get_structured_grid(): @requires_exe("gridgen") -@requires_pkg("shapefile") -# GRIDGEN seems not to like paths containing "[" or "]", as -# function_tmpdir does with parametrization, do it manually -# @pytest.mark.parametrize("grid_type", ["vertex", "unstructured"]) -def test_add_active_domain(function_tmpdir): # , grid_type): +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) +@pytest.mark.parametrize("grid_type", ["vertex", "unstructured"]) +def test_add_active_domain(function_tmpdir, grid_type): bgrid = get_structured_grid() - # test providing active domain various ways - for grid_type in ["vertex", "unstructured"]: - grids = [] - for feature in [ - [[[(0, 0), (0, 60), (40, 80), (60, 0), (0, 0)]]], - function_tmpdir / "ad0.shp", - function_tmpdir / "ad0", - "ad0.shp", - "ad0", - ]: - print( - "Testing add_active_domain() for", - grid_type, - "grid with features", - feature, - ) - gridgen = Gridgen(bgrid, model_ws=function_tmpdir) - gridgen.add_active_domain( - feature, - range(bgrid.nlay), - ) - gridgen.build() - grid = ( - VertexGrid(**gridgen.get_gridprops_vertexgrid()) - if grid_type == "vertex" - else UnstructuredGrid( - **gridgen.get_gridprops_unstructuredgrid() - ) - ) - grid.plot() - grids.append(grid) - # plt.show() - - assert grid.nnodes < bgrid.nnodes - assert not np.array_equal(grid.ncpl, bgrid.ncpl) - assert all(np.array_equal(grid.ncpl, g.ncpl) for g in grids) - assert all(grid.nnodes == g.nnodes for g in grids) + # test providing active domain in various ways + grids = [] + for feature in [ + [[[(0, 0), (0, 60), (40, 80), (60, 0), (0, 0)]]], + function_tmpdir / "ad0.shp", + function_tmpdir / "ad0", + "ad0.shp", + "ad0", + ]: + print( + "Testing add_active_domain() for", + grid_type, + "grid with features", + feature, + ) + gridgen = Gridgen(bgrid, model_ws=function_tmpdir) + gridgen.add_active_domain( + feature, + range(bgrid.nlay), + ) + gridgen.build() + grid = ( + VertexGrid(**gridgen.get_gridprops_vertexgrid()) + if grid_type == "vertex" + else UnstructuredGrid(**gridgen.get_gridprops_unstructuredgrid()) + ) + grid.plot() + grids.append(grid) + # plt.show() + + assert grid.nnodes < bgrid.nnodes + assert not np.array_equal(grid.ncpl, bgrid.ncpl) + assert all(np.array_equal(grid.ncpl, g.ncpl) for g in grids) + assert all(grid.nnodes == g.nnodes for g in grids) @requires_exe("gridgen") -@requires_pkg("shapefile") -# GRIDGEN seems not to like paths containing "[" or "]", as -# function_tmpdir does with parametrization, do it manually -# @pytest.mark.parametrize("grid_type", ["vertex", "unstructured"]) -def test_add_refinement_feature(function_tmpdir): # , grid_type): +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) +@pytest.mark.parametrize("grid_type", ["vertex", "unstructured"]) +def test_add_refinement_feature(function_tmpdir, grid_type): bgrid = 
get_structured_grid() - # test providing refinement feature various ways - for grid_type in ["vertex", "unstructured"]: - grids = [] - for features in [ - [[[(0, 0), (0, 60), (40, 80), (60, 0), (0, 0)]]], - function_tmpdir / "rf0.shp", - function_tmpdir / "rf0", - "rf0.shp", - "rf0", - ]: - print( - "Testing add_refinement_feature() for", - grid_type, - "grid with features", - features, - ) - gridgen = Gridgen(bgrid, model_ws=function_tmpdir) - gridgen.add_refinement_features( - features, - "polygon", - 1, - range(bgrid.nlay), - ) - gridgen.build() - grid = ( - VertexGrid(**gridgen.get_gridprops_vertexgrid()) - if grid_type == "vertex" - else UnstructuredGrid( - **gridgen.get_gridprops_unstructuredgrid() - ) - ) - grid.plot() - # plt.show() - - assert grid.nnodes > bgrid.nnodes - assert not np.array_equal(grid.ncpl, bgrid.ncpl) - assert all(np.array_equal(grid.ncpl, g.ncpl) for g in grids) - assert all(grid.nnodes == g.nnodes for g in grids) + # test providing refinement features in various ways + grids = [] + for features in [ + [[[(0, 0), (0, 60), (40, 80), (60, 0), (0, 0)]]], + function_tmpdir / "rf0.shp", + function_tmpdir / "rf0", + "rf0.shp", + "rf0", + ]: + print( + "Testing add_refinement_feature() for", + grid_type, + "grid with features", + features, + ) + gridgen = Gridgen(bgrid, model_ws=function_tmpdir) + gridgen.add_refinement_features( + features, + "polygon", + 1, + range(bgrid.nlay), + ) + gridgen.build() + grid = ( + VertexGrid(**gridgen.get_gridprops_vertexgrid()) + if grid_type == "vertex" + else UnstructuredGrid(**gridgen.get_gridprops_unstructuredgrid()) + ) + grid.plot() + # plt.show() + + assert grid.nnodes > bgrid.nnodes + assert not np.array_equal(grid.ncpl, bgrid.ncpl) + assert all(np.array_equal(grid.ncpl, g.ncpl) for g in grids) + assert all(grid.nnodes == g.nnodes for g in grids) @pytest.mark.slow @@ -364,7 +354,7 @@ def sim_disu_diff_layers(function_tmpdir): @pytest.mark.slow @requires_exe("mf6", "gridgen") -@requires_pkg("shapely", "shapefile") +@requires_pkg("shapely", "pyshp", name_map={"pyshp": "shapefile"}) def test_mf6disu(sim_disu_diff_layers): sim = sim_disu_diff_layers ws = sim.sim_path @@ -474,7 +464,7 @@ def test_mf6disu(sim_disu_diff_layers): @pytest.mark.slow @requires_exe("mfusg", "gridgen") -@requires_pkg("shapely", "shapefile") +@requires_pkg("shapely", "pyshp", name_map={"pyshp": "shapefile"}) def test_mfusg(function_tmpdir): from shapely.geometry import Polygon @@ -839,7 +829,6 @@ def test_gridgen(function_tmpdir): ) == 0 ), msg - # ms_u.disu.write_file() # test mfusg without vertical pass-through gu.vertical_pass_through = False @@ -855,7 +844,7 @@ def test_gridgen(function_tmpdir): @requires_exe("mf6", "gridgen") -@requires_pkg("shapely", "shapefile") +@requires_pkg("shapely", "pyshp", name_map={"pyshp": "shapefile"}) def test_flopy_issue_1492(function_tmpdir): """ Submitted by David Brakenhoff in diff --git a/autotest/test_gridintersect.py b/autotest/test_gridintersect.py index dc7a7656f..268de09f9 100644 --- a/autotest/test_gridintersect.py +++ b/autotest/test_gridintersect.py @@ -460,9 +460,6 @@ def test_rect_grid_linestring_in_and_out_of_cell2(): LineString([(5, 15), (5.0, 9), (15.0, 5.0), (5.0, 1.0)]) ) assert len(result) == 3 - # assert result.cellids[0] == (1, 0) - # assert result.cellids[1] == (1, 1) - # assert np.allclose(result.lengths.sum(), 21.540659228538015) @requires_pkg("shapely") @@ -1214,10 +1211,10 @@ def test_point_offset_rot_structured_grid(): @requires_pkg("shapely") def test_linestring_offset_rot_structured_grid(): 
sgr = get_rect_grid(angrot=45.0, xyoffset=10.0) - ls = LineString([(5, 10.0 + np.sqrt(200.0)), (15, 10.0 + np.sqrt(200.0))]) + ls = LineString([(5, 25), (15, 25)]) ix = GridIntersect(sgr, method="structured") result = ix.intersect(ls) - assert len(result) == 2 + assert len(result) == 3 # check empty result when using local model coords ix = GridIntersect(sgr, method="structured", local=True) result = ix.intersect(ls) @@ -1452,6 +1449,114 @@ def test_raster_sampling_methods(example_data_path): ) +@requires_pkg("rasterio") +def test_raster_reprojection(example_data_path): + ws = example_data_path / "options" / "dem" + raster_name = "dem.img" + + wgs_epsg = 4326 + wgs_xmin = -120.32116799649168 + wgs_ymax = 39.46620605907534 + + raster = Raster.load(ws / raster_name) + + print(raster.crs.to_epsg()) + wgs_raster = raster.to_crs(crs=f"EPSG:{wgs_epsg}") + + if not wgs_raster.crs.to_epsg() == wgs_epsg: + raise AssertionError(f"Raster not converted to EPSG {wgs_epsg}") + + transform = wgs_raster._meta["transform"] + if not np.isclose(transform.c, wgs_xmin) and not np.isclose( + transform.f, wgs_ymax + ): + raise AssertionError(f"Raster not reprojected to EPSG {wgs_epsg}") + + raster.to_crs(epsg=wgs_epsg, inplace=True) + transform2 = raster._meta["transform"] + for ix, val in enumerate(transform): + if not np.isclose(val, transform2[ix]): + raise AssertionError("In place reprojection not working") + + +@requires_pkg("rasterio") +def test_create_raster_from_array_modelgrid(example_data_path): + ws = example_data_path / "options" / "dem" + raster_name = "dem.img" + + raster = Raster.load(ws / raster_name) + + xsize = 200 + ysize = 100 + xmin, xmax, ymin, ymax = raster.bounds + + nbands = 5 + nlay = 1 + nrow = int(np.floor((ymax - ymin) / ysize)) + ncol = int(np.floor((xmax - xmin) / xsize)) + + delc = np.full((nrow,), ysize) + delr = np.full((ncol,), xsize) + + grid = flopy.discretization.StructuredGrid( + delc=delc, + delr=delr, + top=np.ones((nrow, ncol)), + botm=np.zeros((nlay, nrow, ncol)), + idomain=np.ones((nlay, nrow, ncol), dtype=int), + xoff=xmin, + yoff=ymin, + crs=raster.crs, + ) + + array = np.random.random((grid.ncpl * nbands,)) * 100 + robj = Raster.raster_from_array(array, grid) + + if nbands != len(robj.bands): + raise AssertionError("Number of raster bands is incorrect") + + array = array.reshape((nbands, nrow, ncol)) + for band in robj.bands: + ra = robj.get_array(band) + np.testing.assert_allclose( + array[band - 1], + ra, + err_msg="Array not properly reshaped or converted to raster", + ) + + +@requires_pkg("rasterio", "affine") +def test_create_raster_from_array_transform(example_data_path): + import affine + + ws = example_data_path / "options" / "dem" + raster_name = "dem.img" + + raster = Raster.load(ws / raster_name) + + transform = raster._meta["transform"] + array = raster.get_array(band=raster.bands[0]) + + array = np.expand_dims(array, axis=0) + # same location but shrink raster by factor 2 + new_transform = affine.Affine( + transform.a / 2, 0, transform.c, 0, transform.e / 2, transform.f + ) + + robj = Raster.raster_from_array( + array, crs=raster.crs, transform=new_transform + ) + + rxmin, rxmax, rymin, rymax = robj.bounds + xmin, xmax, ymin, ymax = raster.bounds + + if ( + not ((xmax - xmin) / (rxmax - rxmin)) == 2 + or not ((ymax - ymin) / (rymax - rymin)) == 2 + ): + raise AssertionError("Transform based raster not working properly") + + if __name__ == "__main__": sgr = get_rect_grid(angrot=45.0, xyoffset=10.0) ls = LineString([(5, 10.0 + np.sqrt(200.0)), (15, 10.0 + 
np.sqrt(200.0))]) diff --git a/autotest/test_headufile.py b/autotest/test_headufile.py index f275d5ff7..e00f5106c 100644 --- a/autotest/test_headufile.py +++ b/autotest/test_headufile.py @@ -57,7 +57,6 @@ def mfusg_model(module_tmpdir): ic = ra["nodenumber"][0] chdspd.append([ic, head, head]) - # gridprops = g.get_gridprops() gridprops = g.get_gridprops_disu5() # create the mfusg modoel diff --git a/autotest/test_lake_connections.py b/autotest/test_lake_connections.py index 40322b0ab..71bab3d00 100644 --- a/autotest/test_lake_connections.py +++ b/autotest/test_lake_connections.py @@ -216,7 +216,7 @@ def test_lake(function_tmpdir, example_data_path): # mm.plot_array(bot_tm) # determine a reasonable lake bottom - idx = np.where(lakes > -1) + idx = np.asarray(lakes > -1).nonzero() lak_bot = bot_tm[idx].max() + 2.0 # interpolate top elevations @@ -241,9 +241,6 @@ def test_lake(function_tmpdir, example_data_path): gwf.dis.top = top_tm gwf.dis.botm = bot_tm.reshape(gwf.modelgrid.shape) - # v = gwf.dis.top.array - # v = gwf.dis.botm.array - k11_tm = k11.resample_to_grid( gwf.modelgrid, band=k11.bands[0], @@ -637,9 +634,9 @@ def test_embedded_lak_prudic_mixed(example_data_path): lake_map[0, :, :] = lakibd[:, :] - 1 lakebed_leakance = np.zeros(shape2d, dtype=object) - idx = np.where(lake_map[0, :, :] == 0) + idx = np.asarray(lake_map[0, :, :] == 0).nonzero() lakebed_leakance[idx] = "none" - idx = np.where(lake_map[0, :, :] == 1) + idx = np.asarray(lake_map[0, :, :] == 1).nonzero() lakebed_leakance[idx] = 1.0 lakebed_leakance = lakebed_leakance.tolist() diff --git a/autotest/test_lgrutil.py b/autotest/test_lgrutil.py index 439e4d5e6..67d7a5325 100644 --- a/autotest/test_lgrutil.py +++ b/autotest/test_lgrutil.py @@ -1,6 +1,6 @@ import numpy as np -from flopy.utils.lgrutil import Lgr +from flopy.utils.lgrutil import Lgr, LgrToDisv def test_lgrutil(): @@ -155,3 +155,75 @@ def test_lgrutil2(): ] assert np.allclose(lgr.delr, answer), f"{lgr.delr} /= {answer}" assert np.allclose(lgr.delc, answer), f"{lgr.delc} /= {answer}" + + +def test_lgrutil3(): + # Define parent grid information + xoffp = 0.0 + yoffp = 0.0 + nlayp = 3 + nrowp = 3 + ncolp = 3 + + dx = 100.0 + dy = 100.0 + dz = 10.0 + delrp = dx * np.ones(ncolp) + delcp = dy * np.ones(nrowp) + topp = dz * np.ones((nrowp, ncolp), dtype=float) + botmp = np.empty((nlayp, nrowp, ncolp), dtype=float) + for k in range(nlayp): + botmp[k] = -(k + 1) * dz + idomainp = np.ones((nlayp, nrowp, ncolp), dtype=int) + idomainp[:, nrowp // 2, ncolp // 2] = 0 + ncpp = 3 + ncppl = nlayp * [1] + lgr = Lgr( + nlayp, + nrowp, + ncolp, + delrp, + delcp, + topp, + botmp, + idomainp, + ncpp=ncpp, + ncppl=ncppl, + xllp=xoffp, + yllp=yoffp, + ) + + # check to make sure gridprops is accessible from lgr + gridprops = lgr.to_disv_gridprops() + assert "ncpl" in gridprops + assert "nvert" in gridprops + assert "vertices" in gridprops + assert "nlay" in gridprops + assert "top" in gridprops + assert "botm" in gridprops + assert gridprops["ncpl"] == 17 + assert gridprops["nvert"] == 32 + assert gridprops["nlay"] == 3 + + # test the lgr to disv class + lgrtodisv = LgrToDisv(lgr) + + # test guts of LgrToDisv to make sure hanging vertices added correctly + assert lgrtodisv.right_face_hanging[(1, 0)] == [0, 4, 8, 12] + assert lgrtodisv.left_face_hanging[(1, 2)] == [3, 7, 11, 15] + assert lgrtodisv.back_face_hanging[(2, 1)] == [12, 13, 14, 15] + assert lgrtodisv.front_face_hanging[(0, 1)] == [0, 1, 2, 3] + + assert lgrtodisv.iverts[1] == [1, 2, 6, 18, 17, 5] + assert lgrtodisv.iverts[3] == 
[4, 5, 20, 24, 9, 8] + assert lgrtodisv.iverts[4] == [6, 7, 11, 10, 27, 23] + assert lgrtodisv.iverts[6] == [9, 29, 30, 10, 14, 13] + + assert np.allclose(gridprops["top"], dz * np.ones((17,))) + + assert gridprops["botm"].shape == (3, 17) + b = np.empty((3, 17)) + b[0] = -dz + b[1] = -2 * dz + b[2] = -3 * dz + assert np.allclose(gridprops["botm"], b) diff --git a/autotest/test_mf6.py b/autotest/test_mf6.py index 0e09c8258..6401fa8fb 100644 --- a/autotest/test_mf6.py +++ b/autotest/test_mf6.py @@ -102,7 +102,7 @@ def write_head( ("kper", "i4"), ("pertim", "f8"), ("totim", "f8"), - ("text", "a16"), + ("text", "S16"), ("ncol", "i4"), ("nrow", "i4"), ("ilay", "i4"), @@ -763,7 +763,7 @@ def test_vor_binary_write(function_tmpdir, layered): "filename": "recharge.bin", "binary": True, "iprn": 1, - "data": np.full(vor.ncpl, 0.000001, dtype=float), # 0.000001, + "data": np.full(vor.ncpl, 0.000001, dtype=float), }, } chd_data = [ @@ -2245,6 +2245,9 @@ def test_multi_model(function_tmpdir): assert rec_array[0][3] == model_names[1] assert rec_array[1][1] == "transport.ims" assert rec_array[1][2] == model_names[2] + assert gwf1.get_ims_package() is gwf2.get_ims_package() + assert gwf1.get_ims_package().filename == "flow.ims" + assert gwt.get_ims_package().filename == "transport.ims" # test ssm fileinput gwt2 = sim2.get_model("gwt_model_1") ssm2 = gwt2.get_package("ssm") @@ -2417,3 +2420,55 @@ def test_remove_model(function_tmpdir, example_data_path): elif exg_index > 0: assert "end exchanges" in l break + + +def test_flopy_2283(function_tmpdir): + # create triangular grid + triangle_ws = function_tmpdir / "triangle" + triangle_ws.mkdir() + + active_area = [(0, 0), (0, 1000), (1000, 1000), (1000, 0)] + tri = Triangle(model_ws=triangle_ws, angle=30) + tri.add_polygon(active_area) + tri.add_region((1, 1), maximum_area=50**2) + + tri.build() + + # build vertex grid object + vgrid = flopy.discretization.VertexGrid( + vertices=tri.get_vertices(), + cell2d=tri.get_cell2d(), + xoff=199000, + yoff=215500, + crs=31370, + angrot=30, + ) + + # coord info is set (also correct when using vgrid.set_coord_info() + print(vgrid) + + # create MODFLOW 6 model + ws = function_tmpdir / "model" + ws.mkdir() + sim = flopy.mf6.MFSimulation(sim_name="prj-test", sim_ws=ws) + tdis = flopy.mf6.ModflowTdis(sim) + ims = flopy.mf6.ModflowIms(sim) + + gwf = flopy.mf6.ModflowGwf(sim, modelname="gwf") + disv = flopy.mf6.ModflowGwfdisv( + gwf, + xorigin=vgrid.xoffset, + yorigin=vgrid.yoffset, + angrot=vgrid.angrot, # no CRS info can be set in DISV + nlay=1, + top=0.0, + botm=-10.0, + ncpl=vgrid.ncpl, + nvert=vgrid.nvert, + cell2d=vgrid.cell2d, + vertices=tri.get_vertices(), # this is not stored in the Vertex grid object? 
+ ) + + assert gwf.modelgrid.xoffset == disv.xorigin.get_data() + assert gwf.modelgrid.yoffset == disv.yorigin.get_data() + assert gwf.modelgrid.angrot == disv.angrot.get_data() diff --git a/autotest/test_mfsimlist.py b/autotest/test_mfsimlist.py index 29ba1ce39..88f9a397b 100644 --- a/autotest/test_mfsimlist.py +++ b/autotest/test_mfsimlist.py @@ -160,7 +160,4 @@ def test_mfsimlist_memory_all(mem_option, function_tmpdir): total = 0.0 for key, value in mem_dict.items(): total += value["MEMORYSIZE"] - # total_ = mfsimlst.get_memory_usage(units=units) - # diff = total_ - total - # percent_diff = 100.0 * diff / total_ assert total > 0.0, "memory is not greater than zero" diff --git a/autotest/test_modflow.py b/autotest/test_modflow.py index c421869d5..5239ce4c6 100644 --- a/autotest/test_modflow.py +++ b/autotest/test_modflow.py @@ -555,8 +555,6 @@ def test_namfile_readwrite(function_tmpdir, example_data_path): delr=m.dis.delr.array, top=m.dis.top.array, botm=m.dis.botm.array, - # lenuni=3, - # length_multiplier=.3048, xoff=xll, yoff=yll, angrot=30, @@ -593,7 +591,6 @@ def test_namfile_readwrite(function_tmpdir, example_data_path): def test_read_usgs_model_reference(function_tmpdir, model_reference_path): nlay, nrow, ncol = 1, 30, 5 delr, delc = 250, 500 - # xll, yll = 272300, 5086000 mrf_path = function_tmpdir / model_reference_path.name shutil.copy(model_reference_path, mrf_path) @@ -782,32 +779,6 @@ def test_mflist_external(function_tmpdir): ml1.write_input() - # ml = Modflow( - # "mflist_test", - # model_ws=str(function_tmpdir), - # external_path=str(function_tmpdir / "ref"), - # ) - # dis = ModflowDis(ml, 1, 10, 10, nper=3, perlen=1.0) - # wel_data = { - # 0: [[0, 0, 0, -1], [1, 1, 1, -1]], - # 1: [[0, 0, 0, -2], [1, 1, 1, -1]], - # } - # wel = ModflowWel(ml, stress_period_data=wel_data) - # ml.write_input() - - # ml1 = Modflow.load( - # "mflist_test.nam", - # model_ws=ml.model_ws, - # verbose=True, - # forgive=False, - # check=False, - # ) - - # assert np.array_equal(ml.wel[0], ml1.wel[0]) - # assert np.array_equal(ml.wel[1], ml1.wel[1]) - - # ml1.write_input() - @excludes_platform("windows", ci_only=True) def test_single_mflist_entry_load(function_tmpdir, example_data_path): @@ -1111,7 +1082,6 @@ def test_default_oc_stress_period_data(function_tmpdir): lpf = ModflowLpf(m, ipakcb=100) wel_data = {0: [[0, 0, 0, -1000.0]]} wel = ModflowWel(m, ipakcb=101, stress_period_data=wel_data) - # spd = {(0, 0): ['save head', 'save budget']} oc = ModflowOc(m, stress_period_data=None) spd_oc = oc.stress_period_data tups = list(spd_oc.keys()) @@ -1269,10 +1239,6 @@ def test_load_with_list_reader(function_tmpdir): welra.tofile(f) welra.tofile(f) - # no need to run the model - # success, buff = m.run_model(silent=True) - # assert success, 'model did not terminate successfully' - # the m2 model will load all of these external files, possibly using sfac # and just create regular list input files for wel, drn, and ghb fname = "original.nam" diff --git a/autotest/test_modpathfile.py b/autotest/test_modpathfile.py index 32a0531ef..e2e46bae1 100644 --- a/autotest/test_modpathfile.py +++ b/autotest/test_modpathfile.py @@ -313,7 +313,7 @@ def test_get_destination_endpoint_data( @pytest.mark.parametrize("longfieldname", [True, False]) @requires_exe("mf6", "mp7") -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_write_shapefile(function_tmpdir, mp7_small, longfieldname): from shapefile import Reader diff --git a/autotest/test_mp6.py 
b/autotest/test_mp6.py index 73935a1d4..ec1f22493 100644 --- a/autotest/test_mp6.py +++ b/autotest/test_mp6.py @@ -131,7 +131,7 @@ def test_mpsim(function_tmpdir, mp6_test_path): assert stllines[6].strip().split()[-1] == "p2" -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_get_destination_data(function_tmpdir, mp6_test_path): copy_modpath_files(mp6_test_path, function_tmpdir, "EXAMPLE.") copy_modpath_files(mp6_test_path, function_tmpdir, "EXAMPLE-3.") @@ -175,7 +175,7 @@ def test_get_destination_data(function_tmpdir, mp6_test_path): np.array(well_pthld)[["k", "i", "j"]].tolist(), dtype=starting_locs.dtype, ) - assert np.all(np.in1d(starting_locs, pathline_locs)) + assert np.all(np.isin(starting_locs, pathline_locs)) # test writing a shapefile of endpoints epd.write_shapefile( @@ -314,9 +314,6 @@ def test_loadtxt(function_tmpdir, mp6_test_path): ) assert np.array_equal(ra, ra2) - # epfilewithnans = os.path.join('../examples/data/mp6/', 'freybergmp.mpend') - # epd = EndpointFile(epfilewithnans) - @requires_exe("mf2005") def test_modpath(function_tmpdir, example_data_path): diff --git a/autotest/test_mp7.py b/autotest/test_mp7.py index f763e577b..00ba238f0 100644 --- a/autotest/test_mp7.py +++ b/autotest/test_mp7.py @@ -3,8 +3,9 @@ import matplotlib.pyplot as plt import numpy as np +import pandas as pd import pytest -from modflow_devtools.markers import requires_exe, requires_pkg +from modflow_devtools.markers import requires_exe from autotest.test_mp7_cases import Mp7Cases from flopy.mf6 import ( @@ -718,47 +719,15 @@ def ex01_mf6_model(function_tmpdir): return sim, function_tmpdir -@pytest.mark.slow @requires_exe("mf6", "mp7") -def test_forward(ex01_mf6_model): - sim, function_tmpdir = ex01_mf6_model - # Run the simulation - success, buff = sim.run_simulation() - assert success, "mf6 model did not run" - - mpnam = f"{ex01_mf6_model_name}_mp_forward" - - # load the MODFLOW 6 model - sim = MFSimulation.load("mf6mod", "mf6", "mf6", function_tmpdir) - gwf = sim.get_model(ex01_mf6_model_name) - - mp = Modpath7.create_mp7( - modelname=mpnam, - trackdir="forward", - flowmodel=gwf, - exe_name="mp7", - model_ws=function_tmpdir, - rowcelldivisions=1, - columncelldivisions=1, - layercelldivisions=1, - ) - - # write modpath datasets - mp.write_input() - - # run modpath - success, buff = mp.run_model() - assert success, f"mp7 model ({mp.name}) did not run" - - +@pytest.mark.parametrize("direction", ["forward", "backward"]) @pytest.mark.slow -@requires_exe("mf6", "mp7") -def test_backward(ex01_mf6_model): +def test_basic_mp7_model(ex01_mf6_model, direction): sim, function_tmpdir = ex01_mf6_model success, buff = sim.run_simulation() - assert success, "mf6 model did not run" + assert success, buff - mpnam = f"{ex01_mf6_model_name}_mp_backward" + mpnam = f"{ex01_mf6_model_name}_mp_{direction}" # load the MODFLOW 6 model sim = MFSimulation.load("mf6mod", "mf6", "mf6", function_tmpdir) @@ -766,7 +735,7 @@ def test_backward(ex01_mf6_model): mp = Modpath7.create_mp7( modelname=mpnam, - trackdir="backward", + trackdir=direction, flowmodel=gwf, exe_name="mp7", model_ws=function_tmpdir, @@ -784,110 +753,42 @@ def test_backward(ex01_mf6_model): @requires_exe("mf2005", "mf6", "mp7") -def test_pathline_output(function_tmpdir): - case_mf2005 = Mp7Cases.mp7_mf2005(function_tmpdir) - case_mf6 = Mp7Cases.mp7_mf6(function_tmpdir) - - case_mf2005.write_input() - success, buff = case_mf2005.run_model() - assert success, f"modpath model ({case_mf2005.name}) 
did not run" - - case_mf6.write_input() - success, buff = case_mf6.run_model() - assert success, f"modpath model ({case_mf6.name}) did not run" - - fpth0 = Path(case_mf2005.model_ws) / "ex01_mf2005_mp.mppth" - p = PathlineFile(fpth0) - maxtime0 = p.get_maxtime() - maxid0 = p.get_maxid() - p0 = p.get_alldata() - fpth1 = Path(case_mf6.model_ws) / "ex01_mf6_mp.mppth" - p = PathlineFile(fpth1) - maxtime1 = p.get_maxtime() - maxid1 = p.get_maxid() - p1 = p.get_alldata() - - # check maxid - msg = ( - f"pathline maxid ({maxid0}) in {os.path.basename(fpth0)} are not " - f"equal to the pathline maxid ({maxid1}) in {os.path.basename(fpth1)}" - ) - assert maxid0 == maxid1, msg - - -@requires_exe("mf2005", "mf6", "mp7") -def test_endpoint_output(function_tmpdir): - case_mf2005 = Mp7Cases.mp7_mf2005(function_tmpdir) - case_mf6 = Mp7Cases.mp7_mf6(function_tmpdir) - - case_mf2005.write_input() - success, buff = case_mf2005.run_model() - assert success, f"modpath model ({case_mf2005.name}) did not run" - - case_mf6.write_input() - success, buff = case_mf6.run_model() - assert success, f"modpath model ({case_mf6.name}) did not run" - - # if models not run then there will be no output - fpth0 = Path(case_mf2005.model_ws) / "ex01_mf2005_mp.mpend" - e = EndpointFile(fpth0) - maxtime0 = e.get_maxtime() - maxid0 = e.get_maxid() - maxtravel0 = e.get_maxtraveltime() - e0 = e.get_alldata() - fpth1 = Path(case_mf6.model_ws) / "ex01_mf6_mp.mpend" - e = EndpointFile(fpth1) - maxtime1 = e.get_maxtime() - maxid1 = e.get_maxid() - maxtravel1 = e.get_maxtraveltime() - e1 = e.get_alldata() - - # check maxid - msg = ( - f"endpoint maxid ({maxid0}) in {os.path.basename(fpth0)} are not " - f"equal to the endpoint maxid ({maxid1}) in {os.path.basename(fpth1)}" - ) - assert maxid0 == maxid1, msg - - # check that endpoint data are approximately the same - names = ["x", "y", "z", "x0", "y0", "z0"] - dtype = np.dtype( - [ - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("x0", np.float32), - ("y0", np.float32), - ("z0", np.float32), - ] - ) - d = np.rec.fromarrays((e0[name] - e1[name] for name in names), dtype=dtype) - msg = ( - f"endpoints in {os.path.basename(fpth0)} are not equal (within 1e-5) " - f"to the endpoints in {os.path.basename(fpth1)}" - ) - # assert not np.allclose(t0, t1), msg - - -@requires_exe("mf6") -def test_pathline_plotting(function_tmpdir): - ml = Mp7Cases.mp7_mf6(function_tmpdir) - ml.write_input() - success, buff = ml.run_model() - assert success, f"modpath model ({ml.name}) did not run" - - modelgrid = ml.flowmodel.modelgrid +@pytest.mark.parametrize("case", ["mf2005", "mf6"]) +def test_mp7_output(function_tmpdir, case, array_snapshot): + # build model + if case == "mf2005": + model = Mp7Cases.mp7_mf2005(function_tmpdir) + else: + model = Mp7Cases.mp7_mf6(function_tmpdir) + + # write and run model + model.write_input() + success, buff = model.run_model() + assert success, buff + + # check pathline output files + pathline_file = Path(model.model_ws) / f"ex01_{case}_mp.mppth" + p = PathlineFile(pathline_file) + assert p.get_maxid() == 22 + pathlines = p.get_alldata() + assert len(pathlines) == 23 + pathlines = pd.DataFrame(np.concatenate(pathlines)) + assert pathlines.particleid.nunique() == 23 + assert array_snapshot == pathlines.round(3).to_records(index=False) + + # check endpoint output files + endpoint_file = Path(model.model_ws) / f"ex01_{case}_mp.mpend" + e = EndpointFile(endpoint_file) + assert e.get_maxid() == 22 + endpoints = e.get_alldata() + assert len(endpoints) == 23 + + 
modelgrid = model.flowmodel.modelgrid nodes = list(range(modelgrid.nnodes)) - - fpth1 = Path(ml.model_ws) / "ex01_mf6_mp.mppth" - p = PathlineFile(fpth1) - p1 = p.get_alldata() - pls = p.get_destination_data(nodes) - pmv = PlotMapView(modelgrid=modelgrid, layer=0) pmv.plot_grid() - linecol = pmv.plot_pathline(pls, layer="all") - linecol2 = pmv.plot_pathline(p1, layer="all") + linecol = pmv.plot_pathline(p.get_destination_data(nodes), layer="all") + linecol2 = pmv.plot_pathline(p.get_alldata(), layer="all") if not len(linecol._paths) == len(linecol2._paths): raise AssertionError( "plot_pathline not properly splitting particles from recarray" @@ -896,7 +797,7 @@ def test_pathline_plotting(function_tmpdir): @requires_exe("mf6", "mp7") -def test_mp7sim_replacement(function_tmpdir, capfd): +def test_mp7sim_replacement(function_tmpdir): mf6sim = Mp7Cases.mf6(function_tmpdir) mf6sim.write_simulation() mf6sim.run_simulation() @@ -948,4 +849,52 @@ def test_mp7sim_replacement(function_tmpdir, capfd): mp.write_input() success, buff = mp.run_model() - assert success, f"modpath model ({mp.name}) did not run" + assert success, buff + + +@requires_exe("mf6", "mp7") +def test_flopy_2223(function_tmpdir): + mf6sim = Mp7Cases.mf6(function_tmpdir) + mf6sim.get_model().get_package("ic").strt = 0 + mf6sim.write_simulation() + mf6sim.run_simulation() + + # create mp7 model + mp = Modpath7( + modelname=f"{mf6sim.name}_mp", + flowmodel=mf6sim.get_model(mf6sim.name), + exe_name="mp7", + model_ws=mf6sim.sim_path, + ) + defaultiface6 = {"RCH": 6, "EVT": 6} + mpbas = Modpath7Bas(mp, porosity=0.1, defaultiface=defaultiface6) + part0 = ParticleData([(0, 0, 0)], structured=True, particleids=[0]) + pg0 = ParticleGroup( + particlegroupname="PG1", particledata=part0, filename="ex01a.sloc" + ) + mpsim = Modpath7Sim( + mp, + simulationtype="combined", + trackingdirection="forward", + weaksinkoption="pass_through", + weaksourceoption="pass_through", + budgetoutputoption="summary", + budgetcellnumbers=[1049, 1259], + traceparticledata=[1, 1000], + referencetime=[0, 0, 0.0], + stoptimeoption="extend", + timepointdata=[500, 1000.0], + zonedataoption="on", + zones=Mp7Cases.zones, + particlegroups=Mp7Cases.particlegroups, + ) + + mp.write_input() + success, buff = mp.run_model() + assert success, buff + + pathline_file = Path(mp.model_ws) / "ex01_mf6_mp.mppth" + p = PathlineFile(pathline_file) + pathlines = p.get_alldata() + assert len(pathlines) == 2 + assert all(len(pl) > 0 for pl in pathlines) diff --git a/autotest/test_mp7_cases.py b/autotest/test_mp7_cases.py index a85cdfe53..27a8f7ec3 100644 --- a/autotest/test_mp7_cases.py +++ b/autotest/test_mp7_cases.py @@ -65,7 +65,7 @@ class Mp7Cases: ) v = [(0,), (400,)] - pids = [1, 2] # [1000, 1001] + pids = [1, 2] part1 = ParticleData(v, structured=False, drape=1, particleids=pids) pg1 = ParticleGroup( particlegroupname="PG2", particledata=part1, filename="ex01a.pg2.sloc" diff --git a/autotest/test_mt3d.py b/autotest/test_mt3d.py index 27dfafff9..313809cc2 100644 --- a/autotest/test_mt3d.py +++ b/autotest/test_mt3d.py @@ -287,6 +287,7 @@ def test_mf2000_zeroth(function_tmpdir, mf2kmt3d_model_path): assert success, f"{mt.name} did not run" +@pytest.mark.slow @flaky(max_runs=3) @requires_exe("mfnwt", "mt3dms") @excludes_platform( diff --git a/autotest/test_particledata.py b/autotest/test_particledata.py index ed1f167ab..f3032cb6e 100644 --- a/autotest/test_particledata.py +++ b/autotest/test_particledata.py @@ -83,7 +83,7 @@ def 
test_particledata_structured_ctor_with_partlocs_as_list_of_tuples(): assert isinstance(data.particledata, pd.DataFrame) assert np.array_equal( data.particledata.to_records(index=False), - np.core.records.fromrecords( + np.rec.fromrecords( [ (0, 1, 1, 0.5, 0.5, 0.5, 0.0, 0), (0, 1, 2, 0.5, 0.5, 0.5, 0.0, 0), @@ -102,7 +102,7 @@ def test_particledata_structured_ctor_with_partlocs_as_ndarray(): assert isinstance(data.particledata, pd.DataFrame) assert np.array_equal( data.particledata.to_records(index=False), - np.core.records.fromrecords( + np.rec.fromrecords( [ (0, 1, 1, 0.5, 0.5, 0.5, 0.0, 0), (0, 1, 2, 0.5, 0.5, 0.5, 0.0, 0), @@ -121,7 +121,7 @@ def test_particledata_unstructured_ctor_with_partlocs_as_ndarray(): assert isinstance(data.particledata, pd.DataFrame) assert np.array_equal( data.particledata.to_records(index=False), - np.core.records.fromrecords( + np.rec.fromrecords( [ (0, 0.5, 0.5, 0.5, 0.0, 0), (1, 0.5, 0.5, 0.5, 0.0, 0), @@ -141,7 +141,7 @@ def test_particledata_unstructured_ctor_with_partlocs_as_list(): assert isinstance(data.particledata, pd.DataFrame) assert np.array_equal( data.particledata.to_records(index=False), - np.core.records.fromrecords( + np.rec.fromrecords( [ (0, 0.5, 0.5, 0.5, 0.0, 0), (1, 0.5, 0.5, 0.5, 0.0, 0), @@ -161,7 +161,7 @@ def test_particledata_unstructured_ctor_with_partlocs_as_ndarray(): assert isinstance(data.particledata, pd.DataFrame) assert np.array_equal( data.particledata.to_records(index=False), - np.core.records.fromrecords( + np.rec.fromrecords( [ (0, 0.5, 0.5, 0.5, 0.0, 0), (1, 0.5, 0.5, 0.5, 0.0, 0), @@ -181,7 +181,7 @@ def test_particledata_structured_ctor_with_partlocs_as_list_of_lists(): assert isinstance(data.particledata, pd.DataFrame) assert np.array_equal( data.particledata.to_records(index=False), - np.core.records.fromrecords( + np.rec.fromrecords( [ (0, 1, 1, 0.5, 0.5, 0.5, 0.0, 0), (0, 1, 2, 0.5, 0.5, 0.5, 0.0, 0), @@ -212,7 +212,7 @@ def test_particledata_to_prp_dis_1(): ) # each coord should be a tuple (irpt, k, i, j, x, y, z) # expected - exp = np.core.records.fromrecords( + exp = np.rec.fromrecords( [ (0, 1, 1, 0.5, 0.5, 0.5, 0.0, 0), (0, 1, 2, 0.5, 0.5, 0.5, 0.0, 0), @@ -658,7 +658,7 @@ def test_nodeparticledata_to_prp_dis_1_per_face(): assert len(rpts) == num_cells * 6 -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) def test_nodeparticledata_prp_disv_big(function_tmpdir): Lx = 10000.0 Ly = 10500.0 diff --git a/autotest/test_plot_map_view.py b/autotest/test_plot_map_view.py index 2303d24ec..e6fc424b6 100644 --- a/autotest/test_plot_map_view.py +++ b/autotest/test_plot_map_view.py @@ -214,7 +214,6 @@ def test_map_view_contour_array_structured(function_tmpdir, ndim, rng): plt.clf() elif ndim == 2: # 1 layer as 2D - # arr[-1, :] = np.nan # add nan to test nan handling pmv = PlotMapView(modelgrid=grid, layer=l) contours = pmv.contour_array( a=arr.reshape(nlay, nrow, ncol)[l, :, :] diff --git a/autotest/test_postprocessing.py b/autotest/test_postprocessing.py index b3b6282c4..d45ab84f3 100644 --- a/autotest/test_postprocessing.py +++ b/autotest/test_postprocessing.py @@ -231,13 +231,6 @@ def test_get_structured_faceflows_freyberg( # plt.show() plt.close("all") - # uv0 = np.column_stack((q0.U, q0.V)) - # uv1 = np.column_stack((q1.U, q1.V)) - # diff = uv1 - uv0 - # assert ( - # np.allclose(uv0, uv1) - # ), "get_faceflows quivers are not equal to specific discharge vectors" - @pytest.mark.mf6 @requires_exe("mf6") diff --git a/autotest/test_seawat.py b/autotest/test_seawat.py index 
53fb2c500..aba1c125c 100644 --- a/autotest/test_seawat.py +++ b/autotest/test_seawat.py @@ -121,7 +121,7 @@ def test_seawat_henry(function_tmpdir): mswt.write_input() success, buff = mswt.run_model(silent=False) - assert success + assert success, buff @pytest.mark.slow @@ -227,13 +227,25 @@ def test_seawat_load_and_write(function_tmpdir, namfile, binary): m.write_input() - # TODO: run models in separate CI workflow? - # with regression testing & benchmarking? - run = False - if run: - success, buff = m.run_model(silent=False) - assert success +@requires_exe("swtv4") +def test_seawat_load_only(function_tmpdir): + namfile = swt4_namfiles()[0] + model_name = Path(namfile).name + m = Seawat.load(model_name, model_ws=Path(namfile).parent, verbose=True) + m.change_model_ws(function_tmpdir, reset_external=True) + m.write_input() + + files = function_tmpdir.glob("*.adv") + adv_file = next(files) + assert adv_file is not None + adv_file.unlink() + + load_only = ["btn", "dis", "bas6", "oc"] + m = Seawat.load( + model_name, model_ws=function_tmpdir, load_only=load_only, verbose=True + ) + assert set([pkg.upper() for pkg in load_only]) == set(m.get_package_list()) def test_vdf_vsc(function_tmpdir): diff --git a/autotest/test_sfr.py b/autotest/test_sfr.py index 34a35ddc8..c50dcffe5 100644 --- a/autotest/test_sfr.py +++ b/autotest/test_sfr.py @@ -162,8 +162,6 @@ def sfr_process(mfnam, sfrfile, model_ws, outfolder): "test1tr.nam", "test1tr.sfr", mf2005_model_path, function_tmpdir ) - # assert list(sfr.dataset_5.keys()) == [0, 1] - m, sfr = sfr_process( "testsfr2_tab.nam", "testsfr2_tab_ICALC1.sfr", @@ -238,7 +236,7 @@ def interpolate_to_reaches(sfr): sfr.get_slopes(minimum_slope=-100, maximum_slope=100) reach_inds = 29 outreach = sfr.reach_data.outreach[reach_inds] - out_inds = np.where(sfr.reach_data.reachID == outreach) + out_inds = np.asarray(sfr.reach_data.reachID == outreach).nonzero() assert ( sfr.reach_data.slope[reach_inds] == ( @@ -373,7 +371,7 @@ def test_const(sfr_data): assert True -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_export(function_tmpdir, sfr_data): m = Modflow() dis = ModflowDis(m, 1, 10, 10, lenuni=2, itmuni=4) @@ -429,7 +427,6 @@ def test_example(mf2005_model_path): delimiter=",", names=True, ) - # segment_data = {0: ss_segment_data} channel_flow_data = { 0: { @@ -480,7 +477,6 @@ def test_example(mf2005_model_path): dataset_5=dataset_5, ) - # assert istcb2 in m.package_units assert istcb2 in m.output_units assert True diff --git a/autotest/test_shapefile_utils.py b/autotest/test_shapefile_utils.py index b347806f1..5d1292c7d 100644 --- a/autotest/test_shapefile_utils.py +++ b/autotest/test_shapefile_utils.py @@ -17,7 +17,7 @@ from .test_grid import minimal_unstructured_grid_info, minimal_vertex_grid_info -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_model_attributes_to_shapefile(example_data_path, function_tmpdir): # freyberg mf2005 model name = "freyberg" @@ -53,7 +53,7 @@ def test_model_attributes_to_shapefile(example_data_path, function_tmpdir): assert shpfile_path.exists() -@requires_pkg("pyproj", "shapefile", "shapely") +@requires_pkg("pyproj", "pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_write_grid_shapefile( minimal_unstructured_grid_info, minimal_vertex_grid_info, function_tmpdir ): diff --git a/autotest/test_specific_discharge.py b/autotest/test_specific_discharge.py index 8a9703059..cd218ab30 100644 --- 
a/autotest/test_specific_discharge.py +++ b/autotest/test_specific_discharge.py @@ -430,7 +430,7 @@ def specific_discharge_comprehensive(function_tmpdir): assert np.isnan(qx[1, 0, 1]) # overall check - overall = np.nansum(qz) # np.nansum(qx) + np.nansum(qy) + np.nansum(qz) + overall = np.nansum(qz) assert np.allclose(overall, -4.43224582939148) # plot discharge in map view diff --git a/autotest/test_str.py b/autotest/test_str.py index 85c23333f..ccb9e72bf 100644 --- a/autotest/test_str.py +++ b/autotest/test_str.py @@ -28,7 +28,7 @@ def test_str_issue1164(function_tmpdir, example_data_path): # adjust stress period data spd0 = m.str.stress_period_data[0] - spd0["flow"][0] = 2.1149856e6 # 450000000000000000.0000e-17 + spd0["flow"][0] = 2.1149856e6 m.str.stress_period_data[0] = spd0 # write model datasets and run fixed diff --git a/autotest/test_subwt.py b/autotest/test_subwt.py index da649c19b..484a0f3bd 100644 --- a/autotest/test_subwt.py +++ b/autotest/test_subwt.py @@ -55,16 +55,10 @@ def test_subwt(function_tmpdir, ibound_path): ss=1.0e-6, ) - # temp_ib = np.ones((ml.nrow,ml.ncol),dtype=int) - # np.savetxt('temp_ib.dat',temp_ib,fmt='%1d') ibound = np.loadtxt(ibound_path) ibound[ibound == 5] = -1 ModflowBas(ml, ibound=ibound, strt=100.0) - # sp1_wells = pd.DataFrame(data=np.argwhere(ibound == 2), columns=['i', 'j']) - # sp1_wells.loc[:, 'k'] = 0 - # sp1_wells.loc[:, 'flux'] = 2200.0 - # sp1_wells = sp1_wells.loc[:, ['k', 'i', 'j', 'flux']].values.tolist() idxs = np.argwhere(ibound == 2) sp1_wells = [] for idx in idxs: @@ -111,8 +105,6 @@ def test_subwt(function_tmpdir, ibound_path): ml.run_model() - # contents = [f for f in function_tmpdir.glob("*.hds")] - hds_geo = HeadFile( function_tmpdir / f"{ml.name}.swt_geostatic_stress.hds", text="stress", diff --git a/autotest/test_uzf.py b/autotest/test_uzf.py index b95a852bb..9726163f0 100644 --- a/autotest/test_uzf.py +++ b/autotest/test_uzf.py @@ -639,8 +639,6 @@ def test_uzf_negative_iuzfopt(function_tmpdir): seepsurfk=True, ) - # uzf.write_file(os.path.join(model_ws, "uzf_neg.uzf")) - ml.write_input() success, buff = ml.run_model() assert success, "UZF model with -1 iuzfopt failed to run" @@ -649,15 +647,12 @@ def test_uzf_negative_iuzfopt(function_tmpdir): "uzf_neg.nam", version="mfnwt", model_ws=function_tmpdir ) - pet = ml2.uzf.pet.array - extpd = ml2.uzf.pet.array - - assert ( - np.max(pet) == np.min(pet) and np.max(pet) != 0.1 - ), "Read error for iuzfopt less than 0" - assert ( - np.max(extpd) == np.min(extpd) and np.max(extpd) != 0.2 - ), "Read error for iuzfopt less than 0" + np.testing.assert_array_equal( + ml2.uzf.pet.array, np.full((2, 1, 10, 10), 0.1, np.float32) + ) + np.testing.assert_array_equal( + ml2.uzf.extdp.array, np.full((2, 1, 10, 10), 0.2, np.float32) + ) def test_optionsblock_auxillary_typo(): diff --git a/autotest/test_zonbud_utility.py b/autotest/test_zonbud_utility.py index 4dd16c70c..86991e6b2 100644 --- a/autotest/test_zonbud_utility.py +++ b/autotest/test_zonbud_utility.py @@ -113,8 +113,8 @@ def test_compare2zonebudget(cbc_f, zon_f, zbud_f, rtol): zb_arr = zba[zba["totim"] == time] fp_arr = fpa[fpa["totim"] == time] for name in fp_arr["name"]: - r1 = np.where(zb_arr["name"] == name) - r2 = np.where(fp_arr["name"] == name) + r1 = np.asarray(zb_arr["name"] == name).nonzero() + r2 = np.asarray(fp_arr["name"] == name).nonzero() if r1[0].shape[0] < 1 or r2[0].shape[0] < 1: continue if r1[0].shape[0] != r2[0].shape[0]: @@ -125,12 +125,6 @@ def test_compare2zonebudget(cbc_f, zon_f, zbud_f, rtol): mxdiff = np.abs(a1 - 
a2).max() idxloc = np.argmax(np.abs(a1 - a2)) - # txt = '{}: {} - Max: {} a1: {} a2: {}'.format(time, - # name, - # mxdiff, - # a1[idxloc], - # a2[idxloc]) - # print(txt) s = f"Zonebudget arrays do not match at time {time} ({name}): {mxdiff}." assert allclose, s diff --git a/code.json b/code.json deleted file mode 100644 index 89e967b7f..000000000 --- a/code.json +++ /dev/null @@ -1,49 +0,0 @@ -[ - { - "status": "Release", - "languages": [ - "python" - ], - "repositoryURL": "https://code.usgs.gov/usgs/modflow/flopy.git", - "disclaimerURL": "https://code.usgs.gov/usgs/modflow/flopy/blob/master/DISCLAIMER.md", - "name": "flopy", - "tags": [ - "MODFLOW", - "MODFLOW 6", - "MODFLOW-2005", - "MODFLOW-NWT", - "MODFLOW-USG", - "MODFLOW-2000", - "MT3DMS", - "MT3D-USGS", - "SEAWAT", - "MODPATH", - "groundwater model", - "transport model", - "python" - ], - "contact": { - "name": "Joseph D. Hughes", - "email": "jdhughes@usgs.gov" - }, - "downloadURL": "https://code.usgs.gov/usgs/modflow/flopy/archive/master.zip", - "vcs": "git", - "laborHours": -1, - "version": "3.7.0", - "date": { - "metadataLastUpdated": "2024-05-23" - }, - "organization": "U.S. Geological Survey", - "permissions": { - "licenses": [ - { - "URL": "https://code.usgs.gov/usgs/modflow/flopy/blob/master/LICENSE.md", - "name": "Public Domain, CC0-1.0" - } - ], - "usageType": "openSource" - }, - "homepageURL": "https://code.usgs.gov/usgs/modflow/flopy/", - "description": "FloPy is a python package to create, run, and post-process MODFLOW-based models." - } -] diff --git a/docs/PyPI_release.md b/docs/PyPI_release.md index 0c8acdce5..016a42854 100644 --- a/docs/PyPI_release.md +++ b/docs/PyPI_release.md @@ -30,16 +30,4 @@ How to Cite *Software/Code citation for FloPy:* -[Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.7.0: U.S. Geological Survey Software Release, 23 May 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH) - - -Disclaimer ----------- - -This software is provided "as is" and "as-available", and makes no -representations or warranties of any kind concerning the software, whether -express, implied, statutory, or other. This includes, without limitation, -warranties of title, merchantability, fitness for a particular purpose, -non-infringement, absence of latent or other defects, accuracy, or the -presence or absence of errors, whether or not known or discoverable. - +[Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.8.0: U.S. 
Geological Survey Software Release, 08 August 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH) diff --git a/etc/environment.yml b/etc/environment.yml index c16a75b5b..504f80c86 100644 --- a/etc/environment.yml +++ b/etc/environment.yml @@ -6,8 +6,9 @@ dependencies: # required - python>=3.8 - - numpy>=1.15.0,<2.0.0 + - numpy>=1.20.3 - matplotlib>=1.4.0 + - pandas>=2.0.0 # lint - cffconvert @@ -18,6 +19,7 @@ dependencies: - flaky - filelock - jupyter + - jupyter_client>=8.4.0 - jupytext - pip: - git+https://github.com/MODFLOW-USGS/modflow-devtools.git @@ -32,7 +34,6 @@ dependencies: # optional - affine - scipy - - pandas - netcdf4 - pyshp - rasterio diff --git a/examples/data/groundwater2023/geometries.yml b/examples/data/groundwater2023/geometries.yml new file mode 100644 index 000000000..c45d7d580 --- /dev/null +++ b/examples/data/groundwater2023/geometries.yml @@ -0,0 +1,137 @@ +boundary: |+ + 1.868012422360248456e+05 4.695652173913043953e+04 + 1.790372670807453396e+05 5.204968944099379587e+04 + 1.729813664596273447e+05 5.590062111801243009e+04 + 1.672360248447204940e+05 5.987577639751553215e+04 + 1.631987577639751253e+05 6.335403726708075556e+04 + 1.563664596273291972e+05 6.819875776397516893e+04 + 1.509316770186335489e+05 7.229813664596274612e+04 + 1.453416149068323139e+05 7.527950310559007630e+04 + 1.395962732919254631e+05 7.627329192546584818e+04 + 1.357142857142857101e+05 7.664596273291927355e+04 + 1.329192546583850926e+05 7.751552795031057030e+04 + 1.268633540372670832e+05 8.062111801242237561e+04 + 1.218944099378881947e+05 8.285714285714286962e+04 + 1.145962732919254486e+05 8.571428571428572468e+04 + 1.069875776397515583e+05 8.869565217391305487e+04 + 1.023291925465838431e+05 8.931677018633540138e+04 + 9.456521739130433707e+04 9.068322981366459862e+04 + 8.804347826086955320e+04 9.080745341614908830e+04 + 7.950310559006211406e+04 9.267080745341615693e+04 + 7.562111801242236106e+04 9.391304347826087906e+04 + 6.692546583850930620e+04 9.602484472049689793e+04 + 5.667701863354037778e+04 9.763975155279504543e+04 + 4.906832298136646568e+04 9.689440993788820924e+04 + 3.897515527950309479e+04 9.540372670807455142e+04 + 3.167701863354036323e+04 9.304347826086958230e+04 + 2.375776397515527788e+04 8.757763975155279331e+04 + 1.847826086956521613e+04 8.161490683229814749e+04 + 1.164596273291925172e+04 7.739130434782608063e+04 + 6.211180124223596977e+03 7.055900621118013805e+04 + 4.347826086956512881e+03 6.422360248447205959e+04 + 1.863354037267072272e+03 6.037267080745341809e+04 + 2.639751552795024509e+03 5.602484472049689793e+04 + 1.552795031055893560e+03 5.279503105590062478e+04 + 7.763975155279410956e+02 4.186335403726709046e+04 + 2.018633540372667312e+03 3.813664596273292409e+04 + 6.055900621118013078e+03 3.341614906832297856e+04 + 1.335403726708074100e+04 2.782608695652173992e+04 + 2.577639751552794405e+04 2.086956521739130767e+04 + 3.416149068322980747e+04 1.763975155279503815e+04 + 4.642857142857142753e+04 1.440993788819875044e+04 + 5.636645962732918997e+04 1.130434782608694877e+04 + 6.459627329192546313e+04 9.813664596273290954e+03 + 8.555900621118012350e+04 6.832298136645956220e+03 + 9.829192546583850344e+04 5.093167701863346338e+03 + 1.085403726708074391e+05 4.347826086956525614e+03 + 1.200310559006211115e+05 4.223602484472040487e+03 + 1.296583850931677007e+05 4.347826086956525614e+03 + 1.354037267080745369e+05 5.590062111801232277e+03 + 1.467391304347825935e+05 1.267080745341615875e+04 + 1.563664596273291972e+05 1.937888198757762802e+04 + 
1.630434782608695677e+05 2.198757763975155467e+04 + 1.694099378881987650e+05 2.434782608695652743e+04 + 1.782608695652173774e+05 2.981366459627329095e+04 + 1.833850931677018234e+05 3.180124223602484562e+04 + 1.868012422360248456e+05 3.577639751552795497e+04 +streamseg1: |+ + 1.868012422360248456e+05 4.086956521739130403e+04 + 1.824534161490683327e+05 4.086956521739130403e+04 + 1.770186335403726553e+05 4.124223602484472940e+04 + 1.737577639751552779e+05 4.186335403726709046e+04 + 1.703416149068323139e+05 4.310559006211180531e+04 + 1.670807453416148783e+05 4.397515527950310934e+04 + 1.636645962732919143e+05 4.484472049689441337e+04 + 1.590062111801242281e+05 4.559006211180124228e+04 + 1.555900621118012350e+05 4.559006211180124228e+04 + 1.510869565217391064e+05 4.546583850931677443e+04 + 1.479813664596273156e+05 4.534161490683229931e+04 + 1.453416149068323139e+05 4.496894409937888850e+04 + 1.377329192546583654e+05 4.447204968944099528e+04 + 1.326086956521739194e+05 4.447204968944099528e+04 + 1.285714285714285652e+05 4.434782608695652743e+04 + 1.245341614906832110e+05 4.472049689440993825e+04 + 1.215838509316770069e+05 4.509316770186335634e+04 + 1.161490683229813585e+05 4.509316770186335634e+04 + 1.125776397515527933e+05 4.459627329192547040e+04 + 1.074534161490683036e+05 4.385093167701864149e+04 + 1.018633540372670686e+05 4.347826086956522340e+04 + 9.798136645962731563e+04 4.360248447204969125e+04 + 9.223602484472049400e+04 4.310559006211180531e+04 + 8.602484472049689793e+04 4.198757763975155831e+04 + 7.981366459627327276e+04 4.173913043478261534e+04 + 7.468944099378881219e+04 4.248447204968944425e+04 + 7.034161490683228476e+04 4.385093167701864149e+04 + 6.785714285714285506e+04 4.621118012422360334e+04 + 6.583850931677018525e+04 4.919254658385094081e+04 + 6.319875776397513982e+04 5.192546583850932075e+04 + 6.009316770186335634e+04 5.677018633540373412e+04 + 5.605590062111800216e+04 5.950310559006211406e+04 + 5.279503105590060295e+04 6.124223602484472940e+04 + 4.751552795031056303e+04 6.211180124223603343e+04 + 3.990683229813664366e+04 6.335403726708075556e+04 + 3.276397515527949508e+04 6.409937888198757719e+04 + 2.934782608695651652e+04 6.509316770186336362e+04 + 2.546583850931676716e+04 6.832298136645962950e+04 +streamseg2: |+ + 7.025161490683228476e+04 4.375093167701864149e+04 + 6.816770186335404287e+04 4.273291925465839449e+04 + 6.490683229813665093e+04 4.211180124223603343e+04 + 6.164596273291925900e+04 4.173913043478262261e+04 + 5.776397515527951327e+04 4.124223602484472940e+04 + 5.450310559006211406e+04 4.049689440993789322e+04 + 4.984472049689442065e+04 3.937888198757764621e+04 + 4.534161490683231386e+04 3.801242236024845624e+04 + 4.114906832298137306e+04 3.664596273291926627e+04 + 3.913043478260868869e+04 3.565217391304348712e+04 + 3.649068322981366509e+04 3.416149068322981475e+04 + 3.322981366459628043e+04 3.242236024844721760e+04 + 3.012422360248447148e+04 3.105590062111801672e+04 + 2.608695652173913550e+04 2.957521739130435890e+04 +streamseg3: |+ + 1.059006211180124228e+05 4.335403726708074828e+04 + 1.029503105590062187e+05 4.223602484472050128e+04 + 1.004658385093167890e+05 4.024844720496894297e+04 + 9.937888198757765349e+04 3.788819875776398112e+04 + 9.627329192546584818e+04 3.490683229813664366e+04 + 9.285714285714286962e+04 3.316770186335403559e+04 + 8.897515527950311662e+04 3.093167701863354159e+04 + 8.338509316770188161e+04 2.795031055900621504e+04 + 7.872670807453416637e+04 2.670807453416148928e+04 + 7.329192546583851799e+04 2.385093167701863058e+04 + 
6.863354037267081731e+04 2.111801242236025064e+04 + 6.304347826086958230e+04 1.863354037267081003e+04 +streamseg4: |+ + 1.371118012422360480e+05 4.472049689440994553e+04 + 1.321428571428571595e+05 4.720496894409938250e+04 + 1.285714285714285652e+05 4.981366459627330187e+04 + 1.243788819875776535e+05 5.341614906832298584e+04 + 1.189440993788819906e+05 5.540372670807454415e+04 + 1.125776397515527933e+05 5.627329192546584818e+04 + 1.065217391304347839e+05 5.726708074534162733e+04 + 1.020186335403726698e+05 5.913043478260870324e+04 + 9.409937888198759174e+04 6.273291925465840177e+04 + 9.192546583850932075e+04 6.633540372670808574e+04 + 8.881987577639751544e+04 7.242236024844722124e+04 + 8.586956521739131131e+04 7.552795031055902655e+04 + 8.369565217391305487e+04 7.962732919254660374e+04 diff --git a/flopy/DISCLAIMER.md b/flopy/DISCLAIMER.md deleted file mode 100644 index 0e68af88f..000000000 --- a/flopy/DISCLAIMER.md +++ /dev/null @@ -1,9 +0,0 @@ -Disclaimer ----------- - -This software is provided "as is" and "as-available", and makes no -representations or warranties of any kind concerning the software, whether -express, implied, statutory, or other. This includes, without limitation, -warranties of title, merchantability, fitness for a particular purpose, -non-infringement, absence of latent or other defects, accuracy, or the -presence or absence of errors, whether or not known or discoverable. diff --git a/flopy/discretization/grid.py b/flopy/discretization/grid.py index 87e24d457..2681a671c 100644 --- a/flopy/discretization/grid.py +++ b/flopy/discretization/grid.py @@ -128,7 +128,7 @@ class Grid: ndarrays for the x, y, and z coordinates Methods - ---------- + ------- get_coords(x, y) transform point or array of points x, y from model coordinates to spatial coordinates @@ -455,15 +455,15 @@ def saturated_thickness(self, array, mask=None): bot = self.remove_confining_beds(bot) array = self.remove_confining_beds(array) - idx = np.where((array < top) & (array > bot)) + idx = np.asarray((array < top) & (array > bot)).nonzero() thickness[idx] = array[idx] - bot[idx] - idx = np.where(array <= bot) + idx = np.asarray(array <= bot).nonzero() thickness[idx] = 0.0 if mask is not None: if isinstance(mask, (float, int)): mask = [float(mask)] for mask_value in mask: - thickness[np.where(array == mask_value)] = np.nan + thickness[np.asarray(array == mask_value).nonzero()] = np.nan return thickness def saturated_thick(self, array, mask=None): @@ -591,11 +591,6 @@ def zvertices(self): def xyzvertices(self): raise NotImplementedError("must define xyzvertices in child class") - # @property - # def indices(self): - # raise NotImplementedError( - # 'must define indices in child ' - # 'class to use this base class') @property def cross_section_vertices(self): return self.xyzvertices[0], self.xyzvertices[1] @@ -962,8 +957,6 @@ def get_local_coords(self, x, y): x, y = geometry.transform( x, y, self._xoff, self._yoff, self.angrot_radians, inverse=True ) - # x -= self._xoff - # y -= self._yoff return x, y diff --git a/flopy/discretization/readme.md b/flopy/discretization/readme.md deleted file mode 100644 index 9316037ea..000000000 --- a/flopy/discretization/readme.md +++ /dev/null @@ -1 +0,0 @@ -## Development notes for grid module \ No newline at end of file diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py index befe50ded..13f15346c 100644 --- a/flopy/discretization/structuredgrid.py +++ b/flopy/discretization/structuredgrid.py @@ -140,7 +140,7 @@ class for a 
structured model grid y-location points for the edges of the model grid Methods - ---------- + ------- get_cell_vertices(i, j) returns vertices for a single cell at row, column i, j. """ @@ -930,7 +930,7 @@ def intersect(self, x, y, z=None, local=False, forgive=False): "x, y point given is outside of the model area" ) else: - col = np.where(xcomp)[0][-1] + col = np.asarray(xcomp).nonzero()[0][-1] ycomp = y < ye if np.all(ycomp) or not np.any(ycomp): @@ -941,7 +941,7 @@ def intersect(self, x, y, z=None, local=False, forgive=False): "x, y point given is outside of the model area" ) else: - row = np.where(ycomp)[0][-1] + row = np.asarray(ycomp).nonzero()[0][-1] if np.any(np.isnan([row, col])): row = col = np.nan if z is not None: diff --git a/flopy/discretization/vertexgrid.py b/flopy/discretization/vertexgrid.py index 580f6f9b4..ea49d1291 100644 --- a/flopy/discretization/vertexgrid.py +++ b/flopy/discretization/vertexgrid.py @@ -61,7 +61,7 @@ class for a vertex model grid returns list of cells and their vertices Methods - ---------- + ------- get_cell_vertices(cellid) returns vertices for a single cell at cellid. @@ -190,7 +190,6 @@ def shape(self): @property def top_botm(self): new_top = np.expand_dims(self._top, 0) - # new_botm = np.expand_dims(self._botm, 0) return np.concatenate((new_top, self._botm), axis=0) @property diff --git a/flopy/export/metadata.py b/flopy/export/metadata.py index f7bc32685..edb47af45 100644 --- a/flopy/export/metadata.py +++ b/flopy/export/metadata.py @@ -62,25 +62,19 @@ def __init__(self, sciencebase_id, model): # recommended global attributes self.naming_authority = "ScienceBase" # org. that provides the id - # self.history = None # This is a character array with a line for each invocation of a program that has modified the dataset. # Well-behaved generic netCDF applications should append a line containing: # date, time of day, user name, program name and command arguments. self.source = ( model.model_ws ) # The method of production of the original data. # If it was model-generated, source should name the model and its version. - # self.processing_level = None # A textual description of the processing (or quality control) level of the data. - # self.comment = None # Miscellaneous information about the data, not captured elsewhere. # This attribute is defined in the CF Conventions. self.acknowledgement = self._get_xml_attribute("datacred") - # self.license = None # - # self.standard_name_vocabulary = None self.date_created = self.sb["provenance"]["linkProcess"].get( "dateCreated" ) self.creator_name = self.creator.get("name") self.creator_email = self.creator.get("email") - # self.creator_url = self.sb['webLinks'][0].get('uri') self.creator_institution = self.creator["organization"].get( "displayText" ) diff --git a/flopy/export/netcdf.py b/flopy/export/netcdf.py index 6ad1daffb..eda44e117 100644 --- a/flopy/export/netcdf.py +++ b/flopy/export/netcdf.py @@ -187,8 +187,6 @@ def __init__( if self.model_grid.grid_type == "structured": self.dimension_names = ("layer", "y", "x") STANDARD_VARS.extend(["delc", "delr"]) - # elif self.model_grid.grid_type == 'vertex': - # self.dimension_names = ('layer', 'ncpl') else: raise Exception( f"Grid type {self.model_grid.grid_type} not supported." 
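The np.where rewrites in the hunks above and below all follow NumPy's documented guidance: the one-argument call np.where(condition) is shorthand for np.asarray(condition).nonzero(), and the explicit nonzero() form is preferred because it also behaves correctly for ndarray subclasses. A minimal standalone sketch of the equivalence, using made-up arrays rather than values from this changeset:

    import numpy as np

    top = np.array([4.0, 4.0, 4.0])
    bot = np.array([1.0, 1.0, 1.0])
    head = np.array([0.5, 2.5, 5.0])

    # both forms return the same tuple of index arrays
    mask = (head < top) & (head > bot)
    idx_where = np.where(mask)
    idx_nonzero = np.asarray(mask).nonzero()
    assert all((a == b).all() for a, b in zip(idx_where, idx_nonzero))

    # only the middle cell satisfies the mask, so only it gets a thickness
    thickness = np.zeros_like(head)
    thickness[idx_nonzero] = head[idx_nonzero] - bot[idx_nonzero]
    print(thickness)  # [0.  1.5 0. ]

The three-argument form np.where(condition, a, b) is unaffected; only the one-argument index lookups are rewritten in this changeset.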
@@ -400,9 +398,6 @@ def append(self, other, suffix="_1"): attrs["name"] = new_vname attrs["long_name"] = attrs["long_name"] + " " + suffix var = self.nc.variables[vname_norm] - # assert var.shape == array.shape,\ - # "{0} shape ({1}) doesn't make array shape ({2})".\ - # format(new_vname,str(var.shape),str(array.shape)) new_var = self.create_variable( new_vname, attrs, var.dtype, dimensions=var.dimensions ) @@ -637,7 +632,7 @@ def difference( d_data[np.isnan(d_data)] = FILLVALUE if mask_zero_diff: - d_data[np.where(d_data == 0.0)] = FILLVALUE + d_data[np.asarray(d_data == 0.0).nonzero()] = FILLVALUE var = new_net.create_variable( vname, attrs, s_var.dtype, dimensions=s_var.dimensions @@ -944,14 +939,6 @@ def initialize_file(self, time_values=None): "This grid HAS been rotated before being saved to NetCDF. " "To compute the unrotated grid, use the origin point and this array." ) - # else: - # vertices - # attribs = {"units": self.model_grid.lenuni.strip('s'), - # "long_name": NC_LONG_NAMES.get("vertices", - # "List of vertices used in the model by cell"), - # } - # vertices = self.create_variable('vertices', attribs, dimensions=('ncpl',)) - # vertices[:] = self.model_grid.vertices # Workaround for CF/CDM. # http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/ @@ -976,7 +963,7 @@ def initialize_group( Method to initialize a new group within a netcdf file. This group can have independent dimensions from the global dimensions - Parameters: + Parameters ---------- name : str name of the netcdf group @@ -1248,18 +1235,6 @@ def create_variable( if self.nc is None: self.initialize_file() - # check that the requested dimension exists and - # build up the chuck sizes - # chunks = [] - # for dimension in dimensions: - # assert self.nc.dimensions.get(dimension) is not None, \ - # "netcdf.create_variable() dimension not found:" + dimension - # chunk = self.chunks[dimension] - # assert chunk is not None, \ - # "netcdf.create_variable() chunk size of {0} is None in self.chunks". 
\ - # format(dimension) - # chunks.append(chunk) - self.var_attr_dict[name] = attributes var = self.nc.createVariable( @@ -1268,8 +1243,7 @@ def create_variable( dimensions, fill_value=self.fillvalue, zlib=True, - ) # , - # chunksizes=tuple(chunks)) + ) for k, v in attributes.items(): try: var.setncattr(k, v) @@ -1299,7 +1273,6 @@ def add_global_attributes(self, attr_dict): """ if self.nc is None: - # self.initialize_file() mess = ( "NetCDF.add_global_attributes() should only " "be called after the file has been initialized" @@ -1408,7 +1381,6 @@ def get_entries(ds): return stuff # get a list of the flopy classes - # packages = inspect.getmembers(flopy.modflow, inspect.isclass) packages = [(pp.name[0], pp) for pp in self.model.packagelist] # get a list of the NetCDF variables attr = [v.split("_")[-1] for v in self.nc.variables] diff --git a/flopy/export/shapefile_utils.py b/flopy/export/shapefile_utils.py index b32df38ec..7c2303874 100644 --- a/flopy/export/shapefile_utils.py +++ b/flopy/export/shapefile_utils.py @@ -322,7 +322,6 @@ def model_attributes_to_shapefile( ) continue name = shape_attr_name(a.name, keep_layer=True) - # name = a.name.lower() array_dict[name] = a.array elif a.data_type == DataType.array3d: # Not sure how best to check if an object has array data @@ -362,9 +361,7 @@ def model_attributes_to_shapefile( assert arr.shape == horz_shape name = f"{aname}_{ilay + 1}" array_dict[name] = arr - elif ( - a.data_type == DataType.transient2d - ): # elif isinstance(a, Transient2d): + elif a.data_type == DataType.transient2d: # Not sure how best to check if an object has array data try: assert a.array is not None @@ -379,9 +376,7 @@ def model_attributes_to_shapefile( arr = a.array[kper][0] assert arr.shape == horz_shape array_dict[name] = arr - elif ( - a.data_type == DataType.transientlist - ): # elif isinstance(a, MfList): + elif a.data_type == DataType.transientlist: try: list(a.masked_4D_arrays_itr()) except: diff --git a/flopy/export/utils.py b/flopy/export/utils.py index 1c3ee4d55..d0b577398 100644 --- a/flopy/export/utils.py +++ b/flopy/export/utils.py @@ -203,7 +203,7 @@ def _add_output_nc_variable( logger.log(f"creating array for {var_name}") for mask_val in mask_vals: - array[np.where(array == mask_val)] = np.nan + array[np.asarray(array == mask_val).nonzero()] = np.nan mx, mn = np.nanmax(array), np.nanmin(array) array[np.isnan(array)] = netcdf.FILLVALUE @@ -324,10 +324,11 @@ def output_helper( Returns ------- - None - Note: + None + + Note ---- - casts down double precision to single precision for netCDF files + casts down double precision to single precision for netCDF files """ assert isinstance(ml, (BaseModel, ModelInterface)) @@ -867,7 +868,7 @@ def mflist_export(f: Union[str, os.PathLike, NetCdf], mfl, **kwargs): export helper for MfList instances Parameters - ----------- + ---------- f : str or PathLike or NetCdf file path or existing export instance type (NetCdf only for now) mfl : MfList instance @@ -927,7 +928,6 @@ def mflist_export(f: Union[str, os.PathLike, NetCdf], mfl, **kwargs): arrays = mfl.to_array(kk) for name, array in arrays.items(): for k in range(array.shape[0]): - # aname = name+"{0:03d}_{1:02d}".format(kk, k) n = shapefile_utils.shape_attr_name(name, length=4) aname = f"{n}{k + 1}{int(kk) + 1}" array_dict[aname] = array[k] @@ -962,11 +962,7 @@ def mflist_export(f: Union[str, os.PathLike, NetCdf], mfl, **kwargs): elif isinstance(f, NetCdf) or isinstance(f, dict): base_name = mfl.package.name[0].lower() - # f.log("getting 4D masked arrays for 
{0}".format(base_name)) - # m4d = mfl.masked_4D_arrays - # f.log("getting 4D masked arrays for {0}".format(base_name)) - # for name, array in m4d.items(): for name, array in mfl.masked_4D_arrays_itr(): var_name = f"{base_name}_{name}" if isinstance(f, dict): @@ -1024,7 +1020,7 @@ def transient2d_export(f: Union[str, os.PathLike], t2d, fmt=None, **kwargs): export helper for Transient2d instances Parameters - ----------- + ---------- f : str or PathLike filename or existing export instance type (NetCdf only for now) t2d : Transient2d instance @@ -1076,9 +1072,7 @@ def transient2d_export(f: Union[str, os.PathLike], t2d, fmt=None, **kwargs): ibnd = np.abs(modelgrid.idomain).sum(axis=0) mask = ibnd == 0 - # f.log("getting 4D array for {0}".format(t2d.name_base)) array = t2d.array - # f.log("getting 4D array for {0}".format(t2d.name_base)) with np.errstate(invalid="ignore"): if array.dtype not in [int, np.int32, np.int64]: if mask is not None: @@ -1090,12 +1084,6 @@ def transient2d_export(f: Union[str, os.PathLike], t2d, fmt=None, **kwargs): mx, mn = np.nanmax(array), np.nanmin(array) array[array <= min_valid] = netcdf.FILLVALUE array[array >= max_valid] = netcdf.FILLVALUE - # if t2d.model.bas6 is not None: - # array[:, 0, t2d.model.bas6.ibound.array[0] == 0] = \ - # f.fillvalue - # elif t2d.model.btn is not None: - # array[:, 0, t2d.model.btn.icbund.array[0] == 0] = \ - # f.fillvalue var_name = t2d.name.replace("_", "") if isinstance(f, dict): @@ -1184,7 +1172,7 @@ def array3d_export(f: Union[str, os.PathLike], u3d, fmt=None, **kwargs): export helper for Transient2d instances Parameters - ----------- + ---------- f : str or PathLike filename or existing export instance type (NetCdf only for now) u3d : Util3d instance @@ -1236,41 +1224,22 @@ def array3d_export(f: Union[str, os.PathLike], u3d, fmt=None, **kwargs): if isinstance(var_name, list) or isinstance(var_name, tuple): var_name = var_name[0] var_name = var_name.replace(" ", "_").lower() - # f.log("getting 3D array for {0}".format(var_name)) array = u3d.array - # this is for the crappy vcont in bcf6 - # if isinstance(f,NetCdf) and array.shape != f.shape: - # f.log("broadcasting 3D array for {0}".format(var_name)) - # full_array = np.empty(f.shape) - # full_array[:] = np.nan - # full_array[:array.shape[0]] = array - # array = full_array - # f.log("broadcasting 3D array for {0}".format(var_name)) - # f.log("getting 3D array for {0}".format(var_name)) - # mask = None if modelgrid.idomain is not None and "ibound" not in var_name: mask = modelgrid.idomain == 0 if mask is not None and array.shape != mask.shape: - # f.log("broadcasting 3D array for {0}".format(var_name)) full_array = np.empty(mask.shape) full_array[:] = np.nan full_array[: array.shape[0]] = array array = full_array - # f.log("broadcasting 3D array for {0}".format(var_name)) # runtime warning issued in some cases - need to track down cause # happens when NaN is already in array with np.errstate(invalid="ignore"): if array.dtype not in [int, np.int32, np.int64]: - # if u3d.model.modelgrid.bas6 is not None and "ibound" not - # in var_name: - # array[u3d.model.modelgrid.bas6.ibound.array == 0] = - # np.nan - # elif u3d.model.btn is not None and 'icbund' not in var_name: - # array[u3d.model.modelgrid.btn.icbund.array == 0] = np.nan if mask is not None: array[mask] = np.nan array[array <= min_valid] = np.nan @@ -1410,9 +1379,7 @@ def array2d_export( elif isinstance(f, NetCdf) or isinstance(f, dict): # try to mask the array - assume layer 1 ibound is a good mask - # f.log("getting 2D 
array for {0}".format(u2d.name)) array = u2d.array - # f.log("getting 2D array for {0}".format(u2d.name)) with np.errstate(invalid="ignore"): if array.dtype not in [int, np.int32, np.int64]: diff --git a/flopy/export/vtk.py b/flopy/export/vtk.py index 07024a5b8..e25c94695 100644 --- a/flopy/export/vtk.py +++ b/flopy/export/vtk.py @@ -574,8 +574,9 @@ def _build_hfbs(self, pkg): pts = [] for v in v1: - # ix = np.where(v2 == v) - ix = np.where((v2.T[0] == v[0]) & (v2.T[1] == v[1])) + ix = np.asarray( + (v2.T[0] == v[0]) & (v2.T[1] == v[1]) + ).nonzero() if len(ix[0]) > 0 and len(pts) < 2: pts.append(v2[ix[0][0]]) @@ -653,7 +654,7 @@ def _build_point_scalar_array(self, array): ps_array[pt] = array[value["idx"][ix]] else: ps_graph = self._point_scalar_numpy_graph.copy() - idxs = np.where(np.isnan(array)) + idxs = np.asarray(np.isnan(array)).nonzero() not_graphed = np.isin(ps_graph, idxs[0]) ps_graph[not_graphed] = -1 ps_array = np.where(ps_graph >= 0, array[ps_graph], np.nan) @@ -1489,7 +1490,7 @@ def write(self, f: Union[str, os.PathLike], kper=None): self.add_array(array, name) if per in self.__transient_vector: - d = self.__transient_vector[d] + d = self.__transient_vector[per] for name, vector in d.items(): self.add_vector(vector, name) diff --git a/flopy/mbase.py b/flopy/mbase.py index ee7bbd63f..0cf0f727e 100644 --- a/flopy/mbase.py +++ b/flopy/mbase.py @@ -570,37 +570,6 @@ def laycbd(self): except AttributeError: return None - # we don't need these - no need for controlled access to array_free_format - # def set_free_format(self, value=True): - # """ - # Set the free format flag for the model instance - # - # Parameters - # ---------- - # value : bool - # Boolean value to set free format flag for model. (default is True) - # - # Returns - # ------- - # - # """ - # if not isinstance(value, bool): - # print('Error: set_free_format passed value must be a boolean') - # return False - # self.array_free_format = value - # - # def get_free_format(self): - # """ - # Return the free format flag for the model - # - # Returns - # ------- - # out : bool - # Free format flag for the model - # - # """ - # return self.array_free_format - def next_unit(self, i=None): if i is not None: self.__onunit__ = i - 1 @@ -759,7 +728,6 @@ def __getattr__(self, item): else: return None - # return self.get_package(item) # to avoid infinite recursion if ( item == "_packagelist" @@ -956,8 +924,8 @@ def remove_output( self.output_binflag.pop(i) self.output_packages.pop(i) else: - msg = " either fname or unit must be passed to remove_output()" - raise Exception(msg) + msg = "either fname or unit must be passed to remove_output()" + raise TypeError(msg) def get_output( self, fname: Optional[Union[str, os.PathLike]] = None, unit=None @@ -985,8 +953,8 @@ def get_output( return self.output_fnames[i] return None else: - msg = " either fname or unit must be passed to get_output()" - raise Exception(msg) + msg = "either fname or unit must be passed to get_output()" + raise TypeError(msg) def set_output_attribute( self, @@ -1020,11 +988,10 @@ def set_output_attribute( idx = i break else: - msg = ( - " either fname or unit must be passed " + raise TypeError( + "either fname or unit must be passed " "to set_output_attribute()" ) - raise Exception(msg) if attr is not None: if idx is not None: for key, value in attr.items: @@ -1065,8 +1032,8 @@ def get_output_attribute( idx = i break else: - raise Exception( - " either fname or unit must be passed " + raise TypeError( + "either fname or unit must be passed " "to 
set_output_attribute()" ) v = None @@ -1147,8 +1114,8 @@ def remove_external( if u == unit: plist.append(i) else: - msg = " either fname or unit must be passed to remove_external()" - raise Exception(msg) + msg = "either fname or unit must be passed to remove_external()" + raise TypeError(msg) # remove external file j = 0 for i in plist: @@ -1188,10 +1155,6 @@ def add_existing_package( ptype = filename.split(".")[-1] ptype = str(ptype).upper() - # for pak in self.packagelist: - # if ptype in pak.name: - # print("BaseModel.add_existing_package() warning: " +\ - # "replacing existing package {0}".format(ptype)) class Obj: pass @@ -1332,10 +1295,6 @@ def change_model_ws( os.makedirs(new_pth) except: raise OSError(f"{new_pth} not valid, workspace-folder") - # line = '\n{} not valid, workspace-folder '.format(new_pth) + \ - # 'was changed to {}\n'.format(os.getcwd()) - # print(line) - # new_pth = os.getcwd() # --reset the model workspace old_pth = self._model_ws @@ -1369,14 +1328,10 @@ def _reset_external(self, pth, old_pth): for ext_file, output in zip( self.external_fnames, self.external_output ): - # new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1]) # this is a wicked mess if output: - # new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1]) new_ext_file = ext_file else: - # fpth = os.path.abspath(os.path.join(old_pth, ext_file)) - # new_ext_file = os.path.relpath(fpth, os.path.abspath(pth)) fdir = os.path.dirname(ext_file) if fdir == "": fpth = os.path.abspath(os.path.join(old_pth, ext_file)) @@ -1408,8 +1363,6 @@ def _set_name(self, value): def __setattr__(self, key, value): if key == "free_format_input": - # if self.bas6 is not None: - # self.bas6.ifrefm = value super().__setattr__(key, value) elif key == "name": self._set_name(value) @@ -1476,10 +1429,8 @@ def run_model( normal_msg=normal_msg, ) - def load_results(self): - print("load_results not implemented") - - return None + def load_results(self, **kwargs): + raise NotImplementedError("load_results not implemented") def write_input(self, SelPackList=False, check=False): """ @@ -1536,25 +1487,20 @@ def write_input(self, SelPackList=False, check=False): print(" ") # write name file self.write_name_file() - # os.chdir(org_dir) def write_name_file(self): """ Every Package needs its own writenamefile function """ - raise Exception( - "IMPLEMENTATION ERROR: writenamefile must be overloaded" - ) + raise NotImplementedError("write_name_file must be overloaded") def set_model_units(self): """ Every model needs its own set_model_units method """ - raise Exception( - "IMPLEMENTATION ERROR: set_model_units must be overloaded" - ) + raise NotImplementedError("set_model_units must be overloaded") @property def name(self): @@ -1678,7 +1624,7 @@ def plot(self, SelPackList=None, **kwargs): MfList dictionary key. (default is None) Returns - ---------- + ------- axes : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis are returned. @@ -1703,34 +1649,10 @@ def plot(self, SelPackList=None, **kwargs): ) return axes - def to_shapefile( - self, filename: Union[str, os.PathLike], package_names=None, **kwargs - ): - """ - Wrapper function for writing a shapefile for the model grid. If - package_names is not None, then search through the requested packages - looking for arrays that can be added to the shapefile as attributes - - Parameters - ---------- - filename : str or PathLike - Path of the shapefile to write - package_names : list of package names (e.g. 
["dis","lpf"]) - Packages to export data arrays to shapefile. (default is None) - - Returns - ------- - None - - Examples - -------- - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> m.to_shapefile('model.shp', SelPackList) - - """ - warnings.warn("to_shapefile() is deprecated. use .export()") - self.export(filename, package_names=package_names) + def to_shapefile(self, *args, **kwargs): + """Raises AttributeError, use :meth:`export`.""" + # deprecated 3.2.4, changed to raise AttributeError version 3.8 + raise AttributeError(".to_shapefile() was removed; use .export()") def run_model( @@ -1829,8 +1751,6 @@ def run_model( def q_output(output, q): for line in iter(output.readline, b""): q.put(line) - # time.sleep(1) - # output.close() # create a list of arguments to pass to Popen if processors is not None: diff --git a/flopy/mf6/coordinates/modeldimensions.py b/flopy/mf6/coordinates/modeldimensions.py index 878f74a71..013efafd8 100644 --- a/flopy/mf6/coordinates/modeldimensions.py +++ b/flopy/mf6/coordinates/modeldimensions.py @@ -28,7 +28,7 @@ class DataDimensions: (optional) Methods - ---------- + ------- get_model_grid : () returns a model grid based on the current simulation data @@ -102,6 +102,15 @@ def model_subspace_size(self, subspace_string="", data_item_num=None): subspace_string ) + def get_cellid_size(self, data_item_name): + model_num = DatumUtil.cellid_model_num( + data_item_name, + self.structure.model_data, + self.package_dim.model_dim, + ) + model_grid = self.get_model_grid(model_num=model_num) + return model_grid.get_num_spatial_coordinates() + def get_model_dim(self, data_item_num, model_num=None): if ( self.package_dim.model_dim is None @@ -111,9 +120,14 @@ def get_model_dim(self, data_item_num, model_num=None): return self.package_dim.model_dim[0] else: if model_num is None: - model_num = self.structure.data_item_structures[data_item_num][ - -1 - ] + # see if the name of the data item indicates which model to use + item_name = self.structure.data_item_structures[ + data_item_num + ].name + if item_name[-2] == "m" and DatumUtil.is_int(item_name[-1]): + model_num = int(item_name[-1]) - 1 + else: + return self.package_dim.model_dim[0] if not ( len(self.structure.data_item_structures) > data_item_num ): @@ -133,8 +147,7 @@ def get_model_dim(self, data_item_num, model_num=None): f"{len(self.package_dim.model_dim)}." 
) - if DatumUtil.is_int(model_num): - return self.package_dim.model_dim[int(model_num)] + return self.package_dim.model_dim[model_num] class PackageDimensions: @@ -151,7 +164,7 @@ class PackageDimensions: Tuple representing the path to this package Methods - ---------- + ------- get_aux_variables : (model_num=0) returns the package's aux variables boundnames : (model_num=0) @@ -322,7 +335,7 @@ class ModelDimensions: object containing simulation time information Methods - ---------- + ------- get_model_grid : () returns a model grid based on the current simulation data diff --git a/flopy/mf6/coordinates/modelgrid.py b/flopy/mf6/coordinates/modelgrid.py index 7b3f69854..50a612511 100644 --- a/flopy/mf6/coordinates/modelgrid.py +++ b/flopy/mf6/coordinates/modelgrid.py @@ -20,7 +20,7 @@ class ModelCell: id of model cell Methods - ---------- + ------- See Also -------- @@ -50,7 +50,7 @@ class UnstructuredModelCell(ModelCell): name of the model Methods - ---------- + ------- get_cellid : () returns the cellid get_top : () @@ -341,7 +341,7 @@ class ModelGrid: DiscretizationType.DISU) Methods - ---------- + ------- grid_type : () returns the grid type grid_type_consistent : () @@ -789,7 +789,7 @@ class UnstructuredModelGrid(ModelGrid): contains all simulation related data Methods - ---------- + ------- get_unstruct_jagged_array_list : {} returns a dictionary of jagged arrays used in the unstructured grid diff --git a/flopy/mf6/coordinates/simulationtime.py b/flopy/mf6/coordinates/simulationtime.py index df58080b0..9ef891508 100644 --- a/flopy/mf6/coordinates/simulationtime.py +++ b/flopy/mf6/coordinates/simulationtime.py @@ -51,16 +51,6 @@ def get_num_steps(self): def get_mult(self): return self._tsmult - # def get_ts_start_time(self, timestep): - - # def get_sp_start_time(self, timestep): - - # def get_ts_end_time(self, timestep): - - # def get_sp_end_time(self, timestep): - - # def get_ts_length(self, timestep): - class SimulationTime: """ @@ -92,14 +82,6 @@ class SimulationTime: def __init__(self, simdata): self.simdata = simdata - # self.time_units = simdata[('TDIS', 'OPTIONS', 'time_units')] - # self.stress_periods = simdata[('TDIS', 'STRESS_PERIODS', - # 'perlen,nstp,tsmult') - # self.calendar_start_time = calendar_start_time - - # def get_stress_period_array(self): - # return np.arange(1, self.get_num_stress_periods(), 1, int) - def get_time_units(self): time_units = self.simdata.mfdata[ ("tdis", "options", "time_units") @@ -132,11 +114,3 @@ def get_sp_time_steps(self, sp_num): f"Stress period {sp_num} was requested but does not exist." ) return period_data[sp_num][1] - - # def get_stress_period(self, sp_num): - - # def remove_stress_period(self, num_stress_period): - - # def copy_append_stress_period(self, sp_num): - - # def split_stress_period(self, sp_num): diff --git a/flopy/mf6/data/mfdataarray.py b/flopy/mf6/data/mfdataarray.py index 786838083..661b91cc8 100644 --- a/flopy/mf6/data/mfdataarray.py +++ b/flopy/mf6/data/mfdataarray.py @@ -733,7 +733,7 @@ def _get_data(self, layer=None, apply_mult=False, **kwargs): "array" in kwargs and kwargs["array"] and isinstance(self, MFTransientArray) - and data is not [] + and data is not [] # noqa: F632 ): data = np.expand_dims(data, 0) return data @@ -1531,7 +1531,7 @@ def plot( List of unique values to be excluded from the plot. Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. @@ -2100,7 +2100,7 @@ def plot( extracted. (default is zero). 
Returns - ---------- + ------- axes : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. diff --git a/flopy/mf6/data/mfdatalist.py b/flopy/mf6/data/mfdatalist.py index f35147206..ab5a2aa66 100644 --- a/flopy/mf6/data/mfdatalist.py +++ b/flopy/mf6/data/mfdatalist.py @@ -136,7 +136,7 @@ def to_array(self, kper=0, mask=False): return array with np.nan instead of zero Returns - ---------- + ------- out : dict of numpy.ndarrays Dictionary of 3-D numpy arrays containing the stress period data for a selected stress period. The dictionary keys are the @@ -1451,7 +1451,7 @@ def plot( List of unique values to be excluded from the plot. Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. @@ -2122,7 +2122,7 @@ def plot( List of unique values to be excluded from the plot. Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. diff --git a/flopy/mf6/data/mfdataplist.py b/flopy/mf6/data/mfdataplist.py index dd397699d..48f1aa02e 100644 --- a/flopy/mf6/data/mfdataplist.py +++ b/flopy/mf6/data/mfdataplist.py @@ -3,6 +3,7 @@ import io import os import sys +import warnings import numpy as np import pandas @@ -303,36 +304,42 @@ def _add_cellid_fields(self, data, keep_existing=False): columns = data.columns.tolist() if isinstance(self._mg, StructuredGrid): if ( - "layer" in columns - and "row" in columns - and "column" in columns + "cellid_layer" in columns + and "cellid_row" in columns + and "cellid_column" in columns ): data["cellid"] = data[ - ["layer", "row", "column"] + ["cellid_layer", "cellid_row", "cellid_column"] ].apply(tuple, axis=1) if not keep_existing: data = data.drop( - columns=["layer", "row", "column"] + columns=[ + "cellid_layer", + "cellid_row", + "cellid_column", + ] ) elif isinstance(self._mg, VertexGrid): cell_2 = None - if "cell" in columns: - cell_2 = "cell" + if "cellid_cell" in columns: + cell_2 = "cellid_cell" - elif "ncpl" in columns: - cell_2 = "ncpl" - if cell_2 is not None and "layer" in columns: - data["cellid"] = data[["layer", cell_2]].apply( - tuple, axis=1 - ) + elif "cellid_ncpl" in columns: + cell_2 = "cellid_ncpl" + if cell_2 is not None and "cellid_layer" in columns: + data["cellid"] = data[ + ["cellid_layer", cell_2] + ].apply(tuple, axis=1) if not keep_existing: - data = data.drop(columns=["layer", cell_2]) + data = data.drop( + columns=["cellid_layer", cell_2] + ) elif isinstance(self._mg, UnstructuredGrid): - if "node" in columns: - data["cellid"] = data[["node"]].apply( + if "cellid_node" in columns: + data["cellid"] = data[["cellid_node"]].apply( tuple, axis=1 ) if not keep_existing: - data = data.drop(columns=["node"]) + data = data.drop(columns=["cellid_node"]) else: raise MFDataException( "ERROR: Unrecognized model grid " @@ -408,14 +415,20 @@ def _build_data_header(self): # get the appropriate cellid column headings for the # model's discretization type if isinstance(self._mg, StructuredGrid): - self._append_type_list("layer", i_type, True) - self._append_type_list("row", i_type, True) - self._append_type_list("column", i_type, True) + self._append_type_list( + "cellid_layer", i_type, True + ) + self._append_type_list("cellid_row", i_type, True) + self._append_type_list( + "cellid_column", i_type, True + ) elif isinstance(self._mg, VertexGrid): - self._append_type_list("layer", i_type, True) - self._append_type_list("cell", i_type, True) +
self._append_type_list( + "cellid_layer", i_type, True + ) + self._append_type_list("cellid_cell", i_type, True) elif isinstance(self._mg, UnstructuredGrid): - self._append_type_list("node", i_type, True) + self._append_type_list("cellid_node", i_type, True) else: raise MFDataException( "ERROR: Unrecognized model grid " @@ -496,42 +509,44 @@ def _untuple_cellids(self, pdata): try: pdata.insert( loc=field_idx, - column=self._unique_column_name(pdata, "layer"), + column=self._unique_column_name(pdata, "cellid_layer"), value=pdata.apply(lambda x: x[column_name][0], axis=1), ) except (ValueError, TypeError): self._untuple_manually( pdata, field_idx, - self._unique_column_name(pdata, "layer"), + self._unique_column_name(pdata, "cellid_layer"), column_name, 0, ) try: pdata.insert( loc=field_idx + 1, - column=self._unique_column_name(pdata, "row"), + column=self._unique_column_name(pdata, "cellid_row"), value=pdata.apply(lambda x: x[column_name][1], axis=1), ) except (ValueError, TypeError): self._untuple_manually( pdata, field_idx + 1, - self._unique_column_name(pdata, "row"), + self._unique_column_name(pdata, "cellid_row"), column_name, 1, ) try: pdata.insert( loc=field_idx + 2, - column=self._unique_column_name(pdata, "column"), + column=self._unique_column_name( + pdata, "cellid_column" + ), value=pdata.apply(lambda x: x[column_name][2], axis=1), ) except (ValueError, TypeError): self._untuple_manually( pdata, field_idx + 2, - self._unique_column_name(pdata, "column"), + self._unique_column_name(pdata, "cellid_column"), column_name, 2, ) @@ -539,48 +554,48 @@ def _untuple_cellids(self, pdata): try: pdata.insert( loc=field_idx, - column=self._unique_column_name(pdata, "layer"), + column=self._unique_column_name(pdata, "cellid_layer"), value=pdata.apply(lambda x: x[column_name][0], axis=1), ) except (ValueError, TypeError): self._untuple_manually( pdata, field_idx, - self._unique_column_name(pdata, "layer"), + self._unique_column_name(pdata, "cellid_layer"), column_name, 0, ) try: pdata.insert( loc=field_idx + 1, - column=self._unique_column_name(pdata, "cell"), + column=self._unique_column_name(pdata, "cellid_cell"), value=pdata.apply(lambda x: x[column_name][1], axis=1), ) except (ValueError, TypeError): self._untuple_manually( pdata, field_idx + 1, - self._unique_column_name(pdata, "cell"), + self._unique_column_name(pdata, "cellid_cell"), column_name, 1, ) elif isinstance(self._mg, UnstructuredGrid): - if column_name == "node": + if column_name == "cellid_node": # fixing a problem where node was specified as a tuple # make sure new column is named properly - column_name = "node_2" - pdata = pdata.rename(columns={"node": column_name}) + column_name = "cellid_node_2" + pdata = pdata.rename(columns={"cellid_node": column_name}) try: pdata.insert( loc=field_idx, - column=self._unique_column_name(pdata, "node"), + column=self._unique_column_name(pdata, "cellid_node"), value=pdata.apply(lambda x: x[column_name][0], axis=1), ) except (ValueError, TypeError): self._untuple_manually( pdata, field_idx, - self._unique_column_name(pdata, "node"), + self._unique_column_name(pdata, "cellid_node"), column_name, 0, ) @@ -815,7 +830,7 @@ def to_array(self, kper=0, mask=False): return array with np.nan instead of zero Returns - ---------- + ------- out : dict of numpy.ndarrays Dictionary of 3-D numpy arrays containing the stress period data for a selected stress period. 
The dictionary keys are the @@ -1134,20 +1149,32 @@ def _dataframe_check(self, data_frame): break return valid - def _try_pandas_read(self, fd_data_file): + def _try_pandas_read(self, fd_data_file, file_name): delimiter_list = ["\\s+", ","] for delimiter in delimiter_list: try: - # read flopy formatted data, entire file - data_frame = pandas.read_csv( - fd_data_file, - sep=delimiter, - names=self._header_names, - dtype=self._data_header, - comment="#", - index_col=False, - skipinitialspace=True, - ) + with warnings.catch_warnings(record=True) as warn: + # read flopy formatted data, entire file + data_frame = pandas.read_csv( + fd_data_file, + sep=delimiter, + names=self._header_names, + dtype=self._data_header, + comment="#", + index_col=False, + skipinitialspace=True, + ) + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + for warning in warn: + print( + "Pandas warning occurred while loading data " + f"{self.path}:" + ) + print(f' Data File: "{file_name}:"') + print(f' Pandas Message: "{warning.message}"') except BaseException: fd_data_file.seek(0) continue @@ -1189,13 +1216,15 @@ def _read_text_data(self, fd_data_file, first_line, external_file=False): ) io_file_data = io.StringIO("\n".join(file_data)) if external_file: - data_frame = self._try_pandas_read(io_file_data) + data_frame = self._try_pandas_read(io_file_data, fd_data_file.name) if data_frame is not None: self._decrement_id_fields(data_frame) else: # get number of rows of data if len(file_data) > 0: - data_frame = self._try_pandas_read(io_file_data) + data_frame = self._try_pandas_read( + io_file_data, fd_data_file.name + ) if data_frame is not None: self._decrement_id_fields(data_frame) return_val = [True, fd_data_file.readline()] @@ -1239,7 +1268,6 @@ def _save_binary_data(self, fd_data_file, data): file_access.write_binary_file( self._dataframe_to_recarray(data), fd_data_file, - self._model_or_sim.modeldiscrit, ) data_storage = self._get_storage_obj() data_storage.internal_data = None @@ -1281,13 +1309,12 @@ def _load_external_data(self, data_storage): ) np_data = file_access.read_binary_data_from_file( file_path, - self._model_or_sim.modeldiscrit, build_cellid=False, ) pd_data = pandas.DataFrame(np_data) if "col" in pd_data: # keep layer/row/column names consistent - pd_data = pd_data.rename(columns={"col": "column"}) + pd_data = pd_data.rename(columns={"col": "cellid_column"}) self._decrement_id_fields(pd_data) else: with open(file_path, "r") as fd_data_file: @@ -1439,16 +1466,17 @@ def _update_id_fields(self, id_fields, data_item_struct, data_frame): an item in the expected data structure and the data provided. """ if data_item_struct.numeric_index or data_item_struct.is_cellid: - if data_item_struct.name.lower() == "cellid": + name = data_item_struct.name.lower() + if name.startswith("cellid"): if isinstance(self._mg, StructuredGrid): - id_fields.append("layer") - id_fields.append("row") - id_fields.append("column") + id_fields.append(f"{name}_layer") + id_fields.append(f"{name}_row") + id_fields.append(f"{name}_column") elif isinstance(self._mg, VertexGrid): - id_fields.append("layer") - id_fields.append("cell") + id_fields.append(f"{name}_layer") + id_fields.append(f"{name}_cell") elif isinstance(self._mg, UnstructuredGrid): - id_fields.append("node") + id_fields.append(f"{name}_node") else: raise MFDataException( "ERROR: Unrecognized model grid " @@ -1882,7 +1910,7 @@ def plot( List of unique values to be excluded from the plot. 
Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. @@ -2604,7 +2632,7 @@ def plot( List of unique values to be excluded from the plot. Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. diff --git a/flopy/mf6/data/mfdatascalar.py b/flopy/mf6/data/mfdatascalar.py index 8eb482428..94c4f1e3f 100644 --- a/flopy/mf6/data/mfdatascalar.py +++ b/flopy/mf6/data/mfdatascalar.py @@ -668,7 +668,8 @@ def plot(self, filename_base=None, file_extension=None, **kwargs): """ Helper method to plot scalar objects - Parameters: + Parameters + ---------- scalar : flopy.mf6.data.mfscalar object filename_base : str Base file name that will be used to automatically generate file @@ -976,7 +977,7 @@ def plot( extracted. (default is zero). Returns - ---------- + ------- axes : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. diff --git a/flopy/mf6/data/mfdatastorage.py b/flopy/mf6/data/mfdatastorage.py index 86fc50e24..7f66574c5 100644 --- a/flopy/mf6/data/mfdatastorage.py +++ b/flopy/mf6/data/mfdatastorage.py @@ -1597,15 +1597,6 @@ def _build_recarray(self, data, key, autofill): self._verify_list(new_data) return new_data - def _get_cellid_size(self, data_item_name): - model_num = DatumUtil.cellid_model_num( - data_item_name, - self.data_dimensions.structure.model_data, - self.data_dimensions.package_dim.model_dim, - ) - model_grid = self.data_dimensions.get_model_grid(model_num=model_num) - return model_grid.get_num_spatial_coordinates() - def make_tuple_cellids(self, data): # convert cellids from individual layer, row, column fields into # tuples (layer, row, column) @@ -1616,7 +1607,7 @@ def make_tuple_cellids(self, data): new_line = [] for item, is_cellid in zip(line, self.recarray_cellid_list_ex): if is_cellid: - cellid_size = self._get_cellid_size( + cellid_size = self.data_dimensions.get_cellid_size( self._recarray_type_list[data_idx][0], ) current_cellid += (item,) @@ -1761,10 +1752,7 @@ def store_external( self._stress_period, ) file_access.write_binary_file( - self.layer_storage.first_item().internal_data, - fp, - self._model_or_sim.modeldiscrit, - precision="double", + self.layer_storage.first_item().internal_data, fp ) else: # make sure folder exists @@ -1802,15 +1790,6 @@ def store_external( # set as external data self.layer_storage.first_item().internal_data = None else: - # if self.layer_storage.in_shape(layer_new): - # factor = self.layer_storage[layer_new].factor - # if preserve_record: - # adjustment = multiplier / factor - # if adjustment != 1.0: - # convert numbers to be multiplied by the - # original factor - # data = data * adjustment - # store data externally in file data_size = self.get_data_size(layer_new) data_type = data_dim.structure.data_item_structures[0].type @@ -2030,9 +2009,7 @@ def external_to_internal( self._stress_period, ) if self.layer_storage[layer].binary: - data = file_access.read_binary_data_from_file( - read_file, self._model_or_sim.modeldiscrit - ) + data = file_access.read_binary_data_from_file(read_file) data_out = self._build_recarray(data, layer, False) else: with open(read_file) as fd_read_file: @@ -2144,7 +2121,7 @@ def _validate_cellid(self, arr_line, data_index, data_item): return False if arr_line is None: return False - cellid_size = self._get_cellid_size(data_item.name) + cellid_size = 
self.data_dimensions.get_cellid_size(data_item.name) model_grid = self.data_dimensions.get_model_grid() if cellid_size + data_index > len(arr_line): return False @@ -2291,7 +2268,7 @@ def _verify_list(self, data): # this is a cell id. verify that it contains the # correct number of integers if cellid_size is None: - cellid_size = self._get_cellid_size( + cellid_size = self.data_dimensions.get_cellid_size( self._recarray_type_list[index][0] ) if ( @@ -2558,7 +2535,7 @@ def _fill_dimensions(self, data_iter, dimensions): data_array = np.ndarray(shape=dimensions, dtype=np_dtype) # fill array for index in ArrayIndexIter(dimensions): - data_array.itemset(index, next(data_iter)) + data_array[index] = next(data_iter) return data_array elif self.data_structure_type == DataStructureType.scalar: return next(data_iter) @@ -2833,6 +2810,7 @@ def build_type_list( data_item, data_set, data, + data_item_num=index, repeating_key=key, min_size=min_size, ) @@ -2873,7 +2851,9 @@ def build_type_list( ): # A cellid is a single entry (tuple) in the # recarray. Adjust dimensions accordingly. - size = self._get_cellid_size(data_item.name) + size = self.data_dimensions.get_cellid_size( + data_item.name + ) data_item.remove_cellid(resolved_shape, size) if not data_item.optional or not min_size: for index in range(0, resolved_shape[0]): @@ -2910,7 +2890,7 @@ def _append_type_lists(self, name, data_type, iscellid): if iscellid and self._model_or_sim.model_type is not None: # write each part of the cellid out as a separate entry # to _recarray_list_list_ex - cellid_size = self._get_cellid_size(name) + cellid_size = self.data_dimensions.get_cellid_size(name) # determine header for different grid types if cellid_size == 1: self._do_ex_list_append(name, int, iscellid) diff --git a/flopy/mf6/data/mfdatautil.py b/flopy/mf6/data/mfdatautil.py index 9a4d6cdbf..ec244c3fe 100644 --- a/flopy/mf6/data/mfdatautil.py +++ b/flopy/mf6/data/mfdatautil.py @@ -155,7 +155,7 @@ def list_to_array(sarr, model_grid, kper=0, mask=False): return array with np.nan instead of zero Returns - ---------- + ------- out : dict of numpy.ndarrays Dictionary of 3-D numpy arrays containing the stress period data for a selected stress period. 
The dictionary keys are the diff --git a/flopy/mf6/data/mffileaccess.py b/flopy/mf6/data/mffileaccess.py index 73f19f92b..e64ed695d 100644 --- a/flopy/mf6/data/mffileaccess.py +++ b/flopy/mf6/data/mffileaccess.py @@ -1046,18 +1046,14 @@ def __init__( self._last_line_info = [] self.simple_line = False - def read_binary_data_from_file( - self, read_file, modelgrid, precision="double", build_cellid=True - ): + def read_binary_data_from_file(self, read_file, build_cellid=True): # read from file - header, int_cellid_indexes, ext_cellid_indexes = self._get_header( - modelgrid, precision - ) + header, int_cellid_indexes, ext_cellid_indexes = self._get_header() file_array = np.fromfile(read_file, dtype=header, count=-1) if not build_cellid: return file_array # build data list for recarray - cellid_size = len(self._get_cell_header(modelgrid)) + cellid_size = {} data_list = [] for record in file_array: data_record = () @@ -1067,9 +1063,18 @@ def read_binary_data_from_file( if index in ext_cellid_indexes: current_cellid += (data_item - 1,) current_cellid_size += 1 - if current_cellid_size == cellid_size: - data_record += current_cellid - data_record = (data_record,) + rec_len = len(data_record) + if rec_len not in cellid_size: + data_item_struct = self.structure.data_item_structures[ + rec_len + ] + cellid_size[rec_len] = ( + self._data_dimensions.get_cellid_size( + data_item_struct.name + ) + ) + if current_cellid_size == cellid_size[rec_len]: + data_record += (current_cellid,) current_cellid = () current_cellid_size = 0 else: @@ -1077,18 +1082,14 @@ def read_binary_data_from_file( data_list.append(data_record) return data_list - def write_binary_file( - self, data, fname, modelgrid=None, precision="double" - ): + def write_binary_file(self, data, fname): fd = self._open_ext_file(fname, binary=True, write=True) - data_array = self._build_data_array(data, modelgrid, precision) + data_array = self._build_data_array(data) data_array.tofile(fd) fd.close() - def _build_data_array(self, data, modelgrid, precision): - header, int_cellid_indexes, ext_cellid_indexes = self._get_header( - modelgrid, precision - ) + def _build_data_array(self, data): + header, int_cellid_indexes, ext_cellid_indexes = self._get_header() data_list = [] for record in data: new_record = () @@ -1104,7 +1105,8 @@ def _build_data_array(self, data, modelgrid, precision): data_list.append(new_record) return np.array(data_list, dtype=header) - def _get_header(self, modelgrid, precision): + def _get_header(self): + np_int_type = np.int32 np_flt_type = np.float64 header = [] int_cellid_indexes = {} @@ -1112,14 +1114,21 @@ def _get_header(self, modelgrid, precision): ext_index = 0 for index, di_struct in enumerate(self.structure.data_item_structures): if di_struct.is_cellid: - cell_header = self._get_cell_header(modelgrid) + cell_header = self._get_cell_header( + di_struct, + self.structure.data_item_structures, + index, + ) header += cell_header int_cellid_indexes[index] = True for index in range(ext_index, ext_index + len(cell_header)): ext_cellid_indexes[index] = True ext_index += len(cell_header) elif not di_struct.optional: - header.append((di_struct.name, np_flt_type)) + if di_struct.type == DatumType.integer: + header.append((di_struct.name, np_int_type)) + else: + header.append((di_struct.name, np_flt_type)) ext_index += 1 elif di_struct.name == "aux": aux_var_names = ( @@ -1132,13 +1141,21 @@ def _get_header(self, modelgrid, precision): ext_index += 1 return header, int_cellid_indexes, ext_cellid_indexes - def 
_get_cell_header(self, modelgrid): - if modelgrid.grid_type == "structured": - return [("layer", np.int32), ("row", np.int32), ("col", np.int32)] - elif modelgrid.grid_type == "vertex": - return [("layer", np.int32), ("ncpl", np.int32)] + def _get_cell_header(self, data_item, data_set, index): + cellid_size = self._data_dimensions.get_cellid_size(data_item.name) + if cellid_size == 3: + return [ + (f"{data_item.name}_layer", np.int32), + (f"{data_item.name}_row", np.int32), + (f"{data_item.name}_column", np.int32), + ] + elif cellid_size == 2: + return [ + (f"{data_item.name}_layer", np.int32), + (f"{data_item.name}_cell", np.int32), + ] else: - return [("nodes", np.int32)] + return [(f"{data_item.name}_nodes", np.int32)] def load_from_package( self, first_line, file_handle, storage, pre_data_comments=None diff --git a/flopy/mf6/data/mfstructure.py b/flopy/mf6/data/mfstructure.py index f45625ed5..34a78a780 100644 --- a/flopy/mf6/data/mfstructure.py +++ b/flopy/mf6/data/mfstructure.py @@ -63,7 +63,7 @@ class Dfn: ----- Examples - ---- + -------- """ def __init__(self): @@ -179,7 +179,7 @@ class DfnPackage(Dfn): ----- Examples - ---- + -------- """ def __init__(self, package): @@ -447,7 +447,7 @@ class DfnFile(Dfn): ----- Examples - ---- + -------- """ def __init__(self, file): @@ -1440,7 +1440,6 @@ def __init__(self, data_item, model_data, package_type, dfn_list): self.parameter_name = data_item.parameter_name self.one_per_pkg = data_item.one_per_pkg - # self.data_item_structures_dict = {} self.data_item_structures = [] self.expected_data_items = {} self.shape = data_item.shape diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index 62945eec4..281173583 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -572,15 +572,10 @@ def modelgrid(self): else: return self._modelgrid - if self.get_grid_type() != DiscretizationType.DISV: - # get coordinate data from dis file - xorig = dis.xorigin.get_data() - yorig = dis.yorigin.get_data() - angrot = dis.angrot.get_data() - else: - xorig = self._modelgrid.xoffset - yorig = self._modelgrid.yoffset - angrot = self._modelgrid.angrot + # get coordinate data from dis file + xorig = dis.xorigin.get_data() + yorig = dis.yorigin.get_data() + angrot = dis.angrot.get_data() # resolve offsets if xorig is None: @@ -1275,11 +1270,15 @@ def get_ims_package(self): ------- IMS package : ModflowIms """ - solution_group = self.simulation.name_file.solutiongroup.get_data() + solution_group = self.simulation.name_file.solutiongroup.get_data(0) for record in solution_group: - for model_name in record[2:]: - if model_name == self.name: - return self.simulation.get_solution_package(record[1]) + for name in record.dtype.names: + if name == "slntype" or name == "slnfname": + continue + if record[name] == self.name: + return self.simulation.get_solution_package( + record.slnfname + ) return None def get_steadystate_list(self): diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index 43a49e5c3..3c809d667 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -1473,7 +1473,6 @@ def _write_block(self, fd, block_header, ext_file_action): if basic_list: ext_fname = dataset.external_file_name() if ext_fname is not None: - # if dataset.has_modified_ext_data(): binary = dataset.binary_ext_data() # write block contents to external file fd_main, fd = self._prepare_external( @@ -1503,7 +1502,6 @@ def _write_block(self, fd, block_header, ext_file_action): if basic_list: ext_fname = dataset.external_file_name(transient_key) if ext_fname is not None: 
- # if dataset.has_modified_ext_data(transient_key): binary = dataset.binary_ext_data(transient_key) # write block contents to external file fd_main, fd = self._prepare_external( @@ -3279,7 +3277,7 @@ def plot(self, **kwargs): MfList dictionary key. (default is None) Returns - ---------- + ------- axes : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis are returned. diff --git a/flopy/mf6/mfsimbase.py b/flopy/mf6/mfsimbase.py index e924bfebf..793bee6b9 100644 --- a/flopy/mf6/mfsimbase.py +++ b/flopy/mf6/mfsimbase.py @@ -603,7 +603,7 @@ def __repr__(self): Override __repr__ to print custom string. Returns - -------- + ------- repr string : str string describing object @@ -615,7 +615,7 @@ def __str__(self): Override __str__ to print custom string. Returns - -------- + ------- str string : str string describing object @@ -671,7 +671,7 @@ def model_names(self): Return a list of model names associated with this simulation. Returns - -------- + ------- list: list of model names """ @@ -683,7 +683,7 @@ def exchange_files(self): Return list of exchange files associated with this simulation. Returns - -------- + ------- list: list of exchange names """ @@ -1699,7 +1699,7 @@ def run_simulation( default is None, i.e. use the builtion print Returns - -------- + ------- success : bool buff : list of lines of stdout @@ -1786,7 +1786,7 @@ def model_dict(self): Return a dictionary of models associated with this simulation. Returns - -------- + ------- model dict : dict dictionary of models @@ -1805,7 +1805,7 @@ def get_model(self, model_name=None): will get the first model. Returns - -------- + ------- model : MFModel """ @@ -1833,7 +1833,7 @@ def get_exchange_file(self, filename): Name of exchange file to get Returns - -------- + ------- exchange package : MFPackage """ @@ -1853,7 +1853,7 @@ def get_file(self, filename): Name of mover file to get Returns - -------- + ------- mover package : MFPackage """ @@ -2076,7 +2076,7 @@ def register_package( Produce a filename for this package Returns - -------- + ------- (path : tuple, package structure : MFPackageStructure) """ @@ -2213,7 +2213,7 @@ def register_model(self, model, model_type, model_name, model_namefile): Solution group of model Returns - -------- + ------- model_structure_object : MFModelStructure """ @@ -2262,7 +2262,7 @@ def get_solution_package(self, key): solution package file name Returns - -------- + ------- solution_package : MFPackage """ @@ -2318,7 +2318,7 @@ def is_valid(self): Returns - -------- + ------- valid : bool Whether this is a valid simulation @@ -2556,7 +2556,7 @@ def plot( MFList dictionary key. (default is None) Returns - -------- + ------- axes: (list) matplotlib.pyplot.axes objects diff --git a/flopy/mf6/modflow/mfgnc.py b/flopy/mf6/modflow/mfgnc.py index 434f39752..b40a6493a 100644 --- a/flopy/mf6/modflow/mfgnc.py +++ b/flopy/mf6/modflow/mfgnc.py @@ -229,7 +229,7 @@ class GncPackages(mfpackage.MFChildPackages): GncPackages is a container class for the ModflowGnc class. Methods - ---------- + ------- initialize Initializes a new ModflowGnc package removing any sibling child packages attached to the same parent package. See ModflowGnc init diff --git a/flopy/mf6/modflow/mfgwemve.py b/flopy/mf6/modflow/mfgwemve.py index e57ef4646..744439c1e 100644 --- a/flopy/mf6/modflow/mfgwemve.py +++ b/flopy/mf6/modflow/mfgwemve.py @@ -197,7 +197,7 @@ class GwemvePackages(mfpackage.MFChildPackages): GwemvePackages is a container class for the ModflowGwemve class. 
Methods - ---------- + ------- initialize Initializes a new ModflowGwemve package removing any sibling child packages attached to the same parent package. See ModflowGwemve init diff --git a/flopy/mf6/modflow/mfgwfgnc.py b/flopy/mf6/modflow/mfgwfgnc.py index 445737947..7f2a16ec1 100644 --- a/flopy/mf6/modflow/mfgwfgnc.py +++ b/flopy/mf6/modflow/mfgwfgnc.py @@ -229,7 +229,7 @@ class GwfgncPackages(mfpackage.MFChildPackages): GwfgncPackages is a container class for the ModflowGwfgnc class. Methods - ---------- + ------- initialize Initializes a new ModflowGwfgnc package removing any sibling child packages attached to the same parent package. See ModflowGwfgnc init diff --git a/flopy/mf6/modflow/mfgwfmvr.py b/flopy/mf6/modflow/mfgwfmvr.py index 73949339d..1423f652a 100644 --- a/flopy/mf6/modflow/mfgwfmvr.py +++ b/flopy/mf6/modflow/mfgwfmvr.py @@ -406,7 +406,7 @@ class GwfmvrPackages(mfpackage.MFChildPackages): GwfmvrPackages is a container class for the ModflowGwfmvr class. Methods - ---------- + ------- initialize Initializes a new ModflowGwfmvr package removing any sibling child packages attached to the same parent package. See ModflowGwfmvr init diff --git a/flopy/mf6/modflow/mfgwtmvt.py b/flopy/mf6/modflow/mfgwtmvt.py index 9bfdf2438..d67b24494 100644 --- a/flopy/mf6/modflow/mfgwtmvt.py +++ b/flopy/mf6/modflow/mfgwtmvt.py @@ -197,7 +197,7 @@ class GwtmvtPackages(mfpackage.MFChildPackages): GwtmvtPackages is a container class for the ModflowGwtmvt class. Methods - ---------- + ------- initialize Initializes a new ModflowGwtmvt package removing any sibling child packages attached to the same parent package. See ModflowGwtmvt init diff --git a/flopy/mf6/modflow/mfmvr.py b/flopy/mf6/modflow/mfmvr.py index 2d6841226..2d30d3e13 100644 --- a/flopy/mf6/modflow/mfmvr.py +++ b/flopy/mf6/modflow/mfmvr.py @@ -406,7 +406,7 @@ class MvrPackages(mfpackage.MFChildPackages): MvrPackages is a container class for the ModflowMvr class. Methods - ---------- + ------- initialize Initializes a new ModflowMvr package removing any sibling child packages attached to the same parent package. See ModflowMvr init diff --git a/flopy/mf6/modflow/mfmvt.py b/flopy/mf6/modflow/mfmvt.py index 8c16eea4a..a4994f084 100644 --- a/flopy/mf6/modflow/mfmvt.py +++ b/flopy/mf6/modflow/mfmvt.py @@ -197,7 +197,7 @@ class MvtPackages(mfpackage.MFChildPackages): MvtPackages is a container class for the ModflowMvt class. Methods - ---------- + ------- initialize Initializes a new ModflowMvt package removing any sibling child packages attached to the same parent package. See ModflowMvt init diff --git a/flopy/mf6/modflow/mfutlats.py b/flopy/mf6/modflow/mfutlats.py index c8752d9f6..09925eb87 100644 --- a/flopy/mf6/modflow/mfutlats.py +++ b/flopy/mf6/modflow/mfutlats.py @@ -177,7 +177,7 @@ class UtlatsPackages(mfpackage.MFChildPackages): UtlatsPackages is a container class for the ModflowUtlats class. Methods - ---------- + ------- initialize Initializes a new ModflowUtlats package removing any sibling child packages attached to the same parent package. See ModflowUtlats init diff --git a/flopy/mf6/modflow/mfutlobs.py b/flopy/mf6/modflow/mfutlobs.py index a917c35d8..8464e6d89 100644 --- a/flopy/mf6/modflow/mfutlobs.py +++ b/flopy/mf6/modflow/mfutlobs.py @@ -217,7 +217,7 @@ class UtlobsPackages(mfpackage.MFChildPackages): UtlobsPackages is a container class for the ModflowUtlobs class. Methods - ---------- + ------- initialize Initializes a new ModflowUtlobs package removing any sibling child packages attached to the same parent package. 
See ModflowUtlobs init diff --git a/flopy/mf6/modflow/mfutltas.py b/flopy/mf6/modflow/mfutltas.py index 078f1a8f2..b00d56dbc 100644 --- a/flopy/mf6/modflow/mfutltas.py +++ b/flopy/mf6/modflow/mfutltas.py @@ -196,7 +196,7 @@ class UtltasPackages(mfpackage.MFChildPackages): UtltasPackages is a container class for the ModflowUtltas class. Methods - ---------- + ------- initialize Initializes a new ModflowUtltas package removing any sibling child packages attached to the same parent package. See ModflowUtltas init diff --git a/flopy/mf6/modflow/mfutlts.py b/flopy/mf6/modflow/mfutlts.py index 38e70b352..3113e423e 100644 --- a/flopy/mf6/modflow/mfutlts.py +++ b/flopy/mf6/modflow/mfutlts.py @@ -272,7 +272,7 @@ class UtltsPackages(mfpackage.MFChildPackages): UtltsPackages is a container class for the ModflowUtlts class. Methods - ---------- + ------- initialize Initializes a new ModflowUtlts package removing any sibling child packages attached to the same parent package. See ModflowUtlts init diff --git a/flopy/mf6/modflow/mfutltvk.py b/flopy/mf6/modflow/mfutltvk.py index 4f72614eb..1d7740351 100644 --- a/flopy/mf6/modflow/mfutltvk.py +++ b/flopy/mf6/modflow/mfutltvk.py @@ -236,7 +236,7 @@ class UtltvkPackages(mfpackage.MFChildPackages): UtltvkPackages is a container class for the ModflowUtltvk class. Methods - ---------- + ------- initialize Initializes a new ModflowUtltvk package removing any sibling child packages attached to the same parent package. See ModflowUtltvk init diff --git a/flopy/mf6/modflow/mfutltvs.py b/flopy/mf6/modflow/mfutltvs.py index a21b93b8d..d9fe1111c 100644 --- a/flopy/mf6/modflow/mfutltvs.py +++ b/flopy/mf6/modflow/mfutltvs.py @@ -236,7 +236,7 @@ class UtltvsPackages(mfpackage.MFChildPackages): UtltvsPackages is a container class for the ModflowUtltvs class. Methods - ---------- + ------- initialize Initializes a new ModflowUtltvs package removing any sibling child packages attached to the same parent package. See ModflowUtltvs init diff --git a/flopy/mf6/utils/binaryfile_utils.py b/flopy/mf6/utils/binaryfile_utils.py index aba094a04..91bcd0abd 100644 --- a/flopy/mf6/utils/binaryfile_utils.py +++ b/flopy/mf6/utils/binaryfile_utils.py @@ -57,7 +57,7 @@ class MFOutputRequester: binary data from the SimulationDict() object on the fly without actually storing it in the SimulationDict() object. 
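# Editor's note on the one-character docstring hunks around this point:
# numpydoc requires a section underline to match its header length exactly
# ("Returns" takes seven dashes, "Methods" seven, "Parameters" ten), which
# is all these hunks change. A minimal sketch of the convention, using a
# hypothetical function name:

def get_heads():
    """
    Return simulated heads.

    Returns
    -------
    heads : list of float
        Simulated heads.
    """
    return [0.0]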
- Parameters: + Parameters ---------- mfdict: dict local instance of the SimulationDict() object @@ -66,12 +66,12 @@ class MFOutputRequester: key: tuple user requested data key - Methods: + Methods ------- MFOutputRequester.querybinarydata returns: Xarray object - Examples: + Examples -------- >>> data = MFOutputRequester(mfdict, path, key) >>> data.querybinarydata diff --git a/flopy/mf6/utils/binarygrid_util.py b/flopy/mf6/utils/binarygrid_util.py index 38df3b961..21ecedbc1 100644 --- a/flopy/mf6/utils/binarygrid_util.py +++ b/flopy/mf6/utils/binarygrid_util.py @@ -280,13 +280,10 @@ def __get_iverts(self): """ iverts = None if "IAVERT" in self._datadict: - if self._grid_type == "DISV": - nsize = self.ncpl - elif self._grid_type == "DISU": - nsize = self.nodes iverts = [] iavert = self.iavert javert = self.javert + nsize = iavert.shape[0] - 1 for ivert in range(nsize): i0 = iavert[ivert] i1 = iavert[ivert + 1] diff --git a/flopy/mf6/utils/lakpak_utils.py b/flopy/mf6/utils/lakpak_utils.py index 9dc328293..8b1e1b25b 100644 --- a/flopy/mf6/utils/lakpak_utils.py +++ b/flopy/mf6/utils/lakpak_utils.py @@ -125,7 +125,7 @@ def get_lak_connections(modelgrid, lake_map, idomain=None, bedleak=None): unique = np.unique(lake_map) # exclude lakes with lake numbers less than 0 - idx = np.where(unique > -1) + idx = np.asarray(unique > -1).nonzero() unique = unique[idx] dx, dy = None, None @@ -199,7 +199,9 @@ def get_lak_connections(modelgrid, lake_map, idomain=None, bedleak=None): # reset idomain for lake if iconn > 0: - idx = np.where((lake_map == lake_number) & (idomain > 0)) + idx = np.asarray( + (lake_map == lake_number) & (idomain > 0) + ).nonzero() idomain[idx] = 0 return idomain, connection_dict, connectiondata diff --git a/flopy/mf6/utils/mfobservation.py b/flopy/mf6/utils/mfobservation.py index 4699e7d45..33e3eefe1 100644 --- a/flopy/mf6/utils/mfobservation.py +++ b/flopy/mf6/utils/mfobservation.py @@ -53,15 +53,16 @@ class Observations: Simple class to extract and view Observation files for Uzf models (possibly all obs/hobs)? - Input: - ------ - fi = (string) name of the observation binary output file + Parameters + ---------- + fi : str + name of the observation binary output file - Methods: - -------- + Methods + ------- get_data(): (np.array) returns array of observation data parameters: - ----------- + ---------- text = (str) specific modflow record name contained in Obs.out file idx = (int), (slice(start, stop)) integer or slice of data to be returned. 
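# Editor's note on the __get_iverts hunk above: the DISV/DISU branch is
# unnecessary because IAVERT is a CSR-style pointer array whose length is
# always one more than the number of cells, so the loop size can be taken
# from iavert itself. A minimal sketch with made-up arrays (0-based here
# for illustration):

import numpy as np

iavert = np.array([0, 4, 8, 11])  # pointers; number of cells = len - 1
javert = np.array([0, 1, 5, 4,    # vertices of cell 0
                   1, 2, 6, 5,    # vertices of cell 1
                   2, 3, 6])      # vertices of cell 2 (a triangle)

nsize = iavert.shape[0] - 1       # works for DISV and DISU alike
iverts = [javert[iavert[i]:iavert[i + 1]].tolist() for i in range(nsize)]
assert iverts == [[0, 1, 5, 4], [1, 2, 6, 5], [2, 3, 6]]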
corresponds to kstp*kper - 1 @@ -478,7 +479,7 @@ def _get_obsfile_names(self, partial_key, OBS8, obstype): obstype: (string) SINGLE or CONTINUOUS Returns: - -------- + ------- sets key: path to self.obs_dataDict """ diff --git a/flopy/mf6/utils/model_splitter.py b/flopy/mf6/utils/model_splitter.py index 731387761..b16ba93b6 100644 --- a/flopy/mf6/utils/model_splitter.py +++ b/flopy/mf6/utils/model_splitter.py @@ -316,7 +316,7 @@ def load_node_mapping(self, sim, filename): for mkey in models: ncpl = self._new_ncpl[mkey] array = np.full((ncpl,), -1, dtype=int) - onode = np.where(model_array == mkey)[0] + onode = np.asarray(model_array == mkey).nonzero()[0] nnode = split_array[onode] array[nnode] = onode grid_info[mkey] = (array,) @@ -413,7 +413,7 @@ def optimize_splitting_mask(self, nparts): membership = np.array(membership, dtype=int) if laks: for lak in laks: - idx = np.where(lak_array == lak)[0] + idx = np.asarray(lak_array == lak).nonzero()[0] mnum = np.unique(membership[idx])[0] membership[idx] = mnum @@ -429,7 +429,7 @@ def optimize_splitting_mask(self, nparts): ev = np.equal(mnums1, mnums2) if np.all(ev): continue - idx = np.where(~ev)[0] + idx = np.asarray(~ev).nonzero()[0] mnum_to = mnums1[idx] adj_nodes = nodes2[idx] membership[adj_nodes] = mnum_to @@ -471,7 +471,7 @@ def reconstruct_array(self, arrays): array = array.ravel() ncpl = self._new_ncpl[mkey] mapping = self._grid_info[mkey][-1] - old_nodes = np.where(mapping != -1) + old_nodes = np.asarray(mapping != -1).nonzero() new_nodes = mapping[old_nodes] old_nodes = np.tile(old_nodes, (nlay, 1)) @@ -645,7 +645,7 @@ def _remap_nodes(self, array): bad_keys = [] for mkey in mkeys: count = 0 - mask = np.where(array == mkey) + mask = np.asarray(array == mkey).nonzero() for arr in idomain: check = arr[mask] count += np.count_nonzero(check) @@ -670,7 +670,7 @@ def _remap_nodes(self, array): if self._modelgrid.grid_type == "structured": a = array.reshape(self._modelgrid.nrow, self._modelgrid.ncol) for m in np.unique(a): - cells = np.where(a == m) + cells = np.asarray(a == m).nonzero() rmin, rmax = np.min(cells[0]), np.max(cells[0]) cmin, cmax = np.min(cells[1]), np.max(cells[1]) cellids = list(zip([0] * len(cells[0]), cells[0], cells[1])) @@ -702,7 +702,7 @@ def _remap_nodes(self, array): xverts, yverts = None, None for m in np.unique(array): - cells = np.where(array == m)[0] + cells = np.asarray(array == m).nonzero()[0] mapping = np.zeros( ( len( @@ -718,9 +718,9 @@ def _remap_nodes(self, array): if xverts is not None: mxv = xverts[cells] myv = yverts[cells] - xmidx = np.where(mxv == np.nanmin(mxv))[0] + xmidx = np.asarray(mxv == np.nanmin(mxv)).nonzero()[0] myv = myv[xmidx] - ymidx = np.where(myv == np.nanmin(myv))[0] + ymidx = np.asarray(myv == np.nanmin(myv)).nonzero()[0] self._offsets[m] = { "xorigin": np.nanmin(mxv[xmidx[0]]), @@ -736,11 +736,11 @@ def _remap_nodes(self, array): new_ncpl[m] *= i for mdl in np.unique(array): - mnodes = np.where(array == mdl)[0] + mnodes = np.asarray(array == mdl).nonzero()[0] mg_info = grid_info[mdl] if mg_info is not None: mapping = mg_info[-1] - new_nodes = np.where(mapping != -1)[0] + new_nodes = np.asarray(mapping != -1).nonzero()[0] old_nodes = mapping[new_nodes] for ix, nnode in enumerate(new_nodes): self._node_map[old_nodes[ix]] = (mdl, nnode) @@ -1163,7 +1163,7 @@ def _remap_array(self, item, mfarray, mapped_data, **kwargs): new_ncpl = self._new_ncpl[mkey] new_array = np.zeros(new_ncpl * nlay, dtype=dtype) mapping = self._grid_info[mkey][-1] - new_nodes = np.where(mapping != -1) + new_nodes = 
np.asarray(mapping != -1).nonzero() old_nodes = mapping[new_nodes] old_nodes = np.tile(old_nodes, (nlay, 1)) @@ -1263,7 +1263,7 @@ def _remap_mflist( new_model, new_node = self._get_new_model_new_node(nodes) for mkey, model in self._model_dict.items(): - idx = np.where(new_model == mkey)[0] + idx = np.asarray(new_model == mkey).nonzero()[0] if self._pkg_mover and transient: mvr_remap = { idx[i]: (model.name, i) for i in range(len(idx)) @@ -1363,7 +1363,7 @@ def _remap_uzf(self, package, mapped_data): name = package.filename self._uzf_remaps[name] = {} for mkey, model in self._model_dict.items(): - idx = np.where(new_model == mkey)[0] + idx = np.asarray(new_model == mkey).nonzero()[0] if len(idx) == 0: new_recarray = None else: @@ -1401,7 +1401,9 @@ def _remap_uzf(self, package, mapped_data): spd = {} for per, recarray in perioddata.items(): - idx = np.where(np.isin(recarray.ifno, uzf_nodes)) + idx = np.asarray( + np.isin(recarray.ifno, uzf_nodes) + ).nonzero() new_period = recarray[idx] new_period["ifno"] = [ uzf_remap[i] for i in new_period["ifno"] @@ -1547,7 +1549,7 @@ def _remap_lak(self, package, mapped_data): new_model, new_node = self._get_new_model_new_node(nodes) for mkey, model in self._model_dict.items(): - idx = np.where(new_model == mkey)[0] + idx = np.asarray(new_model == mkey).nonzero()[0] if len(idx) == 0: new_recarray = None else: @@ -1586,7 +1588,9 @@ def _remap_lak(self, package, mapped_data): if meta[0] == mkey: mapnos.append(lak) - idxs = np.where(np.isin(outlets.lakein, mapnos))[0] + idxs = np.asarray( + np.isin(outlets.lakein, mapnos) + ).nonzero()[0] if len(idxs) == 0: new_outlets = None else: @@ -1680,7 +1684,7 @@ def _remap_sfr(self, package, mapped_data): new_model, new_node = self._get_new_model_new_node(nodes) for mkey, model in self._model_dict.items(): - idx = np.where(new_model == mkey)[0] + idx = np.asarray(new_model == mkey).nonzero()[0] if len(idx) == 0: new_recarray = None continue @@ -1709,7 +1713,9 @@ def _remap_sfr(self, package, mapped_data): ) # now let's remap connection data and tag external exchanges - idx = np.where(np.isin(connectiondata.ifno, old_rno))[0] + idx = np.asarray( + np.isin(connectiondata.ifno, old_rno) + ).nonzero()[0] new_connectiondata = connectiondata[idx] ncons = [] for ix, rec in enumerate(new_connectiondata): @@ -1776,8 +1782,12 @@ def _remap_sfr(self, package, mapped_data): if m0 != m1: div_mover_ix.append(ix) - idx = np.where(np.isin(diversions.ifno, old_rno))[0] - idx = np.where(~np.isin(idx, div_mover_ix))[0] + idx = np.asarray( + np.isin(diversions.ifno, old_rno) + ).nonzero()[0] + idx = np.asarray( + ~np.isin(idx, div_mover_ix) + ).nonzero()[0] new_diversions = diversions[idx] new_rno = [ @@ -1802,23 +1812,25 @@ def _remap_sfr(self, package, mapped_data): # now we can do the stress period data spd = {} for kper, recarray in perioddata.items(): - idx = np.where(np.isin(recarray.ifno, old_rno))[0] + idx = np.asarray( + np.isin(recarray.ifno, old_rno) + ).nonzero()[0] new_spd = recarray[idx] if diversions is not None: - external_divs = np.where( + external_divs = np.asarray( np.isin(new_spd.idv, list(div_mvr_conn.keys())) - )[0] + ).nonzero()[0] if len(external_divs) > 0: for ix in external_divs: rec = recarray[ix] idv = recarray["idv"] div_mvr_conn[idv].append(rec["divflow"]) - idx = np.where( + idx = np.asarray( ~np.isin( new_spd.idv, list(div_mvr_conn.keys()) ) - )[0] + ).nonzero()[0] new_spd = new_spd[idx] @@ -1931,7 +1943,7 @@ def _remap_maw(self, package, mapped_data): maw_remaps = {} for mkey, model in 
self._model_dict.items(): - idx = np.where(new_model == mkey)[0] + idx = np.asarray(new_model == mkey).nonzero()[0] new_connectiondata = connectiondata[idx] if len(new_connectiondata) == 0: continue @@ -1965,7 +1977,9 @@ def _remap_maw(self, package, mapped_data): spd = {} for per, recarray in perioddata.items(): - idx = np.where(np.isin(recarray.ifno, maw_wellnos))[0] + idx = np.asarray( + np.isin(recarray.ifno, maw_wellnos) + ).nonzero()[0] if len(idx) > 0: new_recarray = recarray[idx] new_wellno = [ @@ -2030,7 +2044,7 @@ def _remap_csub(self, package, mapped_data): ninterbeds = None for mkey, model in self._model_dict.items(): - idx = np.where(new_model == mkey)[0] + idx = np.asarray(new_model == mkey).nonzero()[0] if len(idx) == 0: new_packagedata = None else: @@ -2052,7 +2066,7 @@ def _remap_csub(self, package, mapped_data): layers, nodes = self._cellid_to_layer_node(recarray.cellid) new_model, new_node = self._get_new_model_new_node(nodes) - idx = np.where(new_model == mkey)[0] + idx = np.asarray(new_model == mkey).nonzero()[0] if len(idx) == 0: continue @@ -2158,7 +2172,7 @@ def _remap_hfb(self, package, mapped_data): raise AssertionError("Models cannot be split along faults") for mkey, model in self._model_dict.items(): - idx = np.where(new_model1 == mkey)[0] + idx = np.asarray(new_model1 == mkey).nonzero()[0] if len(idx) == 0: new_recarray = None else: @@ -2262,7 +2276,7 @@ def _remap_obs(self, package, mapped_data, remapper, pkg_type=None): dtype=object, ) for mkey, model in self._model_dict.items(): - idx = np.where(new_model1 == mkey) + idx = np.asarray(new_model1 == mkey).nonzero() tmp_cellid = self._new_node_to_cellid( model, new_node1, layers1, idx ) @@ -2297,7 +2311,7 @@ def _remap_obs(self, package, mapped_data, remapper, pkg_type=None): ) for idt in set(idtype): remaps = remapper[idt] - idx = np.where(idtype == idt) + idx = np.asarray(idtype == idt).nonzero() new_cellid1[idx] = [ ( remaps[i][-1] + 1 @@ -2364,7 +2378,7 @@ def _remap_obs(self, package, mapped_data, remapper, pkg_type=None): dtype=object, ) for mkey, model in self._model_dict.items(): - idx = np.where(new_model1 == mkey) + idx = np.asarray(new_model1 == mkey).nonzero() idx = [ ix for ix, i in enumerate(recarray.id[idx]) @@ -2399,7 +2413,7 @@ def _remap_obs(self, package, mapped_data, remapper, pkg_type=None): new_model1[mm_idx] = tmp_models cellid2 = recarray.id2 - conv_idx = np.where((cellid2 is not None))[0] + conv_idx = np.asarray(cellid2 != None).nonzero()[0] # noqa: E711 if len(conv_idx) > 0: # do stuff # need to trap layers... if pkg_type is None: @@ -2454,9 +2468,9 @@ def _remap_obs(self, package, mapped_data, remapper, pkg_type=None): (len(new_node2),), None, dtype=object ) for mkey, model in self._model_dict.items(): - idx = np.where(new_model2 == mkey) + idx = np.asarray(new_model2 == mkey).nonzero() tmp_node = new_node2[idx] - cidx = np.where((tmp_node is not None)) + cidx = np.asarray((tmp_node != None)).nonzero() # noqa: E711 tmp_cellid = model.modelgrid.get_lrc( tmp_node[cidx].to_list() ) @@ -2501,7 +2515,7 @@ def _remap_obs(self, package, mapped_data, remapper, pkg_type=None): if idt is None: continue remaps = remapper[idt] - idx = np.where(idtype == idt) + idx = np.asarray(idtype == idt).nonzero() new_cellid2[idx] = [ ( remaps[i][-1] + 1 @@ -2536,7 +2550,7 @@ def _remap_obs(self, package, mapped_data, remapper, pkg_type=None): new_model1[idx] = mkey # now we remap the continuous data!!!! 
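# Editor's note on the model_splitter hunks above and below: NumPy documents
# single-argument np.where(cond) as a shorthand for
# np.asarray(cond).nonzero() and recommends the explicit spelling, which
# also accepts array-likes (lists, pandas Series) that lack a .nonzero()
# method. The two forms return identical indices:

import numpy as np

mask = np.array([True, False, True, False, True])
idx_where = np.where(mask)[0]
idx_nonzero = np.asarray(mask).nonzero()[0]
assert (idx_where == idx_nonzero).all()  # array([0, 2, 4]) either way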
- idx = np.where(new_model1 == mkey)[0] + idx = np.asarray(new_model1 == mkey).nonzero()[0] if len(idx) == 0: continue @@ -2682,7 +2696,7 @@ def _remap_adv_tag(self, mkey, recarray, item, mapper): if meta[0] == mkey: mapnos.append(lak) - idxs = np.where(np.isin(recarray[item], mapnos))[0] + idxs = np.asarray(np.isin(recarray[item], mapnos)).nonzero()[0] if len(idxs) == 0: new_recarray = None else: diff --git a/flopy/mfusg/mfusg.py b/flopy/mfusg/mfusg.py index 9c01130a0..6f84d2b40 100644 --- a/flopy/mfusg/mfusg.py +++ b/flopy/mfusg/mfusg.py @@ -307,7 +307,7 @@ def _load_packages( Option to raise exceptions on package load failure. Returns - ---------- + ------- files_successfully_loaded : list of loaded files files_not_loaded : list of files that were not loaded """ diff --git a/flopy/mfusg/mfusgbcf.py b/flopy/mfusg/mfusgbcf.py index ea85670e3..290b8f37b 100644 --- a/flopy/mfusg/mfusgbcf.py +++ b/flopy/mfusg/mfusgbcf.py @@ -74,7 +74,7 @@ class MfUsgBcf(ModflowBcf): is the vertical hydraulic conductivity of the cell and the leakance is computed for each vertical connection. sf1 : float or array of floats (nlay, nrow, ncol) - specific storage (confined) or storage coefficient (unconfined), + specific storage (confined) or specific yield (unconfined), read when there is at least one transient stress period. (default is 1e-5) sf2 : float or array of floats (nlay, nrow, ncol) diff --git a/flopy/mfusg/mfusgcln.py b/flopy/mfusg/mfusgcln.py index 63af3f7f7..04b372381 100644 --- a/flopy/mfusg/mfusgcln.py +++ b/flopy/mfusg/mfusgcln.py @@ -1,4 +1,3 @@ -# pylint: disable=E1101 """ Mfusgcln module. diff --git a/flopy/mfusg/mfusglpf.py b/flopy/mfusg/mfusglpf.py index 5e416f121..a3ca1bab0 100644 --- a/flopy/mfusg/mfusglpf.py +++ b/flopy/mfusg/mfusglpf.py @@ -776,7 +776,6 @@ def _load_layer_properties( parm_dict = {} if nplpf > 0: par_types, parm_dict = mfpar.load(f_obj, nplpf, model.verbose) - # print parm_dict # non-parameter data transient = not dis.steady.all() diff --git a/flopy/mfusg/mfusgsms.py b/flopy/mfusg/mfusgsms.py index 0866a7ae3..865e9d5f4 100644 --- a/flopy/mfusg/mfusgsms.py +++ b/flopy/mfusg/mfusgsms.py @@ -1,4 +1,3 @@ -# pylint: disable=too-many-instance-attributes """ mfusgsms module. This is the solver for MODFLOW-USG. diff --git a/flopy/mfusg/mfusgwel.py b/flopy/mfusg/mfusgwel.py index bb3ca0bbe..493c7a19c 100644 --- a/flopy/mfusg/mfusgwel.py +++ b/flopy/mfusg/mfusgwel.py @@ -249,9 +249,10 @@ def __init__( def _check_for_aux(self, options, cln=False): """Check dtype for auxiliary variables, and add to options. - Parameters: + Parameters ---------- - options: (list) package options + options: list + package options Returns ------- @@ -278,9 +279,10 @@ def _check_for_aux(self, options, cln=False): def write_file(self, f=None): """Write the package file. 
- Parameters: + Parameters ---------- - f: (str) optional file name + f : str, optional + file name Returns ------- diff --git a/flopy/modflow/mf.py b/flopy/modflow/mf.py index c02ecf839..b1c2a65ae 100644 --- a/flopy/modflow/mf.py +++ b/flopy/modflow/mf.py @@ -35,8 +35,7 @@ def __repr__(self): return "Global Package class" def write_file(self): - # Not implemented for global class - return + raise NotImplementedError class ModflowList(Package): @@ -53,8 +52,7 @@ def __repr__(self): return "List Package class" def write_file(self): - # Not implemented for list class - return + raise NotImplementedError class Modflow(BaseModel): @@ -149,9 +147,6 @@ def __init__( # external option stuff self.array_free_format = True self.array_format = "modflow" - # self.external_fnames = [] - # self.external_units = [] - # self.external_binflag = [] self.load_fail = False # the starting external data unit number @@ -239,16 +234,6 @@ def __repr__(self): ) return s - # - # def next_ext_unit(self): - # """ - # Function to encapsulate next_ext_unit attribute - # - # """ - # next_unit = self.__next_ext_unit + 1 - # self.__next_ext_unit += 1 - # return next_unit - @property def modeltime(self): if self.get_package("disu") is not None: diff --git a/flopy/modflow/mfbas.py b/flopy/modflow/mfbas.py index bae50fdb5..66433fd3d 100644 --- a/flopy/modflow/mfbas.py +++ b/flopy/modflow/mfbas.py @@ -141,8 +141,6 @@ def __init__( self.ichflg = ichflg self.stoper = stoper - # self.ifrefm = ifrefm - # model.array_free_format = ifrefm model.free_format_input = ifrefm self.hnoflo = hnoflo diff --git a/flopy/modflow/mfbcf.py b/flopy/modflow/mfbcf.py index f52b738bf..83d676448 100644 --- a/flopy/modflow/mfbcf.py +++ b/flopy/modflow/mfbcf.py @@ -45,7 +45,7 @@ class ModflowBcf(Package): vcont : float or array of floats (nlay-1, nrow, ncol) vertical leakance between layers (default is 1.0) sf1 : float or array of floats (nlay, nrow, ncol) - specific storage (confined) or storage coefficient (unconfined), + specific storage (confined) or specific yield (unconfined), read when there is at least one transient stress period. 
(default is 1e-5) sf2 : float or array of floats (nrow, ncol) diff --git a/flopy/modflow/mfbct.py b/flopy/modflow/mfbct.py index 6f0db5368..dbd511521 100644 --- a/flopy/modflow/mfbct.py +++ b/flopy/modflow/mfbct.py @@ -70,8 +70,6 @@ def __init__( self.porosity = Util3d( model, (nlay, nrow, ncol), np.float32, porosity, "porosity" ) - # self.arad = Util2d(model, (1, nja), np.float32, - # arad, 'arad') self.dlh = Util3d(model, (nlay, nrow, ncol), np.float32, dlh, "dlh") self.dlv = Util3d(model, (nlay, nrow, ncol), np.float32, dlv, "dlv") self.dth = Util3d(model, (nlay, nrow, ncol), np.float32, dth, "dth") diff --git a/flopy/modflow/mffhb.py b/flopy/modflow/mffhb.py index 2aec2aa09..7e68a08be 100644 --- a/flopy/modflow/mffhb.py +++ b/flopy/modflow/mffhb.py @@ -210,7 +210,7 @@ def __init__( ds5 = ds5.to_records(index=False) # convert numpy array to a recarray if ds5.dtype != dtype: - ds5 = np.core.records.fromarrays(ds5.transpose(), dtype=dtype) + ds5 = np.rec.fromarrays(ds5.transpose(), dtype=dtype) # assign dataset 5 self.ds5 = ds5 @@ -229,7 +229,7 @@ def __init__( ds7 = ds7.to_records(index=False) # convert numpy array to a recarray if ds7.dtype != dtype: - ds7 = np.core.records.fromarrays(ds7.transpose(), dtype=dtype) + ds7 = np.rec.fromarrays(ds7.transpose(), dtype=dtype) # assign dataset 7 self.ds7 = ds7 @@ -335,7 +335,6 @@ def write_file(self): """ nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper f = open(self.fn_path, "w") - # f.write('{0:s}\n'.format(self.heading)) # Data set 1 f.write(f"{self.nbdtim} ") diff --git a/flopy/modflow/mfflwob.py b/flopy/modflow/mfflwob.py index 90e396ea0..1513ed3f0 100644 --- a/flopy/modflow/mfflwob.py +++ b/flopy/modflow/mfflwob.py @@ -164,38 +164,28 @@ def __init__( ] pakunits = {"chob": 40, "gbob": 41, "drob": 42, "rvob": 43} outunits = {"chob": 140, "gbob": 141, "drob": 142, "rvob": 143} - # if unitnumber is None: - # unitnumber = [40, 140, 41, 141, 42, 142, 43, 143] if flowtype.upper().strip() == "CHD": name = ["CHOB", "DATA"] extension = extension[0:2] - # unitnumber = unitnumber[0:2] - # iufbobsv = unitnumber[1] self._ftype = "CHOB" self.url = "chob.html" self.heading = "# CHOB for MODFLOW, generated by Flopy." elif flowtype.upper().strip() == "GHB": name = ["GBOB", "DATA"] extension = extension[2:4] - # unitnumber = unitnumber[2:4] - # iufbobsv = unitnumber[1] self._ftype = "GBOB" self.url = "gbob.html" self.heading = "# GBOB for MODFLOW, generated by Flopy." elif flowtype.upper().strip() == "DRN": name = ["DROB", "DATA"] extension = extension[4:6] - # unitnumber = unitnumber[4:6] - # iufbobsv = unitnumber[1] self._ftype = "DROB" self.url = "drob.html" self.heading = "# DROB for MODFLOW, generated by Flopy." elif flowtype.upper().strip() == "RIV": name = ["RVOB", "DATA"] extension = extension[6:8] - # unitnumber = unitnumber[6:8] - # iufbobsv = unitnumber[1] self._ftype = "RVOB" self.url = "rvob.html" self.heading = "# RVOB for MODFLOW, generated by Flopy." 
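# Editor's note on the np.core.records.fromarrays hunks above (and the
# matching ones later in this patch): np.core is a private namespace that
# NumPy 2.0 formally hid, so the public alias np.rec.fromarrays is used
# instead; the call signature is unchanged. A minimal sketch with a
# hypothetical two-column dataset:

import numpy as np

ds5 = np.array([[1, 10.5], [2, 20.0], [3, 30.25]])
dtype = np.dtype([("node", int), ("flux", np.float32)])
rec = np.rec.fromarrays(ds5.transpose(), dtype=dtype)
assert rec[0]["node"] == 1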
@@ -318,7 +308,6 @@ def write_file(self): # write sections 3-5 looping through observations groups c = 0 for i in range(self.nqfb): - # while (i < self.nqfb): # write section 3 f_fbob.write(f"{self.nqobfb[i]:10d}{self.nqclfb[i]:10d}\n") diff --git a/flopy/modflow/mfgage.py b/flopy/modflow/mfgage.py index ba2463ff2..1b7c4b76d 100644 --- a/flopy/modflow/mfgage.py +++ b/flopy/modflow/mfgage.py @@ -131,7 +131,7 @@ def __init__( # convert gage_data to a recarray, if necessary if isinstance(gage_data, np.ndarray): if not gage_data.dtype == dtype: - gage_data = np.core.records.fromarrays( + gage_data = np.rec.fromarrays( gage_data.transpose(), dtype=dtype ) elif isinstance(gage_data, pd.DataFrame): diff --git a/flopy/modflow/mfhfb.py b/flopy/modflow/mfhfb.py index 19f30fbee..6d9479c13 100644 --- a/flopy/modflow/mfhfb.py +++ b/flopy/modflow/mfhfb.py @@ -313,7 +313,6 @@ def load(cls, f, model, ext_unit_dict=None): it = 2 while it < len(t): toption = t[it] - # print it, t[it] if toption.lower() == "noprint": options.append(toption) elif "aux" in toption.lower(): diff --git a/flopy/modflow/mflpf.py b/flopy/modflow/mflpf.py index 0b192bcd7..8dd6fd33b 100644 --- a/flopy/modflow/mflpf.py +++ b/flopy/modflow/mflpf.py @@ -480,9 +480,6 @@ def load(cls, f, model, ext_unit_dict=None, check=True): if model.version == "mfusg" and not model.structured: ikcflag = int(t[3]) item1_len = 4 - # if ipakcb != 0: - # model.add_pop_key_list(ipakcb) - # ipakcb = 53 # options storagecoefficient = False constantcv = False @@ -546,7 +543,6 @@ def load(cls, f, model, ext_unit_dict=None, check=True): par_types = [] if nplpf > 0: par_types, parm_dict = mfpar.load(f, nplpf, model.verbose) - # print parm_dict # non-parameter data transient = not dis.steady.all() diff --git a/flopy/modflow/mfmlt.py b/flopy/modflow/mfmlt.py index 6a84519b1..69413dffc 100644 --- a/flopy/modflow/mfmlt.py +++ b/flopy/modflow/mfmlt.py @@ -84,23 +84,22 @@ def __init__( if mult_dict is not None: self.nml = len(mult_dict) self.mult_dict = mult_dict - # print mult_dict self.parent.add_package(self) def write_file(self): """ Write the package file. - Returns - ------- - None + Raises + ------ + NotImplementedError Notes ----- Not implemented because parameters are only supported on load """ - pass + raise NotImplementedError @classmethod def load(cls, f, model, nrow=None, ncol=None, ext_unit_dict=None): diff --git a/flopy/modflow/mfmnw1.py b/flopy/modflow/mfmnw1.py index 316f32313..d18c883b8 100644 --- a/flopy/modflow/mfmnw1.py +++ b/flopy/modflow/mfmnw1.py @@ -127,12 +127,6 @@ def __init__( losstype # -string indicating head loss type for each well ) self.wel1_bynode_qsum = wel1_bynode_qsum # -nested list containing file names, unit numbers, and ALLTIME flag for auxiliary output, e.g. [['test.ByNode',92,'ALLTIME']] - # if stress_period_data is not None: - # for per, spd in stress_period_data.items(): - # for n in spd.dtype.names: - # self.stress_period_data[per] = ModflowMnw1.get_empty_stress_period_data(len(spd), - # structured=self.parent.structured) - # self.stress_period_data[per][n] = stress_period_data[per][n] if dtype is not None: self.dtype = dtype else: @@ -149,9 +143,6 @@ def __init__( "LOSSTYPE (%s) must be one of the following: skin, linear, nonlinear" % (self.losstype) ) - # auxFileExtensions = ['wl1','ByNode','Qsum'] - # for each in self.wel1_bynode_qsum: - # assert each[0].split('.')[1] in auxFileExtensions, 'File extensions in "wel1_bynode_qsum" must be one of the following: ".wl1", ".ByNode", or ".Qsum".' 
self.parent.add_package(self) @staticmethod @@ -278,7 +269,6 @@ def write_file(self): """ # -open file for writing - # f_mnw1 = open( self.file_name[0], 'w' ) f = open(self.fn_path, "w") # -write header diff --git a/flopy/modflow/mfmnw2.py b/flopy/modflow/mfmnw2.py index f77859123..1ba021496 100644 --- a/flopy/modflow/mfmnw2.py +++ b/flopy/modflow/mfmnw2.py @@ -731,9 +731,7 @@ def _set_attributes_from_node_data(self): names = Mnw.get_item2_names(node_data=self.node_data) for n in names: # assign by node variables as lists if they are being included - if ( - n in self.by_node_variables - ): # and len(np.unique(self.node_data[n])) > 1: + if n in self.by_node_variables: self.__dict__[n] = list(self.node_data[n]) else: self.__dict__[n] = self.node_data[n][0] @@ -841,7 +839,6 @@ def _getloc(n): continue # only write variables by node if they are unique lists > length 1 if len(np.unique(val)) > 1: - # if isinstance(val, list) or val < 0: fmt = " " + float_format f_mnw.write(fmt.format(self.node_data[var][n])) f_mnw.write("\n") @@ -1059,10 +1056,7 @@ def __init__( ] # recarray of Mnw properties by node self.nodtot = len(self.node_data) self._sort_node_data() - # self.node_data.sort(order=['wellid', 'k']) - # Python 3.5.0 produces a segmentation fault when trying to sort BR MNW wells - # self.node_data.sort(order='wellid', axis=0) self.mnw = mnw # dict or list of Mnw objects self.stress_period_data = MfList( @@ -1553,12 +1547,6 @@ def make_mnw_objects(self): for wellid in mnws: nd = node_data[node_data.wellid == wellid] nnodes = Mnw.get_nnodes(nd) - # if tops and bottoms are specified, flip nnodes - # maxtop = np.max(nd.ztop) - # minbot = np.min(nd.zbotm) - # if maxtop - minbot > 0 and nnodes > 0: - # nnodes *= -1 - # reshape stress period data to well mnwspd = Mnw.get_empty_stress_period_data( self.nper, aux_names=self.aux ) diff --git a/flopy/modflow/mfmnwi.py b/flopy/modflow/mfmnwi.py index b49bfc400..a0fbdb89d 100644 --- a/flopy/modflow/mfmnwi.py +++ b/flopy/modflow/mfmnwi.py @@ -314,10 +314,6 @@ def write_file(self): # -open file for writing f = open(self.fn_path, "w") - # header not supported - # # -write header - # f.write('{}\n'.format(self.heading)) - # dataset 1 - WEL1flag QSUMflag SYNDflag line = f"{self.wel1flag:10d}" line += f"{self.qsumflag:10d}" diff --git a/flopy/modflow/mfoc.py b/flopy/modflow/mfoc.py index 5a71c3e62..b98f38ac7 100644 --- a/flopy/modflow/mfoc.py +++ b/flopy/modflow/mfoc.py @@ -363,13 +363,13 @@ def check(self, f=None, verbose=True, level=1, checktype=None): if len(words) < 2: chk._add_to_summary( "Warning", - package="OC", # value=kperkstp, + package="OC", desc=f"action {action!r} ignored; too few words", ) elif words[0:2] not in expected_actions: chk._add_to_summary( "Warning", - package="OC", # value=kperkstp, + package="OC", desc=f"action {action!r} ignored", ) # TODO: check data list of layers for some actions @@ -377,7 +377,7 @@ def check(self, f=None, verbose=True, level=1, checktype=None): # repeat as many times as remaining keys not used chk._add_to_summary( "Warning", - package="OC", # value=kperkstp, + package="OC", desc="action(s) defined in OC stress_period_data ignored " "as they are not part the stress periods defined by DIS", ) diff --git a/flopy/modflow/mfpar.py b/flopy/modflow/mfpar.py index 0b61758ec..de4cea6c4 100644 --- a/flopy/modflow/mfpar.py +++ b/flopy/modflow/mfpar.py @@ -301,10 +301,8 @@ def parameter_fill(model, shape, findkey, parm_dict, findlayer=None): pv = float(model.mfpar.pval.pval_dict[key.lower()]) except: pv = float(parval) - # 
print partyp, parval, nclu, clusters if partyp == findkey: for [layer, mltarr, zonarr, izones] in clusters: - # print layer, mltarr, zonarr, izones foundlayer = False if findlayer is None: foundlayer = True diff --git a/flopy/modflow/mfparbc.py b/flopy/modflow/mfparbc.py index a94e070c7..85250c576 100644 --- a/flopy/modflow/mfparbc.py +++ b/flopy/modflow/mfparbc.py @@ -203,7 +203,6 @@ def loadarray(f, npar, verbose=False): pinst, ] - # print bc_parms bcpar = ModflowParBc(bc_parms) return bcpar @@ -245,7 +244,6 @@ def parameter_bcfill(model, shape, parm_dict, pak_parms): dtype = np.float32 data = np.zeros(shape, dtype=dtype) for key, value in parm_dict.items(): - # print key, value pdict, idict = pak_parms.bc_parms[key] inst_data = idict[value] if model.mfpar.pval is None: @@ -257,7 +255,6 @@ def parameter_bcfill(model, shape, parm_dict, pak_parms): pv = float(pdict["parval"]) for [mltarr, zonarr, izones] in inst_data: model.parameter_load = True - # print mltarr, zonarr, izones if mltarr.lower() == "none": mult = np.ones(shape, dtype=dtype) else: diff --git a/flopy/modflow/mfpbc.py b/flopy/modflow/mfpbc.py index 2f857ea31..9239d1385 100644 --- a/flopy/modflow/mfpbc.py +++ b/flopy/modflow/mfpbc.py @@ -50,18 +50,6 @@ def __init__( self.mxcos, self.cosines = self.assign_layer_row_column_data( cosines, 3, zerobase=False ) - # self.mxcos = 0 - # if (cosines != None): - # error_message = 'cosines must have 3 columns' - # if (not isinstance(cosines, list)): - # cosines = [cosines] - # for a in cosines: - # a = np.atleast_2d(a) - # nr, nc = a.shape - # assert nc == 3, error_message - # if (nr > self.mxcos): - # self.mxcos = nr - # self.cosines = cosines self.np = 0 self.parent.add_package(self) diff --git a/flopy/modflow/mfpcg.py b/flopy/modflow/mfpcg.py index 2b2ee78d2..8145c21a8 100644 --- a/flopy/modflow/mfpcg.py +++ b/flopy/modflow/mfpcg.py @@ -248,7 +248,6 @@ def load(cls, f, model, ext_unit_dict=None): # free format if ifrfm: t = line_parse(line) - # t = line.strip().split() mxiter = int(t[0]) iter1 = int(t[1]) npcond = int(t[2]) @@ -262,7 +261,6 @@ def load(cls, f, model, ext_unit_dict=None): try: line = f.readline() t = line_parse(line) - # t = line.strip().split() hclose = float(t[0]) rclose = float(t[1]) relax = float(t[2]) diff --git a/flopy/modflow/mfpval.py b/flopy/modflow/mfpval.py index 151cf46a3..ff65054de 100644 --- a/flopy/modflow/mfpval.py +++ b/flopy/modflow/mfpval.py @@ -92,16 +92,16 @@ def write_file(self): """ Write the package file. 
- Returns - ------- - None + Raises + ------ + NotImplementedError Notes ----- Not implemented because parameters are only supported on load """ - pass + raise NotImplementedError def __getitem__(self, item): """ diff --git a/flopy/modflow/mfrch.py b/flopy/modflow/mfrch.py index 4803dcfdc..a9a22e394 100644 --- a/flopy/modflow/mfrch.py +++ b/flopy/modflow/mfrch.py @@ -235,8 +235,8 @@ def check( if Tmean != 0: R_T = period_means / Tmean - lessthan = np.where(R_T < RTmin)[0] - greaterthan = np.where(R_T > RTmax)[0] + lessthan = np.asarray(R_T < RTmin).nonzero()[0] + greaterthan = np.asarray(R_T > RTmax).nonzero()[0] if len(lessthan) > 0: txt = ( diff --git a/flopy/modflow/mfsfr2.py b/flopy/modflow/mfsfr2.py index 68c5bffa9..62112fb51 100644 --- a/flopy/modflow/mfsfr2.py +++ b/flopy/modflow/mfsfr2.py @@ -306,9 +306,7 @@ class ModflowSfr2(Package): nsfrpar = 0 default_value = 0.0 - # LENUNI = {"u": 0, "f": 1, "m": 2, "c": 3} len_const = {1: 1.486, 2: 1.0, 3: 100.0} - # {"u": 0, "s": 1, "m": 2, "h": 3, "d": 4, "y": 5} time_const = {1: 1.0, 2: 60.0, 3: 3600.0, 4: 86400.0, 5: 31557600.0} def __init__( @@ -511,7 +509,6 @@ def __init__( nseg = len(segment_data[i]) self.segment_data[i] = self.get_empty_segment_data(nseg) for n in segment_data[i].dtype.names: - # inds = (segment_data[i]['nseg'] -1).astype(int) self.segment_data[i][n] = segment_data[i][n] # compute outreaches if nseg and outseg columns have non-default values if ( @@ -920,7 +917,6 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): # these could also be implemented as structured arrays with a column for segment number current_6d = {} current_6e = {} - # print(i,icalc,nstrm,isfropt,reachinput) for j in range(itmp): dataset_6a = _parse_6a(f.readline(), option) current_aux[j] = dataset_6a[-1] @@ -970,7 +966,6 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): dataset_6d.append( _get_dataset(f.readline(), [0.0] * 8) ) - # dataset_6d.append(list(map(float, f.readline().strip().split()))) current_6d[temp_nseg] = dataset_6d if icalc == 4: nstrpts = dataset_6a[5] @@ -1095,7 +1090,7 @@ def check(self, f=None, verbose=True, level=1, checktype=None): pth = os.path.join(self.parent.model_ws, f) f = open(pth, "w") f.write(f"{chk.txt}\n") - # f.close() + f.close() return chk def assign_layers(self, adjust_botms=False, pad=1.0): @@ -1152,8 +1147,6 @@ def assign_layers(self, adjust_botms=False, pad=1.0): self.reach_data.j == jb ) botm[-1, ib, jb] = streambotms[inds].min() - pad - # l.append(botm[-1, ib, jb]) - # botm[-1, below_i, below_j] = streambotms[below] - pad l.append(botm[-1, below_i, below_j]) header += ",new_model_botm" self.parent.dis.botm = botm @@ -1205,65 +1198,7 @@ def get_outlets(self, level=0, verbose=True): per > 0 > self.dataset_5[per][0] ): # skip stress periods where seg data not defined continue - # segments = self.segment_data[per].nseg - # outsegs = self.segment_data[per].outseg - # - # all_outsegs = np.vstack([segments, outsegs]) - # max_outseg = all_outsegs[-1].max() - # knt = 1 - # while max_outseg > 0: - # - # nextlevel = np.array([outsegs[s - 1] if s > 0 and s < 999999 else 0 - # for s in all_outsegs[-1]]) - # - # all_outsegs = np.vstack([all_outsegs, nextlevel]) - # max_outseg = nextlevel.max() - # if max_outseg == 0: - # break - # knt += 1 - # if knt > self.nss: - # # subset outsegs map to only include rows with outseg number > 0 in last column - # circular_segs = all_outsegs.T[all_outsegs[-1] > 0] - # - # # only retain one instance of each outseg number at 
iteration=nss - # vals = [] # append outseg values to vals after they've appeared once - # mask = [(True, vals.append(v))[0] - # if v not in vals - # else False for v in circular_segs[-1]] - # circular_segs = circular_segs[:, np.array(mask)] - # - # # cull the circular segments array to remove duplicate instances of routing circles - # circles = [] - # duplicates = [] - # for i in range(np.shape(circular_segs)[0]): - # # find where values in the row equal the last value; - # # record the index of the second to last instance of last value - # repeat_start_ind = np.where(circular_segs[i] == circular_segs[i, -1])[0][-2:][0] - # # use that index to slice out the repeated segment sequence - # circular_seq = circular_segs[i, repeat_start_ind:].tolist() - # # keep track of unique sequences of repeated segments - # if set(circular_seq) not in circles: - # circles.append(set(circular_seq)) - # duplicates.append(False) - # else: - # duplicates.append(True) - # circular_segs = circular_segs[~np.array(duplicates), :] - # - # txt += '{0} instances where an outlet was not found after {1} consecutive segments!\n' \ - # .format(len(circular_segs), self.nss) - # if level == 1: - # txt += '\n'.join([' '.join(map(str, row)) for row in circular_segs]) + '\n' - # else: - # f = 'circular_routing.csv' - # np.savetxt(f, circular_segs, fmt='%d', delimiter=',', header=txt) - # txt += 'See {} for details.'.format(f) - # if verbose: - # print(txt) - # break - # # the array of segment sequence is useful for other other operations, - # # such as plotting elevation profiles - # self.outsegs[per] = all_outsegs - # + # use graph instead of above loop nrow = len(self.segment_data[per].nseg) ncol = np.max( @@ -1275,12 +1210,6 @@ def get_outlets(self, level=0, verbose=True): all_outsegs[i, : len(v)] = v all_outsegs.sort(axis=0) self.outsegs[per] = all_outsegs - # create a dictionary listing outlets associated with each segment - # outlet is the last value in each row of outseg array that is != 0 or 999999 - # self.outlets[per] = {i + 1: r[(r != 0) & (r != 999999)][-1] - # if len(r[(r != 0) & (r != 999999)]) > 0 - # else i + 1 - # for i, r in enumerate(all_outsegs.T)} self.outlets[per] = { k: self.paths[k][-1] if k in self.paths else k for k in self.segment_data[per].nseg @@ -1290,7 +1219,7 @@ def get_outlets(self, level=0, verbose=True): def reset_reaches(self): self.reach_data.sort(order=["iseg", "ireach"]) reach_data = self.reach_data - segment_data = list(set(self.reach_data.iseg)) # self.segment_data[0] + segment_data = list(set(self.reach_data.iseg)) reach_counts = np.bincount(reach_data.iseg)[1:] reach_counts = dict(zip(range(1, len(reach_counts) + 1), reach_counts)) ireach = [list(range(1, reach_counts[s] + 1)) for s in segment_data] @@ -1443,9 +1372,7 @@ def get_variable_by_stress_period(self, varname): all_data[inds, per] = self.segment_data[per][varname] dtype.append((f"{varname}{per}", float)) isvar = all_data.sum(axis=1) != 0 - ra = np.core.records.fromarrays( - all_data[isvar].transpose().copy(), dtype=dtype - ) + ra = np.rec.fromarrays(all_data[isvar].transpose().copy(), dtype=dtype) segs = self.segment_data[0].nseg[isvar] isseg = np.array( [True if s in segs else False for s in self.reach_data.iseg] @@ -1458,7 +1385,7 @@ def get_variable_by_stress_period(self, varname): return ra.view(np.recarray) def repair_outsegs(self): - isasegment = np.in1d( + isasegment = np.isin( self.segment_data[0].outseg, self.segment_data[0].nseg ) isasegment = isasegment | (self.segment_data[0].outseg < 0) @@ -1584,7 +1511,7 @@ def 
plot_path(self, start_seg=None, end_seg=0, plot_segment_lines=True): # slice the path path = np.array(self.paths[start_seg]) - endidx = np.where(path == end_seg)[0] + endidx = np.asarray(path == end_seg).nonzero()[0] endidx = endidx if len(endidx) > 0 else None path = path[: np.squeeze(endidx)] path = [s for s in path if s > 0] # skip lakes for now @@ -1596,7 +1523,7 @@ def plot_path(self, start_seg=None, end_seg=0, plot_segment_lines=True): dist = np.cumsum(tmp.rchlen.values) * to_miles.get(mfunits, 1.0) # segment starts - starts = dist[np.where(tmp.ireach.values == 1)[0]] + starts = dist[np.asarray(tmp.ireach.values == 1).nonzero()[0]] ax = plt.subplots(figsize=(11, 8.5))[-1] ax.plot(dist, tops, label="Model top") @@ -1769,7 +1696,6 @@ def _write_reach_data(self, f_sfr): ), "MfList.__tofile() data arg not a recarray" # decide which columns to write - # columns = self._get_item2_names() columns = _get_item2_names( self.nstrm, self.reachinput, @@ -1777,10 +1703,6 @@ def _write_reach_data(self, f_sfr): structured=self.parent.structured, ) - # Add one to the kij indices - # names = self.reach_data.dtype.names - # lnames = [] - # [lnames.append(name.lower()) for name in names] # --make copy of data for multiple calls d = np.array(self.reach_data) for idx in ["k", "i", "j", "node"]: @@ -1973,10 +1895,6 @@ def write_file(self, filename=None): """ - # tabfiles = False - # tabfiles_dict = {} - # transroute = False - # reachinput = False if filename is not None: self.fn_path = filename @@ -2034,7 +1952,6 @@ def write_file(self, filename=None): f_sfr.write("\n") if icalc == 4: - # nstrpts = self.segment_data[i][j][5] for k in range(3): for d in self.channel_flow_data[i][nseg][k]: f_sfr.write(f"{d:.2f} ") @@ -2405,7 +2322,6 @@ def numbering(self): passed = False if self.verbose: print(headertxt.strip()) - # for per, segment_data in self.segment_data.items(): inds = (sd.outseg < sd.nseg) & (sd.outseg > 0) @@ -2436,7 +2352,6 @@ def routing(self): if self.verbose: print(headertxt.strip()) - # txt += self.sfr.get_outlets(level=self.level, verbose=False) # will print twice if verbose=True # simpler check method using paths from routing graph circular_segs = [k for k, v in self.sfr.paths.items() if v is None] if len(circular_segs) > 0: @@ -2496,7 +2411,7 @@ def routing(self): # max node with * a tolerance # 1.25 * hyp is greater than distance of two diagonally adjacent nodes # where one is 1.5x larger than the other - breaks = np.where(dist > hyp * 1.25) + breaks = np.asarray(dist > hyp * 1.25).nonzero() breaks_reach_data = rd[breaks] segments_with_breaks = set(breaks_reach_data.iseg) if len(breaks) > 0: @@ -2838,10 +2753,6 @@ def elevations(self, min_strtop=-10, max_strtop=15000): # (for other uses). Not sure if other check methods should also copy reach_data directly from # SFR package instance for consistency. 
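# Editor's note on the repair_outsegs hunk above: np.in1d is deprecated
# (formally in NumPy 2.0) in favor of np.isin, which is also more general
# in that it preserves the shape of its first argument rather than always
# flattening. Equivalence check on a small example:

import numpy as np

outseg = np.array([2, 3, 0, 5])
nseg = np.array([1, 2, 3, 4, 5])
assert (np.isin(outseg, nseg) == [True, True, False, True]).all()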
- # use outreach values to get downstream elevations - # non_outlets = reach_data[reach_data.outreach != 0] - # outreach_elevdn = np.array([reach_data.strtop[o - 1] for o in reach_data.outreach]) - # d_strtop = outreach_elevdn[reach_data.outreach != 0] - non_outlets.strtop rd = recfunctions.append_fields( rd, names=["strtopdn", "d_strtop"], @@ -3320,8 +3231,6 @@ def _parse_1c(line, reachinput, transroute): """ na = 0 - # line = _get_dataset(line, [0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 1, 30, 1, 2, 0.75, 0.0001, []]) - # line = line.strip().split() line = line_parse(line) nstrm = int(line.pop(0)) @@ -3398,7 +3307,6 @@ def _parse_6a(line, option): ------- a list of length 13 containing all variables for Data Set 6a """ - # line = line.strip().split() line = line_parse(line) xyz = [] diff --git a/flopy/modflow/mfstr.py b/flopy/modflow/mfstr.py index d7a5aaf52..572efa7e1 100644 --- a/flopy/modflow/mfstr.py +++ b/flopy/modflow/mfstr.py @@ -371,9 +371,7 @@ def __init__( ) assert d.dtype == self.dtype, e elif isinstance(d, np.ndarray): - d = np.core.records.fromarrays( - d.transpose(), dtype=self.dtype - ) + d = np.rec.fromarrays(d.transpose(), dtype=self.dtype) elif isinstance(d, int): if model.verbose: if d < 0: @@ -404,9 +402,7 @@ def __init__( ) assert d.dtype == self.dtype2, e elif isinstance(d, np.ndarray): - d = np.core.records.fromarrays( - d.transpose(), dtype=self.dtype2 - ) + d = np.rec.fromarrays(d.transpose(), dtype=self.dtype2) elif isinstance(d, int): if model.verbose: if d < 0: diff --git a/flopy/modflow/mfswi2.py b/flopy/modflow/mfswi2.py index 4e4672458..bc783208d 100644 --- a/flopy/modflow/mfswi2.py +++ b/flopy/modflow/mfswi2.py @@ -476,7 +476,6 @@ def write_file(self, check=True, f=None): if self.nobs > 0: f.write("# Dataset 8\n") for i in range(self.nobs): - # f.write(self.obsnam[i] + 3 * '%10i' % self.obslrc + '\n') f.write(f"{self.obsnam[i]} ") for v in self.obslrc[i, :]: f.write(f"{v + 1:10d}") diff --git a/flopy/modflow/mfswr1.py b/flopy/modflow/mfswr1.py index 9e6c815cc..ae628a9c5 100644 --- a/flopy/modflow/mfswr1.py +++ b/flopy/modflow/mfswr1.py @@ -87,15 +87,12 @@ def write_file(self): """ Write the package file. - Returns - ------- - None + Raises + ------ + NotImplementedError """ - print("SWR1 write method not implemented yet") - # f = open(self.fn_path, 'w') - # f.write('{0}\n'.format(self.heading)) - # f.close() + raise NotImplementedError("SWR1 write method not implemented yet") @classmethod def load(cls, f, model, ext_unit_dict=None): diff --git a/flopy/modflow/mfswt.py b/flopy/modflow/mfswt.py index a769be0f3..3a43cbfe9 100644 --- a/flopy/modflow/mfswt.py +++ b/flopy/modflow/mfswt.py @@ -821,9 +821,6 @@ def load(cls, f, model, ext_unit_dict=None): print(f" loading swt dataset 15 for layer {kk}") ids16 = np.empty(26, dtype=np.int32) ids16 = read1d(f, ids16) - # for k in range(1, 26, 2): - # model.add_pop_key_list(ids16[k]) - # ids16[k] = 2054 # all sub-wt data sent to unit 2054 # dataset 17 ids17 = [0] * iswtoc for k in range(iswtoc): diff --git a/flopy/modflow/mfwel.py b/flopy/modflow/mfwel.py index b6532331e..e211c949a 100644 --- a/flopy/modflow/mfwel.py +++ b/flopy/modflow/mfwel.py @@ -251,8 +251,10 @@ def write_file(self, f=None): """ Write the package file. 
- Parameters: - f: (str) optional file name + Parameters + ---------- + f : str, optional + file name Returns ------- diff --git a/flopy/modflow/mfzon.py b/flopy/modflow/mfzon.py index 891840d6b..ca63a8efd 100644 --- a/flopy/modflow/mfzon.py +++ b/flopy/modflow/mfzon.py @@ -95,16 +95,16 @@ def write_file(self): """ Write the package file. - Returns - ------- - None + Raises + ------ + NotImplementedError Notes ----- Not implemented because parameters are only supported on load """ - return + raise NotImplementedError @classmethod def load(cls, f, model, nrow=None, ncol=None, ext_unit_dict=None): diff --git a/flopy/modpath/mp6.py b/flopy/modpath/mp6.py index fe4c96868..7f8b04d66 100644 --- a/flopy/modpath/mp6.py +++ b/flopy/modpath/mp6.py @@ -16,13 +16,12 @@ class Modpath6List(Package): def __init__(self, model, extension="list", listunit=7): # call base package constructor super().__init__(model, extension, "LIST", listunit) - # self.parent.add_package(self) This package is not added to the base + # This package is not added to the base # model so that it is not included in get_name_file_entries() return def write_file(self): - # Not implemented for list class - return + raise NotImplementedError class Modpath6(BaseModel): @@ -441,9 +440,6 @@ def append_node(ifaces_well, wellid, node_number, k, i, j): append_node(side_faces, wellid, n, k, i, j) elif package.upper() == "RCH": ParticleGenerationOption = 1 - # for j in range(nrow): - # for i in range(ncol): - # group_name.append('rch') group_name.append("rch") group_placement.append( [ diff --git a/flopy/modpath/mp6bas.py b/flopy/modpath/mp6bas.py index 7e9333f11..141e8397e 100644 --- a/flopy/modpath/mp6bas.py +++ b/flopy/modpath/mp6bas.py @@ -152,7 +152,6 @@ def write_file(self): for i in range(self.def_face_ct): f_bas.write(f"{self.bud_label[i]:20s}\n") f_bas.write(f"{self.def_iface[i]:2d}\n") - # f_bas.write('\n') # need to reset lc fmtin lc = self.laytyp @@ -161,7 +160,6 @@ def write_file(self): # from modpath bas--uses keyword array types f_bas.write(self.ibound.get_file_entry()) # from MT3D bas--uses integer array types - # f_bas.write(self.ibound.get_file_entry()) f_bas.write(self.prsity.get_file_entry()) f_bas.write(self.prsityCB.get_file_entry()) diff --git a/flopy/modpath/mp6sim.py b/flopy/modpath/mp6sim.py index f2f9ea6ac..1cf465958 100644 --- a/flopy/modpath/mp6sim.py +++ b/flopy/modpath/mp6sim.py @@ -150,17 +150,6 @@ def __init__( self.retard_fac = retard_fac self.retard_fcCB = retard_fcCB - # self.mask_nlay = Util3d(model,(nlay,nrow,ncol),np.int32,\ - # mask_nlay,name='mask_nlay',locat=self.unit_number[0]) - # self.mask_1lay = Util3d(model,(nlay,nrow,ncol),np.int32,\ - # mask_1lay,name='mask_1lay',locat=self.unit_number[0]) - # self.stop_zone = Util3d(model,(nlay,nrow,ncol),np.int32,\ - # stop_zone,name='stop_zone',locat=self.unit_number[0]) - # self.retard_fac = Util3d(model,(nlay,nrow,ncol),np.float32,\ - # retard_fac,name='retard_fac',locat=self.unit_number[0]) - # self.retard_fcCB = Util3d(model,(nlay,nrow,ncol),np.float32,\ - # retard_fcCB,name='retard_fcCB',locat=self.unit_number[0]) - self.parent.add_package(self) def check(self, f=None, verbose=True, level=1, checktype=None): diff --git a/flopy/modpath/mp7.py b/flopy/modpath/mp7.py index 618ea20aa..75d4a0ce8 100644 --- a/flopy/modpath/mp7.py +++ b/flopy/modpath/mp7.py @@ -29,13 +29,12 @@ def __init__(self, model, extension="list", unitnumber=None): # call base package constructor super().__init__(model, extension, "LIST", unitnumber) - # self.parent.add_package(self) 
This package is not added to the base + # This package is not added to the base # model so that it is not included in get_name_file_entries() return def write_file(self): - # Not implemented for list class - return + raise NotImplementedError class Modpath7(BaseModel): diff --git a/flopy/mt3d/mt.py b/flopy/mt3d/mt.py index b31c5c74b..45599acee 100644 --- a/flopy/mt3d/mt.py +++ b/flopy/mt3d/mt.py @@ -36,8 +36,7 @@ def __repr__(self): return "List package class" def write_file(self): - # Not implemented for list class - return + raise NotImplementedError class Mt3dms(BaseModel): @@ -201,13 +200,8 @@ def __init__( # the starting external data unit number self._next_ext_unit = 2000 if external_path is not None: - # assert model_ws == '.', "ERROR: external cannot be used " + \ - # "with model_ws" - - # external_path = os.path.join(model_ws, external_path) if os.path.exists(external_path): print(f"Note: external_path {external_path} already exists") - # assert os.path.exists(external_path),'external_path does not exist' else: os.mkdir(external_path) self.external = True @@ -510,9 +504,6 @@ def load( namefile_path, mt.mfnam_packages, verbose=verbose ) except Exception as e: - # print("error loading name file entries from file") - # print(str(e)) - # return None raise Exception( f"error loading name file entries from file:\n{e!s}" ) diff --git a/flopy/mt3d/mtcts.py b/flopy/mt3d/mtcts.py index 396d3bc08..6e2eba045 100644 --- a/flopy/mt3d/mtcts.py +++ b/flopy/mt3d/mtcts.py @@ -144,19 +144,6 @@ def __init__( self, ): raise NotImplementedError() - # # unit number - # if unitnumber is None: - # unitnumber = self.unitnumber - # Package.__init__(self, model, extension, 'CTS', self.unitnumber) - # - # # Set dimensions - # nrow = model.nrow - # ncol = model.ncol - # nlay = model.nlay - # ncomp = model.ncomp - # mcomp = model.mcomp - - # Set package specific parameters @classmethod def load( @@ -201,70 +188,6 @@ def load( raise NotImplementedError() - # if model.verbose: - # sys.stdout.write('loading cts package file...\n') - # - # # Open file, if necessary - # openfile = not hasattr(f, 'read') - # if openfile: - # filename = f - # f = open(filename, 'r') - # - # # Set dimensions if necessary - # if nlay is None: - # nlay = model.nlay - # if nrow is None: - # nrow = model.nrow - # if ncol is None: - # ncol = model.ncol - # if nper is None: - # nper = model.nper - # if ncomp is None: - # ncomp = model.ncomp - # - # # Item 1 (MXCTS, ICTSOUT, MXEXT, MXINJ, MXWEL, IFORCE) - # line = f.readline() - # if line[0] == '#': - # raise ValueError('CTS package does not support comment lines') - # if model.verbose: - # print(' loading MXCTS, ICTSOUT, MXEXT, MXINJ, MXWEL, IFORCE...') - # - # m_arr = line.strip().split() - # mxcts = int(m_arr[0]) - # ictsout = int(m_arr[1]) - # mxext = int(m_arr[2]) - # mxinj = int(m_arr[3]) - # mxwel = int(m_arr[4]) - # iforce = int(m_arr[5]) - # - # # Start of transient data - # for iper in range(nper): - # - # if model.verbose: - # print(' loading CTS data for kper {0:5d}'.format(iper + 1)) - # - # # Item 2 (NCTS) - # line = f.readline() - # m_arr = line.strip().split() - # ncts = int(m_arr[0]) - # - # # Start of information for each CTS - # for icts in range(ncts): - # - # if model.verbose: - # print(' loading data for system #{0:5d}' - # .format(icts + 1)) - # # Item 3 (ICTS, NEXT, NINJ, ITRTINJ) - # line = f.readline() - # m_arr = line.strip().split() - # icts = int(m_arr[0]) - # next = int(m_arr[1]) - # ninj = int(m_arr[2]) - # itrtinj = int(m_arr[3]) - # - # if openfile: - # 
f.close() - @staticmethod def get_default_CTS_dtype(ncomp=1, iforce=0): """ @@ -273,53 +196,6 @@ def get_default_CTS_dtype(ncomp=1, iforce=0): raise NotImplementedError() - # # Item 3 - # type_list = [("icts", int), ("next", int), ("ninj", int), - # ("itrtinj", int)] - # - # # Create a list for storing items 5, 6, & 9 - # items_5_6_7_9_list = [] - # if ncomp > 1: - # # Item 5 in CTS input - # for comp in range(1, ncomp+1): - # qincts_name = "qincts{0:d}".format(comp) - # cincts_name = "cincts{0:d}".format(comp) - # items_5_6_7_9_list.append((qincts_name, np.float32)) - # items_5_6_7_9_list.append((cincts_name, np.float32)) - # - # # Item 6 in CTS input - # for comp in range(1, ncomp+1): - # ioptinj_name = "ioptinj{0:d}".format(comp) - # cmchginj_name = "cmchginj{0:d}".format(comp) - # items_5_6_7_9_list.append((ioptinj_name, int)) - # items_5_6_7_9_list.append((cmchginj_name, np.float32)) - # - # if iforce == 0: - # for comp in range(1, ncomp+1): - # cnte_name = "cnte{0:d}".format(comp) - # items_5_6_7_9_list.append(cnte_name, np.float32) - # - # # Item 9 in CTS input - # items_5_6_7_9_list.append(("qoutcts", np.float32)) - # - # type_list.append(items_5_6_7_9_list) - # - # # Now create a list for the records in Item 4 - # ext_wels_list = [("kext", int), ("iext", int), ("jext", int), - # ("iwext", int)] - # - # type_list.append(ext_wels_list) - # - # # Now create a list for the records in Item 8 - # inj_wels_list = [("kinj", int), ("iinj", int), ("jinj", int), - # ("iwinj", int)] - # type_list.append(inj_wels_list) - # - # # - # - # dtype = np.dtype(type_list) - # dtype = dtype - @staticmethod def _ftype(): return "CTS" diff --git a/flopy/mt3d/mtdsp.py b/flopy/mt3d/mtdsp.py index b0dd15512..c7811c013 100644 --- a/flopy/mt3d/mtdsp.py +++ b/flopy/mt3d/mtdsp.py @@ -436,12 +436,6 @@ def load( ext_unit_dict, array_format="mt3d", ) - # if model.mcomp > 1: - # for icomp in range(2, model.mcomp + 1): - # name = "dmcoef" + str(icomp + 1) - # u2d = Util2d.load(f, model, (nlay,), np.float32, name, - # ext_unit_dict, array_format="mt3d") - # kwargs[name] = u2d if openfile: f.close() diff --git a/flopy/mt3d/mtssm.py b/flopy/mt3d/mtssm.py index 7bc8416f7..8887c57cd 100644 --- a/flopy/mt3d/mtssm.py +++ b/flopy/mt3d/mtssm.py @@ -304,19 +304,6 @@ def __init__( array_free_format=False, ) self.crch.append(t2d) - # else: - # try: - # if model.mf.rch is not None: - # print("found 'rch' in modflow model, resetting crch to 0.0") - # self.crch = [Transient2d(model, (nrow, ncol), np.float32, - # 0, name='crch1', - # locat=self.unit_number[0], - # array_free_format=False)] - # - # else: - # self.crch = None - # except: - # self.crch = None self.cevt = None try: @@ -366,20 +353,6 @@ def __init__( ) self.cevt.append(t2d) - # else: - # try: - # if model.mf.evt is not None or model.mf.ets is not None: - # print("found 'ets'/'evt' in modflow model, resetting cevt to 0.0") - # self.cevt = [Transient2d(model, (nrow, ncol), np.float32, - # 0, name='cevt1', - # locat=self.unit_number[0], - # array_free_format=False)] - # - # else: - # self.cevt = None - # except: - # self.cevt = None - if len(list(kwargs.keys())) > 0: raise Exception( "SSM error: unrecognized kwargs: " diff --git a/flopy/mt3d/mttob.py b/flopy/mt3d/mttob.py index 0f2f82575..1d7927e48 100644 --- a/flopy/mt3d/mttob.py +++ b/flopy/mt3d/mttob.py @@ -70,8 +70,7 @@ def write_file(self): f_tob.write( "%s%10d%10d%10d\n" % (self.outnam, inConcObs, inFluxObs, inSaveObs) ) - # if (inConcObs): - # + if inFluxObs: nFluxGroup = len(self.FluxGroups) f_tob.write( diff --git 
a/flopy/pakbase.py b/flopy/pakbase.py index 31720c849..bf153da5d 100644 --- a/flopy/pakbase.py +++ b/flopy/pakbase.py @@ -812,7 +812,7 @@ def plot(self, **kwargs): MfList dictionary key. (default is None) Returns - ---------- + ------- axes : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis are returned. @@ -838,37 +838,10 @@ def plot(self, **kwargs): axes = PlotUtilities._plot_package_helper(self, **kwargs) return axes - def to_shapefile(self, filename, **kwargs): - """ - Export 2-D, 3-D, and transient 2-D model data to shapefile (polygons). - Adds an attribute for each layer in each data array - - Parameters - ---------- - filename : str - Shapefile name to write - - Returns - ---------- - None - - See Also - -------- - - Notes - ----- - - Examples - -------- - >>> import flopy - >>> ml = flopy.modflow.Modflow.load('test.nam') - >>> ml.lpf.to_shapefile('test_hk.shp') - - """ - import warnings - - warnings.warn("to_shapefile() is deprecated. use .export()") - self.export(filename) + def to_shapefile(self, *args, **kwargs): + """Raises AttributeError, use :meth:`export`.""" + # deprecated 3.2.4, changed to raise AttributeError version 3.8 + raise AttributeError(".to_shapefile() was removed; use .export()") def webdoc(self): """Open the web documentation.""" @@ -888,8 +861,7 @@ def write_file(self, f=None, check=False): Every Package needs its own write_file function """ - print("IMPLEMENTATION ERROR: write_file must be overloaded") - return + raise NotImplementedError("write_file must be overloaded") @staticmethod def load( diff --git a/flopy/pest/params.py b/flopy/pest/params.py index 7dc2535f9..ece259d54 100644 --- a/flopy/pest/params.py +++ b/flopy/pest/params.py @@ -74,7 +74,7 @@ def zonearray2params( plist = [] for i, iz in enumerate(parzones): span = {} - span["idx"] = np.where(zonearray == iz) + span["idx"] = np.asarray(zonearray == iz).nonzero() parname = f"{partype}_{iz}" startvalue = parvals[i] p = Params( diff --git a/flopy/plot/crosssection.py b/flopy/plot/crosssection.py index bca4b47fb..41231211c 100644 --- a/flopy/plot/crosssection.py +++ b/flopy/plot/crosssection.py @@ -797,9 +797,6 @@ def plot_grid(self, **kwargs): col = self.get_grid_line_collection(**kwargs) if col is not None: ax.add_collection(col) - # ax.set_xlim(self.extent[0], self.extent[1]) - # ax.set_ylim(self.extent[2], self.extent[3]) - return col def plot_bc( diff --git a/flopy/plot/map.py b/flopy/plot/map.py index 27b4677c0..06473bf7f 100644 --- a/flopy/plot/map.py +++ b/flopy/plot/map.py @@ -797,7 +797,7 @@ def plot_pathline(self, pl, travel_time=None, **kwargs): else: kon = self.layer else: - kon = self.layer + kon = -1 # configure plot settings marker = kwargs.pop("marker", None) diff --git a/flopy/plot/plotutil.py b/flopy/plot/plotutil.py index 57af252a4..4efab156a 100644 --- a/flopy/plot/plotutil.py +++ b/flopy/plot/plotutil.py @@ -346,9 +346,7 @@ def _plot_package_helper(package, **kwargs): ) elif isinstance(value, DataInterface): - if ( - value.data_type == DataType.transientlist - ): # isinstance(value, (MfList, MFTransientList)): + if value.data_type == DataType.transientlist: if package.parent.verbose: print( "plotting {} package MfList instance: {}".format( @@ -404,9 +402,7 @@ def _plot_package_helper(package, **kwargs): if ax is not None: caxs.append(ax) - elif ( - value.data_type == DataType.array3d - ): # isinstance(value, Util3d): + elif value.data_type == DataType.array3d: if value.array is not None: if package.parent.verbose: print( @@ 
-414,7 +410,6 @@ def _plot_package_helper(package, **kwargs): package.name[0], item ) ) - # fignum = list(range(ifig, ifig + inc)) fignum = list( range( defaults["initial_fig"], @@ -438,9 +433,7 @@ def _plot_package_helper(package, **kwargs): ) ) - elif ( - value.data_type == DataType.array2d - ): # isinstance(value, Util2d): + elif value.data_type == DataType.array2d: if value.array is not None: if len(value.array.shape) == 2: # is this necessary? if package.parent.verbose: @@ -470,9 +463,7 @@ def _plot_package_helper(package, **kwargs): ) ) - elif ( - value.data_type == DataType.transient2d - ): # isinstance(value, Transient2d): + elif value.data_type == DataType.transient2d: if value.array is not None: if package.parent.verbose: print( @@ -1656,9 +1647,8 @@ def line_intersect_grid(ptsin, xgrid, ygrid): numb = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3) denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1) ua = np.ones(denom.shape, dtype=denom.dtype) * np.nan - idx = np.where(denom != 0.0) + idx = np.asarray(denom != 0.0).nonzero() ua[idx] = numa[idx] / denom[idx] - # ub = numb / denom del numa del numb del denom @@ -2241,7 +2231,7 @@ def advanced_package_bc_helper(pkg, modelgrid, kper): idx = np.array([list(i) for i in mflist["cellid"]], dtype=int).T else: iuzfbnd = pkg.iuzfbnd.array - idx = np.where(iuzfbnd != 0) + idx = np.asarray(iuzfbnd != 0).nonzero() idx = np.append([[0] * idx[-1].size], idx, axis=0) elif pkg.package_type in ("lak", "maw"): if pkg.parent.version == "mf6": @@ -2249,7 +2239,7 @@ def advanced_package_bc_helper(pkg, modelgrid, kper): idx = np.array([list(i) for i in mflist["cellid"]], dtype=int).T else: lakarr = pkg.lakarr.array[kper] - idx = np.where(lakarr != 0) + idx = np.asarray(lakarr != 0).nonzero() idx = np.array(idx) else: raise NotImplementedError( @@ -2742,7 +2732,7 @@ def to_mp7_pathlines( data = data.to_records(index=False) # build mp7 format recarray - ret = np.core.records.fromarrays( + ret = np.rec.fromarrays( [ data[seqn_key], data["iprp"], @@ -2851,7 +2841,7 @@ def to_mp7_endpoints( endpts = endpts.to_records(index=False) # build mp7 format recarray - ret = np.core.records.fromarrays( + ret = np.rec.fromarrays( [ endpts["sequencenumber"], endpts["iprp"], @@ -2938,7 +2928,7 @@ def to_prt_pathlines( data = data.to_records(index=False) # build prt format recarray - ret = np.core.records.fromarrays( + ret = np.rec.fromarrays( [ data["stressperiod"], data["timestep"], diff --git a/flopy/seawat/swt.py b/flopy/seawat/swt.py index 689ff5af0..7339b6991 100644 --- a/flopy/seawat/swt.py +++ b/flopy/seawat/swt.py @@ -24,8 +24,7 @@ def __repr__(self): return "List package class" def write_file(self): - # Not implemented for list class - return + raise NotImplementedError class Seawat(BaseModel): @@ -146,10 +145,8 @@ def __init__( model_ws == "." ), "ERROR: external cannot be used with model_ws" - # external_path = os.path.join(model_ws, external_path) if os.path.exists(external_path): print(f"Note: external_path {external_path} already exists") - # assert os.path.exists(external_path),'external_path does not exist' else: os.mkdir(external_path) self.external = True @@ -295,17 +292,12 @@ def _set_name(self, value): # Overrides BaseModel's setter for name property super()._set_name(value) - # for i in range(len(self.lst.extension)): - # self.lst.file_name[i] = self.name + '.' 
+ self.lst.extension[i] - # return - def change_model_ws(self, new_pth=None, reset_external=False): # if hasattr(self,"_mf"): if self._mf is not None: self._mf.change_model_ws( new_pth=new_pth, reset_external=reset_external ) - # if hasattr(self,"_mt"): if self._mt is not None: self._mt.change_model_ws( new_pth=new_pth, reset_external=reset_external @@ -496,6 +488,7 @@ def load( exe_name=None, verbose=verbose, model_ws=model_ws, + load_only=load_only, forgive=False, ) diff --git a/flopy/seawat/swtvsc.py b/flopy/seawat/swtvsc.py index f7bcaf319..368bd1f6a 100644 --- a/flopy/seawat/swtvsc.py +++ b/flopy/seawat/swtvsc.py @@ -227,14 +227,6 @@ def write_file(self): if self.mt3dmuflg == -1: f_vsc.write(f"{self.viscref}\n") f_vsc.write(f"{self.nsmueos} {self.mutempopt}\n") - # if self.nsmueos == 1: - # f_vsc.write('{} {} {}\n'.format(self.mtmuspec, self.dmudc, - # self.cmuref)) - # else: - # for iwr in range(self.nsmueos): - # f_vsc.write('{} {} {}\n'.format(self.mtmuspec[iwr], - # self.dmudc[iwr], - # self.cmuref[iwr])) if self.nsmueos > 0: for iwr in range(self.nsmueos): f_vsc.write( diff --git a/flopy/utils/binaryfile.py b/flopy/utils/binaryfile.py index 0efce1c49..fb7141092 100644 --- a/flopy/utils/binaryfile.py +++ b/flopy/utils/binaryfile.py @@ -10,15 +10,19 @@ """ import os +import tempfile import warnings from pathlib import Path from typing import List, Optional, Union import numpy as np +import pandas as pd from ..utils.datafile import Header, LayerFile from .gridutil import get_lni +HEAD_TEXT = " HEAD" + def write_head( fbin, @@ -27,7 +31,7 @@ def write_head( kper=1, pertim=1.0, totim=1.0, - text=" HEAD", + text=HEAD_TEXT, ilay=1, ): dt = np.dtype( @@ -170,10 +174,10 @@ class BinaryHeader(Header): Parameters ---------- - bintype : str - Type of file being opened. Accepted values are 'head' and 'ucn'. - precision : str - Precision of floating point data in the file. + bintype : str, default None + Type of file being opened. Accepted values are 'head' and 'ucn'. + precision : str, default 'single' + Precision of floating point data in the file. """ @@ -274,10 +278,16 @@ def binaryread_struct(file, vartype, shape=(1,), charlen=16): cannot be returned, only multi-character strings. Shape has no affect on strings. + .. deprecated:: 3.8.0 + Use :meth:`binaryread` instead. + """ import struct - import numpy as np + warnings.warn( + "binaryread_struct() is deprecated; use binaryread() instead.", + DeprecationWarning, + ) # store the mapping from type to struct format (fmt) typefmtd = {np.int32: "i", np.float32: "f", np.float64: "d"} @@ -292,7 +302,7 @@ def binaryread_struct(file, vartype, shape=(1,), charlen=16): # find the number of bytes for one value numbytes = vartype(1).nbytes # find the number of values - nval = np.core.fromnumeric.prod(shape) + nval = np.prod(shape) fmt = str(nval) + fmt s = file.read(numbytes * nval) result = struct.unpack(fmt, s) @@ -306,21 +316,48 @@ def binaryread_struct(file, vartype, shape=(1,), charlen=16): def binaryread(file, vartype, shape=(1,), charlen=16): """ - Uses numpy to read from binary file. This was found to be faster than the - struct approach and is used as the default. + Read character bytes, scalar or array values from a binary file. + Parameters + ---------- + file : file object + is an open file object + vartype : type + is the return variable type: bytes, numpy.int32, + numpy.float32, or numpy.float64. Using str is deprecated since + bytes is preferred. 
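For reference, a minimal sketch of driving the retyped binaryread() directly, walking the record header of a hypothetical single-precision head file ("model.hds" and the field order are illustrative, following the header dtype used above):

    import numpy as np
    from flopy.utils.binaryfile import binaryread

    with open("model.hds", "rb") as f:
        kstp = binaryread(f, np.int32)[0]        # time step number
        kper = binaryread(f, np.int32)[0]        # stress period number
        pertim = binaryread(f, np.float32)[0]    # elapsed time in this period
        totim = binaryread(f, np.float32)[0]     # total simulation time
        text = binaryread(f, bytes, charlen=16)  # e.g. b'            HEAD'
    # a truncated file now raises EOFError instead of returning short data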
+ shape : tuple, default (1,) + is the shape of the returned array (shape=(1,) returns a single + value); for example, shape = (nlay, nrow, ncol) + charlen : int, default 16 + is the length of the character bytes. Note that arrays of bytes + cannot be returned, only multi-character bytes. Shape has no + effect on bytes. + + Raises + ------ + EOFError """ - # read a string variable of length charlen if vartype == str: - result = file.read(charlen * 1) + # handle a holdover from Python 2 + warnings.warn( + "vartype=str is deprecated; use vartype=bytes instead.", + DeprecationWarning, + ) + vartype = bytes + if vartype == bytes: + # read character bytes of length charlen + result = file.read(charlen) + if len(result) < charlen: + raise EOFError else: # find the number of values nval = np.prod(shape) result = np.fromfile(file, vartype, nval) - if nval == 1: - result = result # [0] - else: + if result.size < nval: + raise EOFError + if nval != 1: result = np.reshape(result, shape) return result @@ -345,23 +382,18 @@ def get_headfile_precision(filename: Union[str, os.PathLike]): Parameters ---------- filename : str or PathLike - Path of binary MODFLOW file to determine precision. + Path of binary MODFLOW file to determine precision. Returns ------- - result : str - Result will be unknown, single, or double + str + Result will be unknown, single, or double """ # Set default result if neither single or double works result = "unknown" - # Create string containing set of ascii characters - asciiset = " " - for i in range(33, 127): - asciiset += chr(i) - # Open file, and check filesize to ensure this is not an empty file f = open(filename, "rb") f.seek(0, 2) @@ -380,15 +412,12 @@ ("text", "S16"), ] hdr = binaryread(f, vartype) - text = hdr[0][4] - try: - text = text.decode() - for t in text: - if t.upper() not in asciiset: - raise Exception() + charbytes = list(hdr[0][4]) + if min(charbytes) >= 32 and max(charbytes) <= 126: + # check if bytes are within conventional ASCII range result = "single" success = True - except: + else: success = False # next try double @@ -402,14 +431,10 @@ ("text", "S16"), ] hdr = binaryread(f, vartype) - text = hdr[0][4] - try: - text = text.decode() - for t in text: - if t.upper() not in asciiset: - raise Exception() + charbytes = list(hdr[0][4]) + if min(charbytes) >= 32 and max(charbytes) <= 126: result = "double" - except: + else: f.close() raise ValueError( f"Could not determine the precision of the headfile {filename}" @@ -439,12 +464,6 @@ def __init__( ): super().__init__(filename, precision, verbose, kwargs) - def __enter__(self): - return self - - def __exit__(self, *exc): - self.close() - def _build_index(self): """ Build the recordarray and iposarray, which maps the header information @@ -490,9 +509,15 @@ # self.recordarray contains a recordarray of all the headers. self.recordarray = np.array(self.recordarray, dtype=self.header_dtype) - self.iposarray = np.array(self.iposarray) + self.iposarray = np.array(self.iposarray, dtype=np.int64) self.nlay = np.max(self.recordarray["ilay"]) + # provide headers as a pandas frame + self.headers = pd.DataFrame(self.recordarray, index=self.iposarray) + self.headers["text"] = ( + self.headers["text"].str.decode("ascii", "strict").str.strip() + ) + def get_databytes(self, header): """ @@ -536,7 +561,7 @@ def get_ts(self, idx): row, and column values must be zero based.
Returns - ---------- + ------- out : numpy array Array has size (ntimes, ncells + 1). The first column in the data array will contain time (totim). @@ -576,7 +601,7 @@ def get_ts(self, idx): # Find the time index and then put value into result in the # correct location. - itim = np.where(result[:, 0] == header["totim"])[0] + itim = np.asarray(result[:, 0] == header["totim"]).nonzero()[0] result[itim, istat] = binaryread(self.file, self.realtype) istat += 1 return result @@ -609,11 +634,11 @@ class HeadFile(BinaryLayerFile): >>> import flopy.utils.binaryfile as bf >>> hdobj = bf.HeadFile('model.hds', precision='single') - >>> hdobj.list_records() + >>> hdobj.headers >>> rec = hdobj.get_data(kstpkper=(0, 49)) >>> ddnobj = bf.HeadFile('model.ddn', text='drawdown', precision='single') - >>> ddnobj.list_records() + >>> ddnobj.headers >>> rec = ddnobj.get_data(totim=100.) """ @@ -640,81 +665,99 @@ def __init__( def reverse(self, filename: Optional[os.PathLike] = None): """ - Write a new binary head file with the records in reverse order. - If a new filename is not provided, or if the filename is the same - as the existing filename, the file will be overwritten and data - reloaded from the rewritten/reversed file. + Reverse the time order of the currently loaded binary head file. If a head + file name is not provided or the provided name is the same as the existing + filename, the file will be overwritten and reloaded. Parameters ---------- filename : str or PathLike - Path of the new reversed binary file to create. + Path of the reversed binary head file. """ filename = ( Path(filename).expanduser().absolute() - if filename + if filename is not None else self.filename ) - # header array formats - dt = np.dtype( - [ - ("kstp", np.int32), - ("kper", np.int32), - ("pertim", np.float64), - ("totim", np.float64), - ("text", "S16"), - ("ncol", np.int32), - ("nrow", np.int32), - ("ilay", np.int32), - ] - ) - - # make sure we have tdis - if self.tdis is None or not any(self.tdis.perioddata.get_data()): - raise ValueError("tdis mu/st be known to reverse head file") - - # extract period data - pd = self.tdis.perioddata.get_data() - - # get maximum period number and total simulation time - kpermx = len(pd) - 1 - tsimtotal = 0.0 - for tpd in pd: - tsimtotal += tpd[0] - - # get total number of records - nrecords = self.recordarray.shape[0] - - # open backward file - with open(filename, "wb") as fbin: - # loop over head file records in reverse order - for idx in range(nrecords - 1, -1, -1): - # load header array - header = self.recordarray[idx].copy() - - # reverse kstp and kper in the header array - (kstp, kper) = (header["kstp"] - 1, header["kper"] - 1) - kstpmx = pd[kper][1] - 1 - kstpb = kstpmx - kstp - kperb = kpermx - kper - (header["kstp"], header["kper"]) = (kstpb + 1, kperb + 1) + def get_max_kper_kstp_tsim(): + header = self.recordarray[-1] + kper = header["kper"] - 1 + tsim = header["totim"] + kstp = {0: 0} + for i in range(len(self) - 1, -1, -1): + header = self.recordarray[i] + if ( + header["kper"] in kstp + and header["kstp"] > kstp[header["kper"]] + ): + kstp[header["kper"]] += 1 + else: + kstp[header["kper"]] = 0 + return kper, kstp, tsim + + # get max period and time from the head file + maxkper, maxkstp, maxtsim = get_max_kper_kstp_tsim() + # if we have tdis, get max period number and simulation time from it + tdis_maxkper, tdis_maxtsim = None, None + if self.tdis is not None: + pd = self.tdis.perioddata.get_data() + if any(pd): + tdis_maxkper = len(pd) - 1 + tdis_maxtsim = sum([p[0] for p 
in pd]) + # if we have both, check them against each other + if tdis_maxkper is not None: + assert maxkper == tdis_maxkper, ( + f"Max stress period in binary head file ({maxkper}) != " + f"max stress period in provided tdis ({tdis_maxkper})" + ) + assert maxtsim == tdis_maxtsim, ( + f"Max simulation time in binary head file ({maxtsim}) != " + f"max simulation time in provided tdis ({tdis_maxtsim})" + ) - # reverse totim and pertim in the header array - header["totim"] = tsimtotal - header["totim"] - perlen = pd[kper][0] - header["pertim"] = perlen - header["pertim"] + def reverse_header(header): + """Reverse period, step and time fields in the record header""" + + # reverse kstp and kper headers + kstp = header["kstp"] - 1 + kper = header["kper"] - 1 + header["kstp"] = maxkstp[kper] - kstp + 1 + header["kper"] = maxkper - kper + 1 + + # reverse totim and pertim headers + header["totim"] = maxtsim - header["totim"] + perlen = pd[kper][0] + header["pertim"] = perlen - header["pertim"] + return header + + # reverse record order and write to temporary file + temp_dir_path = Path(tempfile.gettempdir()) + temp_file_path = temp_dir_path / filename.name + with open(temp_file_path, "wb") as f: + for i in range(len(self) - 1, -1, -1): + header = self.recordarray[i].copy() + header = reverse_header(header) + data = self.get_data(idx=i) + ilay = header["ilay"] + write_head( + fbin=f, + data=data[ilay - 1], + kstp=header["kstp"], + kper=header["kper"], + pertim=header["pertim"], + totim=header["totim"], + ilay=ilay, + ) - # write header information - h = np.array(header, dtype=dt) - h.tofile(fbin) + # if we're rewriting the original file, close it first + if filename == self.filename: + self.close() - # load and write data - data = self.get_data(idx=idx)[0][0] - data = np.array(data, dtype=np.float64) - data.tofile(fbin) + # move temp file to destination + temp_file_path.replace(filename) # if we rewrote the original file, reinitialize if filename == self.filename: @@ -762,7 +805,7 @@ class UcnFile(BinaryLayerFile): >>> import flopy.utils.binaryfile as bf >>> ucnobj = bf.UcnFile('MT3D001.UCN', precision='single') - >>> ucnobj.list_records() + >>> ucnobj.headers >>> rec = ucnobj.get_data(kstpkper=(0, 0)) """ @@ -829,7 +872,7 @@ class HeadUFile(BinaryLayerFile): >>> import flopy.utils.binaryfile as bf >>> hdobj = bf.HeadUFile('model.hds') - >>> hdobj.list_records() + >>> hdobj.headers >>> usgheads = hdobj.get_data(kstpkper=(0, 49)) """ @@ -865,7 +908,9 @@ def _get_data_array(self, totim=0.0): """ if totim >= 0.0: - keyindices = np.where(self.recordarray["totim"] == totim)[0] + keyindices = np.asarray( + self.recordarray["totim"] == totim + ).nonzero()[0] if len(keyindices) == 0: msg = f"totim value ({totim}) not found in file..." raise Exception(msg) @@ -919,7 +964,7 @@ def get_ts(self, idx): values must be zero based. Returns - ---------- + ------- out : numpy array Array has size (ntimes, ncells + 1). The first column in the data array will contain time (totim). 
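Taken together, the head file changes replace list_records() with a pandas headers frame and make reverse() safe to run in place, since reversed records are staged in a temporary file and only then moved over the destination. A minimal usage sketch (file names hypothetical):

    import flopy.utils.binaryfile as bf

    hf = bf.HeadFile("model.hds")
    print(hf.headers[["kstp", "kper", "totim"]])  # replaces hf.list_records()
    hf.reverse("model_rev.hds")  # write a reversed copy alongside the original
    hf.reverse()                 # or overwrite model.hds in place and reload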
@@ -979,7 +1024,7 @@ class CellBudgetFile: >>> import flopy.utils.binaryfile as bf >>> cbb = bf.CellBudgetFile('mymodel.cbb') - >>> cbb.list_records() + >>> cbb.headers >>> rec = cbb.get_data(kstpkper=(0,0), text='RIVER LEAKAGE') """ @@ -1015,7 +1060,6 @@ def __init__( self.imethlist = [] self.paknamlist_from = [] self.paknamlist_to = [] - self.nrecords = 0 self.compact = True # compact budget file flag self.dis = None @@ -1068,6 +1112,26 @@ def __enter__(self): def __exit__(self, *exc): self.close() + def __len__(self): + """ + Return the number of records (headers) in the file. + """ + return len(self.recordarray) + + @property + def nrecords(self): + """ + Return the number of records (headers) in the file. + + .. deprecated:: 3.8.0 + Use :meth:`len` instead. + """ + warnings.warn( + "obj.nrecords is deprecated; use len(obj) instead.", + DeprecationWarning, + ) + return len(self) + def __reset(self): """ Reset indexing lists when determining precision @@ -1082,7 +1146,6 @@ def __reset(self): self.imethlist = [] self.paknamlist_from = [] self.paknamlist_to = [] - self.nrecords = 0 def _set_precision(self, precision="single"): """ @@ -1098,7 +1161,7 @@ def _set_precision(self, precision="single"): h1dt = [ ("kstp", "i4"), ("kper", "i4"), - ("text", "a16"), + ("text", "S16"), ("ncol", "i4"), ("nrow", "i4"), ("nlay", "i4"), @@ -1121,10 +1184,10 @@ def _set_precision(self, precision="single"): ("delt", ffmt), ("pertim", ffmt), ("totim", ffmt), - ("modelnam", "a16"), - ("paknam", "a16"), - ("modelnam2", "a16"), - ("paknam2", "a16"), + ("modelnam", "S16"), + ("paknam", "S16"), + ("modelnam2", "S16"), + ("paknam2", "S16"), ] self.header1_dtype = np.dtype(h1dt) self.header2_dtype0 = np.dtype(h2dt0) @@ -1134,7 +1197,7 @@ def _set_precision(self, precision="single"): try: self._build_index() - except BudgetIndexError: + except (BudgetIndexError, EOFError): success = False self.__reset() @@ -1156,8 +1219,6 @@ def _totim_from_kstpkper(self, kstpkper): kstp_len = [dt1] for i in range(kstp + 1): kstp_len.append(kstp_len[-1] * tsmult) - # kstp_len = np.array(kstp_len) - # kstp_len = kstp_len[:kstp].sum() kstp_len = sum(kstp_len[: kstp + 1]) return kper_len + kstp_len @@ -1166,20 +1227,14 @@ def _build_index(self): Build the ordered dictionary, which maps the header information to the position in the binary file. 
""" - asciiset = " " - for i in range(33, 127): - asciiset += chr(i) - # read first record header = self._get_header() nrow = header["nrow"] ncol = header["ncol"] - text = header["text"] - if isinstance(text, bytes): - text = text.decode() + text = header["text"].decode("ascii").strip() if nrow < 0 or ncol < 0: raise Exception("negative nrow, ncol") - if not text.endswith("FLOW-JA-FACE"): + if text != "FLOW-JA-FACE": self.nrow = nrow self.ncol = ncol self.nlay = np.abs(header["nlay"]) @@ -1192,7 +1247,6 @@ def _build_index(self): while ipos < self.totalbytes: self.iposheader.append(ipos) header = self._get_header() - self.nrecords += 1 totim = header["totim"] # if old-style (non-compact) file, # compute totim from kstp and kper @@ -1208,17 +1262,14 @@ def _build_index(self): self.kstpkper.append(kstpkper) if header["text"] not in self.textlist: # check the precision of the file using text records - try: - tlist = [header["text"], header["modelnam"]] - for text in tlist: - if isinstance(text, bytes): - text = text.decode() - for t in text: - if t.upper() not in asciiset: - raise Exception() - - except: - raise BudgetIndexError("Improper precision") + tlist = [header["text"], header["modelnam"]] + for text in tlist: + if len(text) == 0: + continue + charbytes = list(text) + if min(charbytes) < 32 or max(charbytes) > 126: + # not in conventional ASCII range + raise BudgetIndexError("Improper precision") self.textlist.append(header["text"]) self.imethlist.append(header["imeth"]) if header["paknam"] not in self.paknamlist_from: @@ -1245,23 +1296,15 @@ def _build_index(self): "paknam2", ]: s = header[itxt] - if isinstance(s, bytes): - s = s.decode() print(f"{itxt}: {s}") print("file position: ", ipos) - if ( - header["imeth"].item() != 5 - and header["imeth"].item() != 6 - and header["imeth"].item() != 7 - ): + if header["imeth"].item() not in {5, 6, 7}: print("") # set the nrow, ncol, and nlay if they have not been set if self.nrow == 0: - text = header["text"] - if isinstance(text, bytes): - text = text.decode() - if not text.endswith("FLOW-JA-FACE"): + text = header["text"].decode("ascii").strip() + if text != "FLOW-JA-FACE": self.nrow = header["nrow"] self.ncol = header["ncol"] self.nlay = np.abs(header["nlay"]) @@ -1285,6 +1328,28 @@ def _build_index(self): self.iposarray = np.array(self.iposarray, dtype=np.int64) self.nper = self.recordarray["kper"].max() + # provide headers as a pandas frame + self.headers = pd.DataFrame(self.recordarray, index=self.iposarray) + # remove irrelevant columns + cols = self.headers.columns.to_list() + unique_imeth = self.headers["imeth"].unique() + if unique_imeth.max() == 0: + drop_cols = cols[cols.index("imeth") :] + elif 6 not in unique_imeth: + drop_cols = cols[cols.index("modelnam") :] + else: + drop_cols = [] + if drop_cols: + self.headers.drop(columns=drop_cols, inplace=True) + for name in self.headers.columns: + dtype = self.header_dtype[name] + if np.issubdtype(dtype, bytes): # convert to str + self.headers[name] = ( + self.headers[name] + .str.decode("ascii", "strict") + .str.strip() + ) + def _skip_record(self, header): """ Skip over this record, not counting header and header2. 
@@ -1294,51 +1359,47 @@ def _skip_record(self, header): nrow = header["nrow"] ncol = header["ncol"] imeth = header["imeth"] + realtype_nbytes = self.realtype(1).nbytes if imeth == 0: - nbytes = nrow * ncol * nlay * self.realtype(1).nbytes + nbytes = nrow * ncol * nlay * realtype_nbytes elif imeth == 1: - nbytes = nrow * ncol * nlay * self.realtype(1).nbytes + nbytes = nrow * ncol * nlay * realtype_nbytes elif imeth == 2: nlist = binaryread(self.file, np.int32)[0] - nbytes = nlist * (np.int32(1).nbytes + self.realtype(1).nbytes) + nbytes = nlist * (4 + realtype_nbytes) elif imeth == 3: - nbytes = nrow * ncol * self.realtype(1).nbytes - nbytes += nrow * ncol * np.int32(1).nbytes + nbytes = nrow * ncol * realtype_nbytes + (nrow * ncol * 4) elif imeth == 4: - nbytes = nrow * ncol * self.realtype(1).nbytes + nbytes = nrow * ncol * realtype_nbytes elif imeth == 5: nauxp1 = binaryread(self.file, np.int32)[0] naux = nauxp1 - 1 - - for i in range(naux): - temp = binaryread(self.file, str, charlen=16) + naux_nbytes = naux * 16 + if naux_nbytes: + check = self.file.seek(naux_nbytes, 1) + if check < naux_nbytes: + raise EOFError nlist = binaryread(self.file, np.int32)[0] if self.verbose: print("naux: ", naux) print("nlist: ", nlist) print("") - nbytes = nlist * ( - np.int32(1).nbytes - + self.realtype(1).nbytes - + naux * self.realtype(1).nbytes - ) + nbytes = nlist * (4 + realtype_nbytes + naux * realtype_nbytes) elif imeth == 6: # read rest of list data nauxp1 = binaryread(self.file, np.int32)[0] naux = nauxp1 - 1 - - for i in range(naux): - temp = binaryread(self.file, str, charlen=16) + naux_nbytes = naux * 16 + if naux_nbytes: + check = self.file.seek(naux_nbytes, 1) + if check < naux_nbytes: + raise EOFError nlist = binaryread(self.file, np.int32)[0] if self.verbose: print("naux: ", naux) print("nlist: ", nlist) print("") - nbytes = nlist * ( - np.int32(1).nbytes * 2 - + self.realtype(1).nbytes - + naux * self.realtype(1).nbytes - ) + nbytes = nlist * (4 * 2 + realtype_nbytes + naux * realtype_nbytes) else: raise Exception(f"invalid method code {imeth}") if nbytes != 0: @@ -1362,10 +1423,10 @@ def _get_header(self): for name in temp.dtype.names: header2[name] = temp[name] if header2["imeth"].item() == 6: - header2["modelnam"] = binaryread(self.file, str, charlen=16) - header2["paknam"] = binaryread(self.file, str, charlen=16) - header2["modelnam2"] = binaryread(self.file, str, charlen=16) - header2["paknam2"] = binaryread(self.file, str, charlen=16) + header2["modelnam"] = binaryread(self.file, bytes, charlen=16) + header2["paknam"] = binaryread(self.file, bytes, charlen=16) + header2["modelnam2"] = binaryread(self.file, bytes, charlen=16) + header2["paknam2"] = binaryread(self.file, bytes, charlen=16) else: header2 = np.array( [(0, 0.0, 0.0, 0.0, "", "", "", "")], dtype=self.header2_dtype @@ -1420,7 +1481,14 @@ def _find_paknam(self, paknam, to=False): def list_records(self): """ Print a list of all of the records in the file + + .. deprecated:: 3.8.0 + Use :attr:`headers` instead. """ + warnings.warn( + "list_records() is deprecated; use headers instead.", + DeprecationWarning, + ) for rec in self.recordarray: if isinstance(rec, bytes): rec = rec.decode() @@ -1429,7 +1497,15 @@ def list_records(self): def list_unique_records(self): """ Print a list of unique record names + + .. deprecated:: 3.8.0 + Use `headers[["text", "imeth"]].drop_duplicates()` instead. 
""" + warnings.warn( + "list_unique_records() is deprecated; use " + 'headers[["text", "imeth"]].drop_duplicates() instead.', + DeprecationWarning, + ) print("RECORD IMETH") print(22 * "-") for rec, imeth in zip(self.textlist, self.imethlist): @@ -1440,7 +1516,17 @@ def list_unique_records(self): def list_unique_packages(self, to=False): """ Print a list of unique package names + + .. deprecated:: 3.8.0 + Use `headers.paknam.drop_duplicates()` or + `headers.paknam2.drop_duplicates()` instead. """ + warnings.warn( + "list_unique_packages() is deprecated; use " + "headers.paknam.drop_duplicates() or " + "headers.paknam2.drop_duplicates() instead", + DeprecationWarning, + ) for rec in self._unique_package_names(to): if isinstance(rec, bytes): rec = rec.decode() @@ -1456,7 +1542,7 @@ def get_unique_record_names(self, decode=False): Optional boolean used to decode byte strings (default is False). Returns - ---------- + ------- names : list of strings List of unique text names in the binary file. @@ -1481,7 +1567,7 @@ def get_unique_package_names(self, decode=False, to=False): Optional boolean used to decode byte strings (default is False). Returns - ---------- + ------- names : list of strings List of unique package names in the binary file. @@ -1502,7 +1588,7 @@ def _unique_package_names(self, to=False): Get a list of unique package names in the file Returns - ---------- + ------- out : list of strings List of unique package names in the binary file. @@ -1533,7 +1619,7 @@ def get_indices(self, text=None): 'RIVER LEAKAGE', 'STORAGE', 'FLOW RIGHT FACE', etc. Returns - ---------- + ------- out : tuple indices of selected record name in budget file. @@ -1541,7 +1627,9 @@ def get_indices(self, text=None): # check and make sure that text is in file if text is not None: text16 = self._find_text(text) - select_indices = np.where(self.recordarray["text"] == text16) + select_indices = np.asarray( + self.recordarray["text"] == text16 + ).nonzero() if isinstance(select_indices, tuple): select_indices = select_indices[0] else: @@ -1614,7 +1702,7 @@ def get_data( 'COMPACT BUDGET' MODFLOW budget file. (Default is False.) Returns - ---------- + ------- recordlist : list of records A list of budget objects. The structure of the returned object depends on the structure of the data in the cbb file. @@ -1720,7 +1808,7 @@ def get_ts(self, idx, text=None, times=None): List of times to from which to get time series. Returns - ---------- + ------- out : numpy array Array has size (ntimes, ncells + 1). The first column in the data array will contain time (totim). @@ -1806,7 +1894,7 @@ def get_ts(self, idx, text=None, times=None): for vv in v: field = vv.dtype.names[2] - dix = np.where(np.isin(vv["node"], ndx))[0] + dix = np.asarray(np.isin(vv["node"], ndx)).nonzero()[0] if len(dix) > 0: result[itim, 1:] = vv[field][dix] @@ -1867,7 +1955,7 @@ def get_record(self, idx, full3D=False): 'COMPACT BUDGET' MODFLOW budget file. (Default is False.) Returns - ---------- + ------- record : a single data record The structure of the returned object depends on the structure of the data in the cbb file. 
Compact list data are returned as @@ -1895,9 +1983,7 @@ def get_record(self, idx, full3D=False): self.file.seek(ipos, 0) imeth = header["imeth"][0] - t = header["text"][0] - if isinstance(t, bytes): - t = t.decode("utf-8") + t = header["text"][0].decode("ascii") s = f"Returning {t.strip()} as " nlay = abs(header["nlay"][0]) @@ -1983,10 +2069,8 @@ def get_record(self, idx, full3D=False): naux = nauxp1 - 1 l = [("node", np.int32), ("q", self.realtype)] for i in range(naux): - auxname = binaryread(self.file, str, charlen=16) - if not isinstance(auxname, str): - auxname = auxname.decode() - l.append((auxname.strip(), self.realtype)) + auxname = binaryread(self.file, bytes, charlen=16) + l.append((auxname.decode("ascii").strip(), self.realtype)) dtype = np.dtype(l) nlist = binaryread(self.file, np.int32)[0] data = binaryread(self.file, dtype, shape=(nlist,)) @@ -2008,10 +2092,8 @@ def get_record(self, idx, full3D=False): naux = nauxp1 - 1 l = [("node", np.int32), ("node2", np.int32), ("q", self.realtype)] for i in range(naux): - auxname = binaryread(self.file, str, charlen=16) - if not isinstance(auxname, str): - auxname = auxname.decode() - l.append((auxname.strip(), self.realtype)) + auxname = binaryread(self.file, bytes, charlen=16) + l.append((auxname.decode("ascii").strip(), self.realtype)) dtype = np.dtype(l) nlist = binaryread(self.file, np.int32)[0] data = binaryread(self.file, dtype, shape=(nlist,)) @@ -2047,7 +2129,7 @@ def __create3D(self, data): Dictionary with node keywords and flows (q) items. Returns - ---------- + ------- out : numpy masked array List contains unique simulation times (totim) in binary file. @@ -2065,7 +2147,7 @@ def get_times(self): Get a list of unique times in the file Returns - ---------- + ------- out : list of floats List contains unique simulation times (totim) in binary file. @@ -2078,12 +2160,17 @@ def get_nrecords(self): Returns ------- - - out : int + int Number of records in the file. + .. deprecated:: 3.8.0 + Use :meth:`len` instead. 
""" - return self.recordarray.shape[0] + warnings.warn( + "get_nrecords is deprecated; use len(obj) instead.", + DeprecationWarning, + ) + return len(self) def get_residual(self, totim, scaled=False): """ @@ -2114,7 +2201,9 @@ def get_residual(self, totim, scaled=False): residual = np.zeros((nlay, nrow, ncol), dtype=float) if scaled: inflow = np.zeros((nlay, nrow, ncol), dtype=float) - select_indices = np.where(self.recordarray["totim"] == totim)[0] + select_indices = np.asarray( + self.recordarray["totim"] == totim + ).nonzero()[0] for i in select_indices: text = self.recordarray[i]["text"].decode() @@ -2125,9 +2214,9 @@ def get_residual(self, totim, scaled=False): residual -= flow[:, :, :] residual[:, :, 1:] += flow[:, :, :-1] if scaled: - idx = np.where(flow < 0.0) + idx = np.asarray(flow < 0.0).nonzero() inflow[idx] -= flow[idx] - idx = np.where(flow > 0.0) + idx = np.asarray(flow > 0.0).nonzero() l, r, c = idx idx = (l, r, c + 1) inflow[idx] += flow[idx] @@ -2135,9 +2224,9 @@ def get_residual(self, totim, scaled=False): residual -= flow[:, :, :] residual[:, 1:, :] += flow[:, :-1, :] if scaled: - idx = np.where(flow < 0.0) + idx = np.asarray(flow < 0.0).nonzero() inflow[idx] -= flow[idx] - idx = np.where(flow > 0.0) + idx = np.asarray(flow > 0.0).nonzero() l, r, c = idx idx = (l, r + 1, c) inflow[idx] += flow[idx] @@ -2145,16 +2234,16 @@ def get_residual(self, totim, scaled=False): residual -= flow[:, :, :] residual[1:, :, :] += flow[:-1, :, :] if scaled: - idx = np.where(flow < 0.0) + idx = np.asarray(flow < 0.0).nonzero() inflow[idx] -= flow[idx] - idx = np.where(flow > 0.0) + idx = np.asarray(flow > 0.0).nonzero() l, r, c = idx idx = (l + 1, r, c) inflow[idx] += flow[idx] else: residual += flow if scaled: - idx = np.where(flow > 0.0) + idx = np.asarray(flow > 0.0).nonzero() inflow[idx] += flow[idx] if scaled: @@ -2173,21 +2262,23 @@ def close(self): def reverse(self, filename: Optional[os.PathLike] = None): """ - Write a binary cell budget file with the records in reverse order. - If a new filename is not provided, or if the filename is the same - as the existing filename, the file will be overwritten and data - reloaded from the rewritten/reversed file. + Reverse the time order and signs of the currently loaded binary cell budget + file. If a file name is not provided or if the provided name is the same as + the existing filename, the file will be overwritten and reloaded. - Parameters - ---------- + Notes + ----- + While `HeadFile.reverse()` reverses only the temporal order of head data, + this method must reverse not only the order but also the sign (direction) + of the model's intercell flows. filename : str or PathLike, optional - Path of the new reversed binary cell budget file to create. + Path of the reversed binary cell budget file. 
""" filename = ( Path(filename).expanduser().absolute() - if filename + if filename is not None else self.filename ) @@ -2232,10 +2323,12 @@ def reverse(self, filename: Optional[os.PathLike] = None): tsimtotal += tpd[0] # get number of records - nrecords = self.get_nrecords() + nrecords = len(self) # open backward budget file - with open(filename, "wb") as fbin: + temp_dir_path = Path(tempfile.gettempdir()) + temp_file_path = temp_dir_path / filename.name + with open(temp_file_path, "wb") as f: # loop over budget file records in reverse order for idx in range(nrecords - 1, -1, -1): # load header array @@ -2270,7 +2363,7 @@ def reverse(self, filename: Optional[os.PathLike] = None): ] # Note: much of the code below is based on binary_file_writer.py h = np.array(h, dtype=dt1) - h.tofile(fbin) + h.tofile(f) if header["imeth"] == 6: # Write additional header information to the backward budget file h = header[ @@ -2282,7 +2375,7 @@ def reverse(self, filename: Optional[os.PathLike] = None): ] ] h = np.array(h, dtype=dt2) - h.tofile(fbin) + h.tofile(f) # Load data data = self.get_data(idx)[0] data = np.array(data) @@ -2293,7 +2386,7 @@ def reverse(self, filename: Optional[os.PathLike] = None): ndat = len(colnames) - 2 dt = np.dtype([("ndat", np.int32)]) h = np.array([(ndat,)], dtype=dt) - h.tofile(fbin) + h.tofile(f) # Write auxiliary column names naux = ndat - 1 if naux > 0: @@ -2305,12 +2398,12 @@ def reverse(self, filename: Optional[os.PathLike] = None): [(colname, "S16") for colname in colnames[3:]] ) h = np.array(auxtxt, dtype=dt) - h.tofile(fbin) + h.tofile(f) # Write nlist nlist = data.shape[0] dt = np.dtype([("nlist", np.int32)]) h = np.array([(nlist,)], dtype=dt) - h.tofile(fbin) + h.tofile(f) elif header["imeth"] == 1: # Load data data = self.get_data(idx)[0][0][0] @@ -2320,7 +2413,14 @@ def reverse(self, filename: Optional[os.PathLike] = None): else: raise ValueError("not expecting imeth " + header["imeth"]) # Write data - data.tofile(fbin) + data.tofile(f) + + # if we're rewriting the original file, close it first + if filename == self.filename: + self.close() + + # move temp file to destination + temp_file_path.replace(filename) # if we rewrote the original file, reinitialize if filename == self.filename: diff --git a/flopy/utils/check.py b/flopy/utils/check.py index 69f8c2ba5..432dc5221 100644 --- a/flopy/utils/check.py +++ b/flopy/utils/check.py @@ -254,8 +254,6 @@ def _get_summary_array(self, array=None): if array is None: return np.recarray((0), dtype=dtype) ra = recarray(array, dtype) - # at = array.transpose() - # a = np.core.records.fromarrays(at, dtype=dtype) return ra def _txt_footer( @@ -360,9 +358,6 @@ def _list_spd_check_violations( stress_period_data where criteria=True. """ inds_col = self._get_cell_inds_names() - # inds = stress_period_data[criteria][inds_col]\ - # .reshape(stress_period_data[criteria].shape + (-1,)) - # inds = np.atleast_2d(np.squeeze(inds.tolist())) inds = stress_period_data[criteria] a = self._get_cellid_cols(inds, inds_col) inds = a.view(int) @@ -489,15 +484,6 @@ def stress_period_data_values( name, k,i,j indices, values, and description of error for each row in stress_period_data where criteria=True. 
""" - # check for valid cell indices - # self._stress_period_data_valid_indices(stress_period_data) - - # first check for and list nan values - # self._stress_period_data_nans(stress_period_data) - - # next check for BCs in inactive cells - # self._stress_period_data_inactivecells(stress_period_data) - if np.any(criteria): # list the values that met the criteria sa = self._list_spd_check_violations( @@ -521,7 +507,7 @@ def values(self, a, criteria, error_name="", error_type="Warning"): True value in criteria. """ if np.any(criteria): - inds = np.where(criteria) + inds = np.asarray(criteria).nonzero() v = a[inds] # works with structured or unstructured pn = [self.package.name] * len(v) en = [error_name] * len(v) diff --git a/flopy/utils/compare.py b/flopy/utils/compare.py index 7aad59637..2e5f3fd6b 100644 --- a/flopy/utils/compare.py +++ b/flopy/utils/compare.py @@ -40,7 +40,7 @@ def _diffmax(v1, v2): diff = abs(v1 - v2) diffmax = diff.max() - return diffmax, np.where(diff == diffmax) + return diffmax, np.asarray(diff == diffmax).nonzero() def _difftol(v1, v2, tol): @@ -75,7 +75,7 @@ def _difftol(v1, v2, tol): raise Exception(err) diff = abs(v1 - v2) - return diff.max(), np.where(diff > tol) + return diff.max(), np.asarray(diff > tol).nonzero() def compare_budget( @@ -850,8 +850,6 @@ def compare_heads( v1 = h1.flatten()[ind] v2 = h2.flatten()[ind] d12 = v1 - v2 - # e += ' ' + fmtn.format(jdx + 1) + ' node: ' - # e += fmtn.format(ind + 1) # convert to one-based e += " " + fmtn.format(jdx + 1) e += f" {iv}" e += " -- " diff --git a/flopy/utils/cvfdutil.py b/flopy/utils/cvfdutil.py index 3eb33d6e8..3a59031d2 100644 --- a/flopy/utils/cvfdutil.py +++ b/flopy/utils/cvfdutil.py @@ -1,3 +1,5 @@ +import warnings + import numpy as np import pandas as pd @@ -324,7 +326,7 @@ def gridlist_to_verts(gridlist): vertdict = {} icell = 0 for sg in gridlist: - ilays, irows, icols = np.where(sg.idomain > 0) + ilays, irows, icols = np.asarray(sg.idomain > 0).nonzero() for _, i, j in zip(ilays, irows, icols): v = sg.get_cell_vertices(i, j) vertdict[icell] = v + [v[0]] @@ -390,6 +392,10 @@ def gridlist_to_disv_gridprops(gridlist): be numbered according to consecutive numbering of active cells in the grid list. + This function is deprecated in 3.8 and will be removed in 3.9. Use the + functionality in flopy.utils.cvfdutil.Lgr() to create a DISV mesh for a + nested grid. + Parameters ---------- gridlist : list @@ -403,6 +409,13 @@ def gridlist_to_disv_gridprops(gridlist): modflow6 disv package. """ + warnings.warn( + "the gridlist_to_disv_gridprops function is deprecated and will be " + "removed in version 3.9. 
Use flopy.utils.cvfdutil.Lgr() instead, which " + "allows a nested grid to be created and exported to a DISV mesh.", + PendingDeprecationWarning, + ) + verts, iverts = gridlist_to_verts(gridlist) gridprops = get_disv_gridprops(verts, iverts) return gridprops diff --git a/flopy/utils/datafile.py b/flopy/utils/datafile.py index 6477ecfe0..ca922c516 100644 --- a/flopy/utils/datafile.py +++ b/flopy/utils/datafile.py @@ -5,6 +5,7 @@ """ import os +import warnings from pathlib import Path from typing import Union @@ -42,7 +43,7 @@ def __init__(self, filetype=None, precision="single"): ("kper", "i4"), ("pertim", floattype), ("totim", floattype), - ("text", "a16"), + ("text", "S16"), ("ncol", "i4"), ("nrow", "i4"), ("ilay", "i4"), @@ -55,7 +56,7 @@ def __init__(self, filetype=None, precision="single"): ("kper", "i4"), ("pertim", floattype), ("totim", floattype), - ("text", "a16"), + ("text", "S16"), ("ncol", "i4"), ("nrow", "i4"), ("ilay", "i4"), @@ -68,7 +69,7 @@ def __init__(self, filetype=None, precision="single"): ("kstp", "i4"), ("kper", "i4"), ("totim", floattype), - ("text", "a16"), + ("text", "S16"), ("ncol", "i4"), ("nrow", "i4"), ("ilay", "i4"), @@ -81,7 +82,7 @@ def __init__(self, filetype=None, precision="single"): ("kper", "i4"), ("pertim", floattype), ("totim", floattype), - ("text", "a16"), + ("text", "S16"), ("m1", "i4"), ("m2", "i4"), ("m3", "i4"), @@ -94,7 +95,7 @@ def __init__(self, filetype=None, precision="single"): ("kper", "i4"), ("pertim", floattype), ("totim", floattype), - ("text", "a16"), + ("text", "S16"), ("m1", "i4"), ("m2", "i4"), ("m3", "i4"), @@ -107,7 +108,7 @@ def __init__(self, filetype=None, precision="single"): ("kper", "i4"), ("pertim", floattype), ("totim", floattype), - ("text", "a16"), + ("text", "S16"), ("m1", "i4"), ("m2", "i4"), ("m3", "i4"), @@ -221,6 +222,18 @@ def __init__( angrot=0.0, ) + def __len__(self): + """ + Return the number of records (headers) in the file. + """ + return len(self.recordarray) + + def __enter__(self): + return self + + def __exit__(self, *exc): + self.close() + def to_shapefile( self, filename: Union[str, os.PathLike], @@ -252,7 +265,7 @@ def to_shapefile( Whether to print verbose output Returns - ---------- + ------- None See Also @@ -341,7 +354,7 @@ def plot( if filename_base is not None. (default is 'png') Returns - ---------- + ------- None See Also @@ -409,7 +422,7 @@ def _build_index(self): Build the recordarray and iposarray, which maps the header information to the position in the formatted file. """ - raise Exception( + raise NotImplementedError( "Abstract method _build_index called in LayerFile. " "This method needs to be overridden." ) @@ -417,17 +430,30 @@ def _build_index(self): def list_records(self): """ Print a list of all of the records in the file - obj.list_records() + .. deprecated:: 3.8.0 + Use :attr:`headers` instead. """ + warnings.warn( + "list_records() is deprecated; use headers instead.", + DeprecationWarning, + ) for header in self.recordarray: print(header) return def get_nrecords(self): - if isinstance(self.recordarray, np.recarray): - return self.recordarray.shape[0] - return 0 + """ + Return the number of records (headers) in the file. + + .. deprecated:: 3.8.0 + Use :meth:`len` instead. 
+ """ + warnings.warn( + "get_nrecords is deprecated; use len(obj) instead.", + DeprecationWarning, + ) + return len(self) def _get_data_array(self, totim=0): """ @@ -437,7 +463,9 @@ def _get_data_array(self, totim=0): """ if totim >= 0.0: - keyindices = np.where(self.recordarray["totim"] == totim)[0] + keyindices = np.asarray( + self.recordarray["totim"] == totim + ).nonzero()[0] if len(keyindices) == 0: msg = f"totim value ({totim}) not found in file..." raise Exception(msg) @@ -468,7 +496,7 @@ def get_times(self): Get a list of unique times in the file Returns - ---------- + ------- out : list of floats List contains unique simulation times (totim) in binary file. @@ -505,7 +533,7 @@ def get_data(self, kstpkper=None, idx=None, totim=None, mflay=None): all layers will be included. (Default is None.) Returns - ---------- + ------- data : numpy array Array has size (nlay, nrow, ncol) if mflay is None or it has size (nrow, ncol) if mlay is specified. @@ -519,10 +547,10 @@ def get_data(self, kstpkper=None, idx=None, totim=None, mflay=None): if kstpkper is not None: kstp1 = kstpkper[0] + 1 kper1 = kstpkper[1] + 1 - idx = np.where( + idx = np.asarray( (self.recordarray["kstp"] == kstp1) & (self.recordarray["kper"] == kper1) - ) + ).nonzero() if idx[0].shape[0] == 0: raise Exception( f"get_data() error: kstpkper not found:{kstpkper}" @@ -556,7 +584,7 @@ def get_alldata(self, mflay=None, nodata=-9999): nodata value will be assigned np.nan. Returns - ---------- + ------- data : numpy array Array has size (ntimes, nlay, nrow, ncol) if mflay is None or it has size (ntimes, nrow, ncol) if mlay is specified. @@ -584,7 +612,7 @@ def _read_data(self, shp): Read data from file """ - raise Exception( + raise NotImplementedError( "Abstract method _read_data called in LayerFile. " "This method needs to be overridden." ) diff --git a/flopy/utils/flopy_io.py b/flopy/utils/flopy_io.py index c3f94b62f..403f3e7a9 100644 --- a/flopy/utils/flopy_io.py +++ b/flopy/utils/flopy_io.py @@ -305,7 +305,7 @@ def flux_to_wel(cbc_file, text, precision="single", model=None, verbose=False): arr = arr[0] print(arr.max(), arr.min(), arr.sum()) # masked where zero - arr[np.where(arr == 0.0)] = np.nan + arr[np.asarray(arr == 0.0).nonzero()] = np.nan m4d[iper + 1] = arr iper += 1 diff --git a/flopy/utils/formattedfile.py b/flopy/utils/formattedfile.py index dd4c795ba..ca9436ed2 100644 --- a/flopy/utils/formattedfile.py +++ b/flopy/utils/formattedfile.py @@ -7,6 +7,7 @@ """ import numpy as np +import pandas as pd from ..utils.datafile import Header, LayerFile @@ -54,7 +55,7 @@ def read_header(self, text_file): the header Returns - ---------- + ------- out : numpy array of header information also stores the header's format string as self.format_string @@ -153,9 +154,15 @@ def _build_index(self): # self.recordarray contains a recordarray of all the headers. 
self.recordarray = np.array(self.recordarray, self.header.get_dtype()) - self.iposarray = np.array(self.iposarray) + self.iposarray = np.array(self.iposarray, dtype=np.int64) self.nlay = np.max(self.recordarray["ilay"]) + # provide headers as a pandas frame + self.headers = pd.DataFrame(self.recordarray, index=self.iposarray) + self.headers["text"] = self.headers["text"].str.decode( + "ascii", "strict" + ) + def _store_record(self, header, ipos): """ Store file header information in various formats for quick retrieval @@ -175,7 +182,7 @@ def _get_text_header(self): Return a text header object containing header formatting information """ - raise Exception( + raise NotImplementedError( "Abstract method _get_text_header called in FormattedLayerFile. " "This method needs to be overridden." ) @@ -257,7 +264,7 @@ def get_ts(self, idx): row, and column values must be zero based. Returns - ---------- + ------- out : numpy array Array has size (ntimes, ncells + 1). The first column in the data array will contain time (totim). @@ -296,7 +303,7 @@ def get_ts(self, idx): # Find the time index and then put value into result in the # correct location. - itim = np.where(result[:, 0] == header["totim"])[0] + itim = np.asarray(result[:, 0] == header["totim"]).nonzero()[0] result[itim, istat] = self._read_val(j) istat += 1 return result @@ -354,7 +361,7 @@ class FormattedHeadFile(FormattedLayerFile): >>> import flopy.utils.formattedfile as ff >>> hdobj = ff.FormattedHeadFile('model.fhd', precision='single') - >>> hdobj.list_records() + >>> hdobj.headers >>> rec = hdobj.get_data(kstpkper=(0, 49)) >>> rec2 = ddnobj.get_data(totim=100.) diff --git a/flopy/utils/geometry.py b/flopy/utils/geometry.py index d22990f8a..578040d55 100644 --- a/flopy/utils/geometry.py +++ b/flopy/utils/geometry.py @@ -11,7 +11,7 @@ class Shape: """ Parent class for handling geo interfacing, do not instantiate directly - Parameters: + Parameters ---------- type : str shapetype string @@ -233,9 +233,9 @@ class MultiPolygon(Collection): Container for housing and describing multipolygon geometries (e.g. to be read or written to shapefiles or other geographic data formats) - Parameters: + Parameters ---------- - polygons : list + polygons : list, tuple, default () list of flopy.utils.geometry.Polygon objects """ @@ -261,9 +261,9 @@ class MultiLineString(Collection): Container for housing and describing multilinestring geometries (e.g. to be read or written to shapefiles or other geographic data formats) - Parameters: + Parameters ---------- - polygons : list + linestrings : list, tuple, default () list of flopy.utils.geometry.LineString objects """ @@ -289,9 +289,9 @@ class MultiPoint(Collection): Container for housing and describing multipoint geometries (e.g. 
to be read or written to shapefiles or other geographic data formats) - Parameters: + Parameters ---------- - polygons : list + points : list, tuple, default () list of flopy.utils.geometry.Point objects """ @@ -872,9 +872,9 @@ def point_in_polygon(xc, yc, polygon): yc - polygon[i][1] ) / (polygon[j][1] - polygon[i][1]) - comp = np.where( + comp = np.asarray( ((polygon[i][1] > yc) ^ (polygon[j][1] > yc)) & (xc < tmp) - ) + ).nonzero() j = i if len(comp[0]) > 0: diff --git a/flopy/utils/get_modflow.py b/flopy/utils/get_modflow.py index 4666fcfe4..fae43b063 100755 --- a/flopy/utils/get_modflow.py +++ b/flopy/utils/get_modflow.py @@ -62,7 +62,8 @@ def get_ostag() -> str: elif sys.platform.startswith("win"): return "win" + ("64" if sys.maxsize > 2**32 else "32") elif sys.platform.startswith("darwin"): - return "mac" + arch = processor() + return "mac" + (arch if arch == "arm" else "") raise ValueError(f"platform {sys.platform!r} not supported") @@ -407,19 +408,9 @@ def run_main( # get the selected release release = get_release(owner, repo, release_id, quiet) assets = release.get("assets", []) - asset_names = [a["name"] for a in assets] for asset in assets: asset_name = asset["name"] if ostag in asset_name: - # temporary hack for nightly gfortran build for ARM macs - # todo: clean up if/when all repos have an ARM mac build - if ( - repo == "modflow6-nightly-build" - and "macarm.zip" in asset_names - and processor() == "arm" - and ostag == "mac.zip" - ): - continue break else: raise ValueError( @@ -608,7 +599,7 @@ def add_item(key, fname, do_chmod): break shutil.rmtree(str(bindir_path)) - if ostag in ["linux", "mac", "macarm"]: + if "win" not in ostag: # similar to "chmod +x fname" for each executable for fname in chmod: pth = bindir / fname diff --git a/flopy/utils/gridgen.py b/flopy/utils/gridgen.py index 9fb46f4fc..437f3a0be 100644 --- a/flopy/utils/gridgen.py +++ b/flopy/utils/gridgen.py @@ -14,7 +14,7 @@ from ..modflow import ModflowDis from ..utils import import_optional_dependency from ..utils.flopy_io import relpath_safe -from .util_array import Util2d # read1d, +from .util_array import Util2d # todo # creation of line and polygon shapefiles from features (holes!) @@ -196,11 +196,11 @@ class Gridgen: where intermediate layers are inactive. (default is False) **kwargs - verical_smoothing_level : int + smoothing_level_vertical : int maximum level difference between two vertically adjacent cells. Adjust with caution, as adjustments can cause unexpected results to simulated flows - horizontal_smoothing_level : int + smoothing_level_horizontal : int maximum level difference between two horizontally adjacent cells. 
Adjust with caution, as adjustments can cause unexpected results to simulated flows. @@ -733,7 +733,7 @@ def plot( shapename = os.path.join(self.model_ws, "qtgrid") xmin, xmax, ymin, ymax = shapefile_extents(shapename) - idx = np.where(self.qtra.layer == layer)[0] + idx = np.asarray(self.qtra.layer == layer).nonzero()[0] pc = plot_shapefile( shapename, diff --git a/flopy/utils/gridintersect.py b/flopy/utils/gridintersect.py index 16c14a272..eed25d310 100644 --- a/flopy/utils/gridintersect.py +++ b/flopy/utils/gridintersect.py @@ -913,10 +913,6 @@ def _intersect_point_shapely2( keep_cid = qcellids names = ["cellids", "ixshapes"] - # self.mfgrid.grid_type == "structured": - # cid_dtype = "i" - # else: - # cid_dtype = "O" formats = ["O", "O"] rec = np.recarray(len(keep_pts), names=names, formats=formats) @@ -2265,7 +2261,6 @@ def find_position_in_array(arr, x): xr = arr[j + 1] frac = (x - xl) / (xr - xl) if 0.0 <= frac <= 1.0: - # if min(xl, xr) <= x < max(xl, xr): jpos.append(j) if len(jpos) == 0: return None diff --git a/flopy/utils/lgrutil.py b/flopy/utils/lgrutil.py index f382c97eb..467f46a70 100644 --- a/flopy/utils/lgrutil.py +++ b/flopy/utils/lgrutil.py @@ -2,6 +2,7 @@ from ..discretization import StructuredGrid from ..modflow import Modflow +from .cvfdutil import get_disv_gridprops from .util_array import Util2d, Util3d @@ -161,10 +162,10 @@ def __init__( # idomain assert idomainp.shape == (nlayp, nrowp, ncolp) self.idomain = idomainp - idxl, idxr, idxc = np.where(idomainp == 0) - assert idxl.shape[0] > 1, "no zero values found in idomain" + idxl, idxr, idxc = np.asarray(idomainp == 0).nonzero() + assert idxl.shape[0] > 0, "no zero values found in idomain" - # # child cells per parent and child cells per parent layer + # child cells per parent and child cells per parent layer self.ncpp = ncpp self.ncppl = Util2d(m, (nlayp,), np.int32, ncppl, "ncppl").array @@ -585,3 +586,425 @@ def child(self): yorigin, ) return simple_regular_grid + + def to_disv_gridprops(self): + """ + Create and return a gridprops dictionary that can be + used to create a disv grid (instead of a separate parent + and child representation). The gridprops dictionary can + be unpacked into the flopy.mf6.ModflowGwfdisv() constructor + and flopy.discretization.VertexGrid() constructor. + + Note that export capability will only work if the parent + and child models have corresponding layers. + + Returns + ------- + gridprops : dict + Dictionary containing ncpl, nvert, vertices, cell2d, + nlay, top, and botm + + """ + return LgrToDisv(self).get_disv_gridprops() + + +class LgrToDisv: + def __init__(self, lgr): + """ + Helper class used to convert an Lgr() object into + the grid properties needed to create a disv vertex + nested grid. After instantiation, self.verts and + self.iverts are available. + + The primary work of this class is to identify hanging + vertices along the shared parent-child boundary and + include these hanging vertices in the vertex incidence + list for parent cells.
+ + Parameters + ---------- + lgr : Lgr instance + Lgr() object describing a parent-child relation + + """ + + # store information + self.lgr = lgr + self.pgrid = lgr.parent.modelgrid + self.cgrid = lgr.child.modelgrid + + # count active parent and child cells + self.ncpl_parent = np.count_nonzero(self.pgrid.idomain[0] > 0) + self.ncpl_child = np.count_nonzero(self.cgrid.idomain[0] > 0) + self.ncpl = self.ncpl_child + self.ncpl_parent + + # find child vertices that act as hanging vertices on parent + # model cells + self.right_face_hanging = None + self.left_face_hanging = None + self.front_face_hanging = None + self.back_face_hanging = None + self.parent_ij_to_global = None + self.child_ij_to_global = None + self.find_hanging_vertices() + + # build global verts and iverts keeping only idomain > 0 + self.verts = None + self.iverts = None + self.build_verts_iverts() + + # todo: remove unused vertices? + + def find_hanging_vertices(self): + """ + Hanging vertices are vertices that must be included + along the edge of parent cells. These hanging vertices + mark the locations of corners of adjacent child cells. + Hanging vertices are not strictly + necessary to define the shape of a parent cell, but they are + required by modflow to describe connections between + parent and child cells. + + This routine finds hanging vertices parent cells along + a parent-child boundary. These hanging vertices are + stored in 4 member dictionaries, called right_face_hanging, + left_face_hanging, front_face_hanging, and back_face_hanging. + These dictionaries are used subsequently to insert + hanging vertices into the iverts array. + + """ + + # create dictionaries for parent left, right, back, and front + # faces that have a key that is parent (row, col) + # and a value that is a list of child vertex numbers + + # this list of child vertex numbers will be ordered from + # left to right (back/front) and from back to front (left/right) + # so when they are used later, two of them will need to be + # reversed so that clockwise ordering is maintained + + nrowc = self.lgr.nrow + ncolc = self.lgr.ncol + iverts = self.cgrid.iverts + cidomain = self.lgr.get_idomain() + + self.right_face_hanging = {} + self.left_face_hanging = {} + self.front_face_hanging = {} + self.back_face_hanging = {} + + # map (i, j) to global cell number + self.parent_ij_to_global = {} + self.child_ij_to_global = {} + + kc = 0 + nodec = 0 + for ic in range(nrowc): + for jc in range(ncolc): + plist = self.lgr.get_parent_connections(kc, ic, jc) + for (kp, ip, jp), idir in plist: + if cidomain[kc, ic, jc] == 0: + continue + + if ( + idir == -1 + ): # left child face connected to right parent face + # child vertices 0 and 3 added as hanging nodes + if (ip, jp) in self.right_face_hanging: + hlist = self.right_face_hanging.pop((ip, jp)) + else: + hlist = [] + ivlist = iverts[nodec] + for iv in (ivlist[0], ivlist[3]): + if iv not in hlist: + hlist.append(iv) + self.right_face_hanging[(ip, jp)] = hlist + + elif idir == 1: + # child vertices 1 and 2 added as hanging nodes + if (ip, jp) in self.left_face_hanging: + hlist = self.left_face_hanging.pop((ip, jp)) + else: + hlist = [] + ivlist = iverts[nodec] + for iv in (ivlist[1], ivlist[2]): + if iv not in hlist: + hlist.append(iv) + self.left_face_hanging[(ip, jp)] = hlist + + elif idir == 2: + # child vertices 0 and 1 added as hanging nodes + if (ip, jp) in self.front_face_hanging: + hlist = self.front_face_hanging.pop((ip, jp)) + else: + hlist = [] + ivlist = iverts[nodec] + for iv in (ivlist[0], 
+                            if iv not in hlist:
+                                hlist.append(iv)
+                        self.front_face_hanging[(ip, jp)] = hlist
+
+                    elif idir == -2:
+                        # child vertices 3 and 2 added as hanging nodes
+                        if (ip, jp) in self.back_face_hanging:
+                            hlist = self.back_face_hanging.pop((ip, jp))
+                        else:
+                            hlist = []
+                        ivlist = iverts[nodec]
+                        for iv in (ivlist[3], ivlist[2]):
+                            if iv not in hlist:
+                                hlist.append(iv)
+                        self.back_face_hanging[(ip, jp)] = hlist
+
+                nodec += 1
+
+    def build_verts_iverts(self):
+        """
+        Build the verts and iverts members. self.verts is a 2d
+        numpy array of size (nvert, 2). Column 1 is x and column 2
+        is y. self.iverts is a list of size ncpl (number of cells
+        per layer) with each entry being the list of vertex indices
+        that define the cell.
+
+        """
+
+        # stack vertex arrays; these will have more points than necessary,
+        # because parent and child vertices will overlap at corners, but
+        # duplicate vertices will be filtered later
+        pverts = self.pgrid.verts
+        cverts = self.cgrid.verts
+        nverts_parent = pverts.shape[0]
+        nverts_child = cverts.shape[0]
+        verts = np.vstack((pverts, cverts))
+
+        # build iverts list first with active parent cells
+        iverts = []
+        iglo = 0
+        for i in range(self.pgrid.nrow):
+            for j in range(self.pgrid.ncol):
+                if self.pgrid.idomain[0, i, j] > 0:
+                    ivlist = self.pgrid._build_structured_iverts(i, j)
+
+                    # merge hanging vertices if they exist
+                    ivlist = self.merge_hanging_vertices(i, j, ivlist)
+
+                    iverts.append(ivlist)
+                    self.parent_ij_to_global[(i, j)] = iglo
+                    iglo += 1
+
+        # now add active child cells
+        for i in range(self.cgrid.nrow):
+            for j in range(self.cgrid.ncol):
+                if self.cgrid.idomain[0, i, j] > 0:
+                    ivlist = [
+                        iv + nverts_parent
+                        for iv in self.cgrid._build_structured_iverts(i, j)
+                    ]
+                    iverts.append(ivlist)
+                    self.child_ij_to_global[(i, j)] = iglo
+                    iglo += 1
+        self.verts = verts
+        self.iverts = iverts
+
+    def merge_hanging_vertices(self, ip, jp, ivlist):
+        """
+        Given a list of vertices (ivlist) for parent row and column
+        (ip, jp), merge hanging vertices from adjacent child cells
+        into ivlist.
+
+        Parameters
+        ----------
+        ip : int
+            parent cell row number
+
+        jp : int
+            parent cell column number
+
+        ivlist : list of ints
+            list of vertex indices that define the parent
+            cell (ip, jp)
+
+        Returns
+        -------
+        ivlist : list of ints
+            modified list of vertices that now also contains
+            any hanging vertices needed to properly define
+            a parent cell adjacent to child cells
+
+        """
+        assert len(ivlist) == 4
+        child_ivlist_offset = self.pgrid.verts.shape[0]
+
+        # construct back edge
+        idx = 0
+        reverse = False
+        face_hanging = self.back_face_hanging
+        back_edge = [ivlist[idx]]
+        if (ip, jp) in face_hanging:
+            hlist = face_hanging[(ip, jp)]
+            if len(hlist) > 2:
+                hlist = hlist[1:-1]  # do not include two ends
+                hlist = [h + child_ivlist_offset for h in hlist]
+                if reverse:
+                    hlist = hlist[::-1]
+            else:
+                hlist = []
+            back_edge = [ivlist[idx]] + hlist
+
+        # construct right edge
+        idx = 1
+        reverse = False
+        face_hanging = self.right_face_hanging
+        right_edge = [ivlist[idx]]
+        if (ip, jp) in face_hanging:
+            hlist = face_hanging[(ip, jp)]
+            if len(hlist) > 2:
+                hlist = hlist[1:-1]  # do not include two ends
+                hlist = [h + child_ivlist_offset for h in hlist]
+                if reverse:
+                    hlist = hlist[::-1]
+            else:
+                hlist = []
+            right_edge = [ivlist[idx]] + hlist
+
+        # construct front edge
+        idx = 2
+        reverse = True
+        face_hanging = self.front_face_hanging
+        front_edge = [ivlist[idx]]
+        if (ip, jp) in face_hanging:
+            hlist = face_hanging[(ip, jp)]
+            if len(hlist) > 2:
+                hlist = hlist[1:-1]  # do not include two ends
+                hlist = [h + child_ivlist_offset for h in hlist]
+                if reverse:
+                    hlist = hlist[::-1]
+            else:
+                hlist = []
+            front_edge = [ivlist[idx]] + hlist
+
+        # construct left edge
+        idx = 3
+        reverse = True
+        face_hanging = self.left_face_hanging
+        left_edge = [ivlist[idx]]
+        if (ip, jp) in face_hanging:
+            hlist = face_hanging[(ip, jp)]
+            if len(hlist) > 2:
+                hlist = hlist[1:-1]  # do not include two ends
+                hlist = [h + child_ivlist_offset for h in hlist]
+                if reverse:
+                    hlist = hlist[::-1]
+            else:
+                hlist = []
+            left_edge = [ivlist[idx]] + hlist
+
+        ivlist = back_edge + right_edge + front_edge + left_edge
+
+        return ivlist
+
+    def get_xcyc(self):
+        """
+        Construct a 2d array of size (ncpl, 2) that
+        contains the cell centers.
+
+        Returns
+        -------
+        xcyc : ndarray
+            2d array of x, y positions for cell centers
+
+        """
+        xcyc = np.empty((self.ncpl, 2))
+        pidx = self.pgrid.idomain[0] > 0
+        cidx = self.cgrid.idomain[0] > 0
+        px = self.pgrid.xcellcenters[pidx].flatten()
+        cx = self.cgrid.xcellcenters[cidx].flatten()
+        xcyc[:, 0] = np.vstack(
+            (np.atleast_2d(px).T, np.atleast_2d(cx).T)
+        ).flatten()
+        py = self.pgrid.ycellcenters[pidx].flatten()
+        cy = self.cgrid.ycellcenters[cidx].flatten()
+        xcyc[:, 1] = np.vstack(
+            (np.atleast_2d(py).T, np.atleast_2d(cy).T)
+        ).flatten()
+        return xcyc
+
+    def get_top(self):
+        """
+        Construct a 1d array of size (ncpl) that
+        contains the cell tops.
+
+        Returns
+        -------
+        top : ndarray
+            1d array of top elevations
+
+        """
+        top = np.empty((self.ncpl,))
+        pidx = self.pgrid.idomain[0] > 0
+        cidx = self.cgrid.idomain[0] > 0
+        pa = self.pgrid.top[pidx].flatten()
+        ca = self.cgrid.top[cidx].flatten()
+        top[:] = np.hstack((pa, ca))
+        return top
+
+    def get_botm(self):
+        """
+        Construct a 2d array of size (nlay, ncpl) that
+        contains the cell bottoms.
+
+        Returns
+        -------
+        botm : ndarray
+            2d array of bottom elevations
+
+        """
+        botm = np.empty((self.lgr.nlay, self.ncpl))
+        pidx = self.pgrid.idomain[0] > 0
+        cidx = self.cgrid.idomain[0] > 0
+        for k in range(self.lgr.nlay):
+            pa = self.pgrid.botm[k, pidx].flatten()
+            ca = self.cgrid.botm[k, cidx].flatten()
+            botm[k, :] = np.hstack((pa, ca))
+        return botm
+
+    def get_disv_gridprops(self):
+        """
+        Create and return a gridprops dictionary that can be
+        used to create a disv grid (instead of a separate parent
+        and child representation). The gridprops dictionary can
+        be unpacked into the flopy.mf6.ModflowGwfdisv() constructor
+        and the flopy.discretization.VertexGrid() constructor.
+
+        Note that export capability will only work if the parent
+        and child models have corresponding layers.
+
+        Returns
+        -------
+        gridprops : dict
+            Dictionary containing ncpl, nvert, vertices, cell2d,
+            nlay, top, and botm
+
+        """
+
+        # check
+        assert (
+            self.lgr.ncppl.min() == self.lgr.ncppl.max()
+        ), "Exporting disv grid properties requires ncppl to be 1."
+        assert (
+            self.lgr.nlayp == self.lgr.nlay
+        ), "Exporting disv grid properties requires parent and child models to have the same number of layers."
+        for k in range(self.lgr.nlayp - 1):
+            assert np.allclose(
+                self.lgr.idomain[k], self.lgr.idomain[k + 1]
+            ), "Exporting disv grid properties requires the parent idomain to be the same for all layers."
+
+        # get information and build gridprops
+        xcyc = self.get_xcyc()
+        top = self.get_top()
+        botm = self.get_botm()
+        gridprops = get_disv_gridprops(self.verts, self.iverts, xcyc=xcyc)
+        gridprops["nlay"] = self.lgr.nlay
+        gridprops["top"] = top
+        gridprops["botm"] = botm
+        return gridprops
diff --git a/flopy/utils/mflistfile.py b/flopy/utils/mflistfile.py
index 38d4b2193..33f13b77f 100644
--- a/flopy/utils/mflistfile.py
+++ b/flopy/utils/mflistfile.py
@@ -183,7 +183,7 @@ def get_kstpkper(self):
         water budgets.

         Returns
-        ----------
+        -------
         out : list of (kstp, kper) tuples
             List of unique kstp, kper combinations in list file.  kstp and
             kper values are zero-based.
@@ -551,7 +551,7 @@ def get_reduced_pumping(self):
            file.
Example - -------- + ------- >>> objLST = MfListBudget("my_model.lst") >>> raryReducedPpg = objLST.get_reduced_pumping() >>> dfReducedPpg = pd.DataFrame.from_records(raryReducedPpg) @@ -651,7 +651,6 @@ def _get_index(self, maxentries): line, ) break - # print('info found for timestep stress period',ts,sp) idxs.append([ts, sp, seekpoint]) diff --git a/flopy/utils/mfreadnam.py b/flopy/utils/mfreadnam.py index 47ab6da50..ba833b842 100644 --- a/flopy/utils/mfreadnam.py +++ b/flopy/utils/mfreadnam.py @@ -608,7 +608,6 @@ def get_mf6_files(mfnamefile): if len(olist) > 0: outplist = outplist + olist # terminate loop if no additional files - # if len(flist) < 1 and len(olist) < 1: if len(flist) < 1: break diff --git a/flopy/utils/modpathfile.py b/flopy/utils/modpathfile.py index 813160c5d..5bc3e2060 100644 --- a/flopy/utils/modpathfile.py +++ b/flopy/utils/modpathfile.py @@ -124,12 +124,12 @@ def intersect( cells = t cells = np.array(cells, dtype=raslice.dtype) - inds = np.in1d(raslice, cells) + inds = np.isin(raslice, cells) epdest = self._data[inds].copy().view(np.recarray) if to_recarray: # use particle ids to get the rest of the paths - inds = np.in1d(self._data["particleid"], epdest.particleid) + inds = np.isin(self._data["particleid"], epdest.particleid) series = self._data[inds].copy() series.sort(order=["particleid", "time"]) series = series.view(np.recarray) @@ -585,7 +585,7 @@ def get_maxtraveltime(self): Get the maximum travel time. Returns - ---------- + ------- out : float Maximum travel time. @@ -600,7 +600,7 @@ def get_alldata(self): ---------- Returns - ---------- + ------- data : numpy record array A numpy recarray with the endpoint particle data @@ -693,7 +693,7 @@ def get_destination_endpoint_data(self, dest_cells, source=False): dtype = np.dtype(dtype) dest_cells = np.array(dest_cells, dtype=dtype) - inds = np.in1d(raslice, dest_cells) + inds = np.isin(raslice, dest_cells) return data[inds].copy().view(np.recarray) def write_shapefile( diff --git a/flopy/utils/mtlistfile.py b/flopy/utils/mtlistfile.py index 5faf5eb3f..7bae5ea04 100644 --- a/flopy/utils/mtlistfile.py +++ b/flopy/utils/mtlistfile.py @@ -136,13 +136,6 @@ def parse( df_gw = pd.DataFrame(self.gw_data) df_gw.loc[:, "totim"] = df_gw.pop("totim_1") - # if cumulative: - # keep = [c for c in df_gw.columns if "_flx" not in c] - # df_gw = df_gw.loc[:,keep] - # else: - # keep = [c for c in df_gw.columns if "_cum" not in c] - # df_gw = df_gw.loc[:, keep] - if diff: df_gw = self._diff(df_gw) @@ -166,13 +159,6 @@ def parse( df_sw = pd.DataFrame(self.sw_data) df_sw.loc[:, "totim"] = df_gw.totim.iloc[:min_len].values - # if cumulative: - # keep = [c for c in df_sw.columns if "_flx" not in c] - # df_sw = df_sw.loc[:, keep] - # else: - # keep = [c for c in df_sw.columns if "_cum" not in c] - # df_sw = df_sw.loc[:, keep] - if diff: df_sw = self._diff(df_sw) if start_datetime is not None: @@ -463,10 +449,8 @@ def _parse_sw(self, f, line): f"error parsing 'out' SW items on line {self.lcount}: {e!s}" ) self._add_to_sw_data("net", item, cval, fval, comp) - # out_tots = self._parse_sw_line(line) def _parse_sw_line(self, line): - # print(line) raw = line.strip().split("=") citem = raw[0].strip().strip(r"[\|]").replace(" ", "_") cval = float(raw[1].split()[0]) @@ -476,7 +460,6 @@ def _parse_sw_line(self, line): else: fitem = raw[1].split()[-1].replace(" ", "_") fval = float(raw[2]) - # assert citem == fitem,"{0}, {1}".format(citem,fitem) return citem, cval, fval def _add_to_sw_data(self, inout, item, cval, fval, comp): diff --git 
a/flopy/utils/observationfile.py b/flopy/utils/observationfile.py index f2b612f8f..3e5308b3a 100644 --- a/flopy/utils/observationfile.py +++ b/flopy/utils/observationfile.py @@ -18,7 +18,7 @@ def get_times(self): Get a list of unique times in the file Returns - ---------- + ------- out : list of floats List contains unique simulation times (totim) in binary file. @@ -30,7 +30,7 @@ def get_ntimes(self): Get the number of times in the file Returns - ---------- + ------- out : int The number of simulation times (totim) in binary file. @@ -42,7 +42,7 @@ def get_nobs(self): Get the number of observations in the file Returns - ---------- + ------- out : tuple of int A tuple with the number of records and number of flow items in the file. The number of flow items is non-zero only if @@ -56,7 +56,7 @@ def get_obsnames(self): Get a list of observation names in the file Returns - ---------- + ------- out : list of strings List of observation names in the binary file. totim is not included in the list of observation names. @@ -82,7 +82,7 @@ def get_data(self, idx=None, obsname=None, totim=None): data for all simulation times are returned. (default is None) Returns - ---------- + ------- data : numpy record array Array has size (ntimes, nitems). totim is always returned. nitems is 2 if idx or obsname is not None or nobs+1. @@ -104,7 +104,7 @@ def get_data(self, idx=None, obsname=None, totim=None): i0 = 0 i1 = self.data.shape[0] if totim is not None: - idx = np.where(self.data["totim"] == totim)[0][0] + idx = np.asarray(self.data["totim"] == totim).nonzero()[0][0] i0 = idx i1 = idx + 1 elif idx is not None: @@ -183,7 +183,7 @@ def get_dataframe( i0 = 0 i1 = self.data.shape[0] if totim is not None: - idx = np.where(self.data["totim"] == totim)[0][0] + idx = np.asarray(self.data["totim"] == totim).nonzero()[0][0] i0 = idx i1 = idx + 1 elif idx is not None: @@ -236,7 +236,7 @@ def _build_dtype(self): Build the recordarray and iposarray, which maps the header information to the position in the formatted file. """ - raise Exception( + raise NotImplementedError( "Abstract method _build_dtype called in BinaryFiles. " "This method needs to be overridden." ) @@ -246,7 +246,7 @@ def _build_index(self): Build the recordarray and iposarray, which maps the header information to the position in the formatted file. """ - raise Exception( + raise NotImplementedError( "Abstract method _build_index called in BinaryFiles. " "This method needs to be overridden." ) @@ -308,10 +308,6 @@ def __init__(self, filename, verbose=False, isBinary="auto"): # get number of observations self.nobs = self.read_integer() - # # continue reading the file - # self.v = np.empty(self.nobs, dtype=float) - # self.v.fill(1.0E+32) - # read obsnames obsnames = [] for idx in range(0, self.nobs): diff --git a/flopy/utils/particletrackfile.py b/flopy/utils/particletrackfile.py index 5029c5860..50c004db8 100644 --- a/flopy/utils/particletrackfile.py +++ b/flopy/utils/particletrackfile.py @@ -60,7 +60,7 @@ def get_maxid(self) -> int: Get the maximum particle ID. Returns - ---------- + ------- out : int Maximum particle ID. @@ -72,7 +72,7 @@ def get_maxtime(self) -> float: Get the maximum tracking time. Returns - ---------- + ------- out : float Maximum tracking time. @@ -99,23 +99,23 @@ def get_data( Whether to return only the minimal, canonical fields. Default is False. 
Returns - ---------- + ------- data : np.recarray Recarray with dtype ParticleTrackFile.outdtype """ data = self._data[list(self.outdtype.names)] if minimal else self._data idx = ( - np.where(data["particleid"] == partid)[0] + np.asarray(data["particleid"] == partid).nonzero()[0] if totim is None else ( - np.where( + np.asarray( (data["time"] >= totim) & (data["particleid"] == partid) - )[0] + ).nonzero()[0] if ge - else np.where( + else np.asarray( (data["time"] <= totim) & (data["particleid"] == partid) - )[0] + ).nonzero()[0] ) ) @@ -136,22 +136,22 @@ def get_alldata(self, totim=None, ge=True, minimal=False): Whether to return only the minimal, canonical fields. Default is False. Returns - ---------- + ------- data : list of numpy record arrays List of recarrays with dtype ParticleTrackFile.outdtype """ - nids = np.unique(self._data["particleid"]).size + nids = np.unique(self._data["particleid"]) data = self._data[list(self.outdtype.names)] if minimal else self._data if totim is not None: idx = ( - np.where(data["time"] >= totim)[0] + np.asarray(data["time"] >= totim).nonzero()[0] if ge - else np.where(data["time"] <= totim)[0] + else np.asarray(data["time"] <= totim).nonzero()[0] ) if len(idx) > 0: data = data[idx] - return [data[data["particleid"] == i] for i in range(nids)] + return [data[data["particleid"] == i] for i in nids] def get_destination_data( self, dest_cells, to_recarray=True diff --git a/flopy/utils/rasters.py b/flopy/utils/rasters.py index 72e32eabf..4fabf0482 100644 --- a/flopy/utils/rasters.py +++ b/flopy/utils/rasters.py @@ -45,7 +45,7 @@ class Raster: """ - FLOAT32 = (float, np.float32, np.float64) + FLOAT32 = (float, np.float32) FLOAT64 = (np.float64,) INT8 = (np.int8, np.uint8) INT16 = (np.int16, np.uint16) @@ -95,10 +95,8 @@ def __init__( if isinstance(crs, CRS): pass - elif isinstance(crs, int): - crs = CRS.from_epsg(crs) - elif isinstance(crs, str): - crs = CRS.from_string(crs) + elif crs is not None: + crs = CRS.from_user_input(crs) else: TypeError("crs type not understood, provide an epsg or proj4") @@ -126,6 +124,13 @@ def __init__( if isinstance(rio_ds, rasterio.io.DatasetReader): self._dataset = rio_ds + @property + def crs(self): + """ + Returns a rasterio CRS object + """ + return self._meta["crs"] + @property def bounds(self): """ @@ -140,6 +145,13 @@ def bounds(self): return xmin, xmax, ymin, ymax + @property + def transform(self): + """ + Returns the affine transform for the raster + """ + return self._meta["transform"] + @property def bands(self): """ @@ -184,6 +196,126 @@ def ycenters(self): self.__xycenters() return self.__ycenters + def to_crs(self, crs=None, epsg=None, inplace=False): + """ + Method for re-projecting rasters from an existing CRS to a + new CRS + + Parameters + ---------- + crs : CRS user input of many different kinds + epsg : int + epsg code input that defines the coordinate system + inplace : bool + Boolean flag to indicate if the operation takes place "in place" + which reprojects the raster within the current object or the + default (False) to_crs() returns a reprojected Raster object + + Returns + ------- + Raster or None: returns a reprojected raster object if + inplace=False, otherwise the reprojected information + overwrites the current Raster object + + """ + from rasterio.crs import CRS + + if self.crs is None: + raise ValueError( + "Cannot transform naive geometries. " + "Please set a crs on the object first." 
+ ) + if crs is not None: + dst_crs = CRS.from_user_input(crs) + elif epsg is not None: + dst_crs = CRS.from_epsg(epsg) + else: + raise ValueError("Must pass either crs or epsg.") + + # skip if the input CRS and output CRS are the exact same + if self.crs.to_epsg() == dst_crs.to_epsg(): + return self + + return self.__transform(dst_crs=dst_crs, inplace=inplace) + + def __transform(self, dst_crs, inplace): + """ + + Parameters + ---------- + dst_crs : rasterio.CRS object + inplace : bool + + Returns + ------- + Raster or None: returns a reprojected raster object if + inplace=False, otherwise the reprojected information + overwrites the current Raster object + + """ + import rasterio + from rasterio.io import MemoryFile + from rasterio.warp import ( + Resampling, + calculate_default_transform, + reproject, + ) + + height = self._meta["height"] + width = self._meta["width"] + xmin, xmax, ymin, ymax = self.bounds + + transform, width, height = calculate_default_transform( + self.crs, dst_crs, width, height, xmin, ymin, xmax, ymax + ) + + kwargs = { + "transform": transform, + "width": width, + "height": height, + "crs": dst_crs, + "nodata": self.nodatavals[0], + "driver": self._meta["driver"], + "count": self._meta["count"], + "dtype": self._meta["dtype"], + } + + with MemoryFile() as memfile: + with memfile.open(**kwargs) as dst: + for band in self.bands: + reproject( + source=self.get_array(band), + destination=rasterio.band(dst, band), + src_transform=self.transform, + src_crs=self.crs, + dst_transform=transform, + dst_crs=dst_crs, + resampling=Resampling.nearest, + ) + with memfile.open() as dataset: + array = dataset.read() + bands = dataset.indexes + meta = dataset.meta + + if inplace: + for ix, band in enumerate(bands): + self.__arr_dict[band] = array[ix] + + self.__xcenters = None + self.__ycenters = None + self._meta.update({k: v for k, v in kwargs.items()}) + self._dataset = None + + else: + return Raster( + array, + bands, + meta["crs"], + meta["transform"], + meta["nodata"], + meta["driver"], + ) + def __xycenters(self): """ Method to create np.arrays of the xy-cell centers @@ -256,7 +388,7 @@ def sample_point(self, *point, band=1): dist = np.sqrt(xt + yt) # 3: find indices of minimum distance - md = np.where(dist == np.nanmin(dist)) + md = np.asarray(dist == np.nanmin(dist)).nonzero() # 4: sample the array and average if necessary vals = [] @@ -327,8 +459,6 @@ def resample_to_grid( modelgrid, band, method="nearest", - multithread=False, - thread_pool=2, extrapolate_edges=False, ): """ @@ -363,11 +493,6 @@ def resample_to_grid( `'mode'` for majority sampling - multithread : bool - DEPRECATED boolean flag indicating if multithreading should be - used with the ``mean`` and ``median`` sampling methods - thread_pool : int - DEPRECATED number of threads to use for mean and median sampling extrapolate_edges : bool boolean flag indicating if areas without data should be filled using the ``nearest`` interpolation method. This option @@ -377,16 +502,18 @@ def resample_to_grid( ------- np.array """ - if multithread: - warnings.warn( - "multithread option has been deprecated and will be removed " - "in flopy version 3.3.8" - ) - import_optional_dependency("scipy") rasterstats = import_optional_dependency("rasterstats") from scipy.interpolate import griddata + xmin, xmax, ymin, ymax = modelgrid.extent + rxmin, rxmax, rymin, rymax = self.bounds + if any([rxmax < xmin, rxmin > xmax, rymax < ymin, rymin > ymax]): + raise AssertionError( + "Raster and model grid do not intersect. 
Check that the grid " + "and raster are in the same coordinate reference system" + ) + method = method.lower() if method in ("linear", "nearest", "cubic"): xc = modelgrid.xcellcenters @@ -410,6 +537,13 @@ def resample_to_grid( arr = self.get_array(band, masked=True) arr = arr.flatten() + # filter out nan values from the original dataset + if np.isnan(np.sum(arr)): + idx = np.isfinite(arr) + rxc = rxc[idx] + ryc = ryc[idx] + arr = arr[idx] + # step 3: use griddata interpolation to snap to grid data = griddata( (rxc, ryc), @@ -534,7 +668,7 @@ def crop(self, polygon, invert=False): xt = (pt[0] - xc) ** 2 yt = (pt[1] - yc) ** 2 hypot = np.sqrt(xt + yt) - ind = np.where(hypot == np.min(hypot)) + ind = np.asarray(hypot == np.min(hypot)).nonzero() yind.append(ind[0][0]) xind.append(ind[1][0]) @@ -770,6 +904,101 @@ def load(raster: Union[str, os.PathLike]): meta["driver"], ) + @staticmethod + def raster_from_array( + array, + modelgrid=None, + nodataval=1e-10, + crs=None, + transform=None, + ): + """ + Method to create a raster from an array. When using a modelgrid to + define the transform, delc and delr must be uniform in each dimension. + Otherwise, the user can define their own transform using the affine + package. + + Parameters + ---------- + array : np.ndarray + array of (n-bands, nrows, ncols) for the raster + modelgrid : flopy.discretization.StructuredGrid + StructuredGrid object (optional), but transform must be defined + if a StructuredGrid is not supplied + nodataval : (int, float) + Null value + crs : coordinate reference system input of many types + transform : affine.Affine + optional affine transform that defines the spatial parameters + of the raster. This must be supplied if a modelgrid is not + used to define the transform + + Returns + ------- + Raster object + """ + from affine import Affine + + if not isinstance(array, np.ndarray): + array = np.array(array) + + if modelgrid is not None: + if crs is None: + if modelgrid.crs is None: + raise ValueError( + "Cannot create a raster from a grid without a " + "coordinate reference system, please provide a crs " + "using crs=" + ) + crs = modelgrid.crs + + if modelgrid.grid_type != "structured": + raise TypeError( + f"{type(modelgrid)} discretizations are not supported" + ) + + if not np.all(modelgrid.delc == modelgrid.delc[0]): + raise AssertionError("DELC must have a uniform spacing") + + if not np.all(modelgrid.delr == modelgrid.delr[0]): + raise AssertionError("DELR must have a uniform spacing") + + yul = modelgrid.yvertices[0, 0] + xul = modelgrid.xvertices[0, 0] + angrot = modelgrid.angrot + transform = Affine( + modelgrid.delr[0], 0, xul, 0, -modelgrid.delc[0], yul + ) + + if angrot != 0: + transform *= Affine.rotation(angrot) + + if array.size % modelgrid.ncpl != 0: + raise AssertionError( + f"Array size {array.size} is not a multiple of the " + f"number of cells per layer in the model grid " + f"{modelgrid.ncpl}" + ) + + array = array.reshape((-1, modelgrid.nrow, modelgrid.ncol)) + + if transform is not None: + if crs is None: + raise ValueError( + "Cannot create a raster without a coordinate reference " + "system, please use crs= to provide a coordinate reference" + ) + + bands, height, width = array.shape + + return Raster( + array, + bands=list(range(1, bands + 1)), + crs=crs, + transform=transform, + nodataval=nodataval, + ) + def plot(self, ax=None, contour=False, **kwargs): """ Method to plot raster layers or contours. 
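Taken together, the new `raster_from_array` constructor, the `crs` and `transform` properties, and `to_crs` allow round-tripping between model arrays and rasters. A minimal sketch, assuming `rasterio` and `affine` are installed; the grid dimensions, spacing, and EPSG codes below are illustrative only, not taken from the patch:

```python
import numpy as np
import flopy
from flopy.utils import Raster

# a small structured grid with uniform spacing and a known CRS
grid = flopy.discretization.StructuredGrid(
    delc=np.full(10, 100.0),
    delr=np.full(10, 100.0),
    crs=26915,  # assumed EPSG code, for illustration
)

# wrap a model array as a single-band raster on that grid
raster = Raster.raster_from_array(np.random.random((10, 10)), modelgrid=grid)

# reproject to a new CRS (returns a new Raster unless inplace=True)
wgs84 = raster.to_crs(epsg=4326)

# resample back onto the model grid
values = raster.resample_to_grid(grid, band=raster.bands[0], method="nearest")
```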
diff --git a/flopy/utils/recarray_utils.py b/flopy/utils/recarray_utils.py index 27114c732..4b8a00f44 100644 --- a/flopy/utils/recarray_utils.py +++ b/flopy/utils/recarray_utils.py @@ -62,7 +62,7 @@ def ra_slice(ra, cols): -------- >>> import numpy as np >>> from flopy.utils import ra_slice - >>> a = np.core.records.fromrecords([("a", 1, 1.1), ("b", 2, 2.1)]) + >>> a = np.rec.fromrecords([("a", 1, 1.1), ("b", 2, 2.1)]) >>> ra_slice(a, ['f0', 'f1']) rec.array([('a', 1), ('b', 2)], dtype=[('f0', ' 1: diff --git a/flopy/utils/swroutputfile.py b/flopy/utils/swroutputfile.py index a75beeb7a..eff248868 100644 --- a/flopy/utils/swroutputfile.py +++ b/flopy/utils/swroutputfile.py @@ -109,7 +109,7 @@ def get_connectivity(self): ---------- Returns - ---------- + ------- data : numpy array Array has size (nrecord, 3). None is returned if swrtype is not 'flow' @@ -134,7 +134,7 @@ def get_nrecords(self): Get the number of records in the file Returns - ---------- + ------- out : tuple of int A tuple with the number of records and number of flow items in the file. The number of flow items is non-zero only if @@ -149,7 +149,7 @@ def get_kswrkstpkper(self): in the file Returns - ---------- + ------- out : list of (kswr, kstp, kper) tuples List of unique kswr, kstp, kper combinations in binary file. kswr, kstp, and kper values are zero-based. @@ -162,7 +162,7 @@ def get_ntimes(self): Get the number of times in the file Returns - ---------- + ------- out : int The number of simulation times (totim) in binary file. @@ -174,7 +174,7 @@ def get_times(self): Get a list of unique times in the file Returns - ---------- + ------- out : list of floats List contains unique simulation times (totim) in binary file. @@ -186,7 +186,7 @@ def get_record_names(self): Get a list of unique record names in the file Returns - ---------- + ------- out : list of strings List of unique text names in the binary file. @@ -210,7 +210,7 @@ def get_data(self, idx=None, kswrkstpkper=None, totim=None): The simulation time. (default is None) Returns - ---------- + ------- data : numpy record array Array has size (nitems). @@ -231,11 +231,11 @@ def get_data(self, idx=None, kswrkstpkper=None, totim=None): kper1 = kswrkstpkper[2] totim1 = self._recordarray[ - np.where( + np.asarray( (self._recordarray["kswr"] == kswr1) & (self._recordarray["kstp"] == kstp1) & (self._recordarray["kper"] == kper1) - ) + ).nonzero() ]["totim"][0] elif totim is not None: totim1 = totim @@ -288,7 +288,7 @@ def get_ts(self, irec=0, iconn=0, klay=0, istr=0): (default is 0) Returns - ---------- + ------- out : numpy recarray Array has size (ntimes, nitems). The first column in the data array will contain time (totim). 
nitems is 2 for stage @@ -543,7 +543,6 @@ def _read_qaq(self): for irch in range(self.nrecord): klay = self.itemlist[irch] for k in range(klay): - # r[idx, 0] = irch reaches[idx] = irch idx += 1 diff --git a/flopy/utils/triangle.py b/flopy/utils/triangle.py index a0d86d18f..e378fb318 100644 --- a/flopy/utils/triangle.py +++ b/flopy/utils/triangle.py @@ -131,6 +131,7 @@ def add_region(self, point, attribute=0, maximum_area=None): None """ + point = GeoSpatialUtil(point, shapetype="point").points self._regions.append([point, attribute, maximum_area]) def build(self, verbose=False): @@ -309,7 +310,7 @@ def plot_boundary(self, ibm, ax=None, **kwargs): """ if ax is None: ax = plt.gca() - idx = np.where(self.edge["boundary_marker"] == ibm)[0] + idx = np.asarray(self.edge["boundary_marker"] == ibm).nonzero()[0] for i in idx: iv1 = self.edge["endpoint1"][i] iv2 = self.edge["endpoint2"][i] diff --git a/flopy/utils/util_array.py b/flopy/utils/util_array.py index b307a2353..8d341f51a 100644 --- a/flopy/utils/util_array.py +++ b/flopy/utils/util_array.py @@ -6,8 +6,6 @@ """ -# from future.utils import with_metaclass - import copy import os import shutil @@ -255,7 +253,6 @@ def __setattr__(self, key, value): elif key.lower() == "binary": value = bool(value) if value and self.free: - # raise Exception("cannot switch from 'free' to 'binary' format") self._isfree = False self._isbinary = value self._set_defaults() @@ -263,7 +260,6 @@ def __setattr__(self, key, value): elif key.lower() == "free": value = bool(value) if value and self.binary: - # raise Exception("cannot switch from 'binary' to 'free' format") self._isbinary = False self._isfree = bool(value) self._set_defaults() @@ -708,7 +704,7 @@ def plot( List of unique values to be excluded from the plot. Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. @@ -771,7 +767,6 @@ def array(self): if nrow is not None: # typical 3D case a = np.empty((self.shape), dtype=self._dtype) - # for i,u2d in self.uds: for i, u2d in enumerate(self.util_2ds): a[i] = u2d.array else: @@ -1226,7 +1221,6 @@ def __get_3d_instance(self, kper, arg): arg, fmtin=self.fmtin, name=name, - # ext_filename=ext_filename, locat=self.locat, array_free_format=self.array_free_format, ) @@ -1520,7 +1514,7 @@ def plot( extracted. (default is zero). Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. @@ -1974,7 +1968,7 @@ def plot( List of unique values to be excluded from the plot. Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. 
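A note on the mechanical substitutions that recur throughout these hunks: single-argument `np.where(cond)` is documented by NumPy as equivalent to `np.asarray(cond).nonzero()`, and `np.isin` supersedes `np.in1d` (deprecated in NumPy 2.0), so the rewrites preserve behavior. A small illustrative check, not part of the patch:

```python
import numpy as np

layer = np.array([0, 1, 1, 2])
old = np.where(layer == 1)[0]              # legacy single-argument form
new = np.asarray(layer == 1).nonzero()[0]  # explicit equivalent
assert (old == new).all()

# np.isin is the modern replacement for np.in1d
assert (np.isin(layer, [1, 2]) == np.in1d(layer, [1, 2])).all()
```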
@@ -2143,9 +2137,6 @@ def python_file_path(self): ------- file_path (str) : path relative to python: includes model_ws """ - # if self.vtype != str: - # raise Exception("Util2d call to python_file_path " + - # "for vtype != str") python_file_path = "" if self._model.model_ws != ".": python_file_path = os.path.join(self._model.model_ws) @@ -2258,8 +2249,6 @@ def get_openclose_cr(self): def get_external_cr(self): locat = self._model.next_ext_unit() - # if self.format.binary: - # locat = -1 * np.abs(locat) self._model.add_external( self.model_file_path, locat, self.format.binary ) @@ -2838,11 +2827,6 @@ def load( curr_unit = cunit break - # Allows for special MT3D array reader - # array_format = None - # if hasattr(model, 'array_format'): - # array_format = model.array_format - cr_dict = Util2d.parse_control_record( f_handle.readline(), current_unit=curr_unit, @@ -2872,7 +2856,6 @@ def load( fname = fname.replace('"', "") fname = fname.replace("\\", os.path.sep) fname = os.path.join(model.model_ws, fname) - # load_txt(shape, file_in, dtype, fmtin): assert os.path.exists( fname ), f"Util2d.load() error: open/close file {fname} not found" @@ -3005,7 +2988,6 @@ def parse_control_record( nunit = abs(int(raw[1])) if ext_unit_dict is not None: try: - # td = ext_unit_dict[int(raw[1])] fname = ext_unit_dict[nunit].filename.strip() except: print( @@ -3045,8 +3027,6 @@ def parse_control_record( cnstnt = int(line[10:20].strip()) else: cnstnt = 0 - # if cnstnt == 0: - # cnstnt = 1 if locat != 0: if len(line) >= 40: fmtin = line[20:40].strip() @@ -3056,10 +3036,6 @@ def parse_control_record( iprn = int(line[40:50].strip()) except: iprn = 0 - # locat = int(raw[0]) - # cnstnt = float(raw[1]) - # fmtin = raw[2].strip() - # iprn = int(raw[3]) if locat == 0: freefmt = "constant" elif locat < 0: diff --git a/flopy/utils/util_list.py b/flopy/utils/util_list.py index 66d2baf35..8616cf11d 100644 --- a/flopy/utils/util_list.py +++ b/flopy/utils/util_list.py @@ -312,7 +312,6 @@ def __cast_data(self, data): # If data is a list, then all we can do is try to cast it to # an ndarray, then cast again to a recarray if isinstance(data, list): - # warnings.warn("MfList casting list to array") try: data = np.array(data) except Exception as e: @@ -420,7 +419,7 @@ def __cast_ndarray(self, kper, d): f"dtype len: {len(self.dtype)}" ) try: - self.__data[kper] = np.core.records.fromarrays( + self.__data[kper] = np.rec.fromarrays( d.transpose(), dtype=self.dtype ) except Exception as e: @@ -557,8 +556,6 @@ def __getitem__(self, kper): # If the data entry for kper is a string, # return the corresponding recarray, # but don't reset the value in the data dict - # assert kper in list(self.data.keys()), "MfList.__getitem__() kper " + \ - # str(kper) + " not in data.keys()" try: kper = int(kper) except Exception as e: @@ -588,7 +585,6 @@ def __setitem__(self, kper, data): # If data is a list, then all we can do is try to cast it to # an ndarray, then cast again to a recarray if isinstance(data, list): - # warnings.warn("MfList casting list to array") try: data = np.array(data) except Exception as e: @@ -611,10 +607,7 @@ def __setitem__(self, kper, data): f"MfList error: unsupported data type: {type(data)}" ) - # raise NotImplementedError("MfList.__setitem__() not implemented") - def __fromfile(self, f): - # d = np.fromfile(f,dtype=self.dtype,count=count) try: d = np.genfromtxt(f, dtype=self.dtype) except Exception as e: @@ -640,9 +633,6 @@ def get_filenames(self): self._model.array_free_format and self._model.external_path is not None ): 
- # py_filepath = '' - # py_filepath = os.path.join(py_filepath, - # self._model.external_path) filename = f"{self.package.name[0]}_{kper:04d}.dat" filenames.append(filename) return filenames @@ -820,15 +810,15 @@ def check_kij(self): data = self[kper] if data is not None: k = data["k"] - k_idx = np.where(np.logical_or(k < 0, k >= nl)) + k_idx = np.asarray(np.logical_or(k < 0, k >= nl)).nonzero() if k_idx[0].shape[0] > 0: out_idx.extend(list(k_idx[0])) i = data["i"] - i_idx = np.where(np.logical_or(i < 0, i >= nr)) + i_idx = np.asarray(np.logical_or(i < 0, i >= nr)).nonzero() if i_idx[0].shape[0] > 0: out_idx.extend(list(i_idx[0])) j = data["j"] - j_idx = np.where(np.logical_or(j < 0, j >= nc)) + j_idx = np.asarray(np.logical_or(j < 0, j >= nc)).nonzero() if j_idx[0].shape[0]: out_idx.extend(list(j_idx[0])) @@ -897,9 +887,10 @@ def attribute_by_kper(self, attr, function=np.mean, idx_val=None): kper_data = self.__data[kper] if idx_val is not None: kper_data = kper_data[ - np.where(kper_data[idx_val[0]] == idx_val[1]) + np.asarray( + kper_data[idx_val[0]] == idx_val[1] + ).nonzero() ] - # kper_vtype = self.__vtype[kper] v = function(kper_data[attr]) values.append(v) return values @@ -964,7 +955,7 @@ def plot( List of unique values to be excluded from the plot. Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. @@ -998,40 +989,10 @@ def plot( return axes - def to_shapefile(self, filename, kper=None): - """ - Export stress period boundary condition (MfList) data for a specified - stress period - - Parameters - ---------- - filename : str - Shapefile name to write - kper : int - MODFLOW zero-based stress period number to return. (default is None) - - Returns - ---------- - None - - See Also - -------- - - Notes - ----- - - Examples - -------- - >>> import flopy - >>> ml = flopy.modflow.Modflow.load('test.nam') - >>> ml.wel.to_shapefile('test_hk.shp', kper=1) - """ - import warnings - - warnings.warn( - "Deprecation warning: to_shapefile() is deprecated. use .export()" - ) - self.export(filename, kper=kper) + def to_shapefile(self, *args, **kwargs): + """Raises AttributeError, use :meth:`export`.""" + # deprecated 3.2.4, changed to raise AttributeError version 3.8 + raise AttributeError(".to_shapefile() was removed; use .export()") def to_array(self, kper=0, mask=False): """ @@ -1045,7 +1006,7 @@ def to_array(self, kper=0, mask=False): mask : boolean return array with np.nan instead of zero Returns - ---------- + ------- out : dict of numpy.ndarrays Dictionary of 3-D numpy arrays containing the stress period data for a selected stress period. The dictionary keys are the MfList dtype @@ -1125,7 +1086,6 @@ def to_array(self, kper=0, mask=False): (self._model.nlay, self._model.nrow, self._model.ncol), dtype=float, ) - # print(name,kper) for rec in sarr: if unstructured: arr[rec["node"]] += rec[name] @@ -1142,9 +1102,6 @@ def to_array(self, kper=0, mask=False): arr[cnt == 0.0] = np.nan arrays[name] = arr.copy() - # elif mask: - # for name, arr in arrays.items(): - # arrays[name][:] = np.nan return arrays @property diff --git a/flopy/utils/utils_def.py b/flopy/utils/utils_def.py index c157fd563..421222394 100644 --- a/flopy/utils/utils_def.py +++ b/flopy/utils/utils_def.py @@ -1,4 +1,3 @@ -# pylint: disable=E1101 """ Generic classes and utility functions """ @@ -152,7 +151,7 @@ def get_util2d_shape_for_layer(model, layer=0): layer (base 0) for which Util2d shape is sought. 
Returns - --------- + ------- (nrow,ncol) : tuple of ints util2d shape for the given layer """ @@ -186,7 +185,7 @@ def get_unitnumber_from_ext_unit_dict( Default is 0, in which case the returned output file is None. Returns - --------- + ------- unitnumber : int file unit number for the given modflow package (or None) filenames : list @@ -219,7 +218,7 @@ def type_from_iterable(_iter, index=0, _type=int, default_val=0): default_val : default value (0) Returns - ---------- + ------- val : value of type _type, or default_val """ try: diff --git a/flopy/utils/voronoi.py b/flopy/utils/voronoi.py index 15982d205..d3f52566a 100644 --- a/flopy/utils/voronoi.py +++ b/flopy/utils/voronoi.py @@ -153,11 +153,11 @@ def tri2vor(tri, **kwargs): polygon = [(x, y) for x, y in tri._polygons[ipolygon]] vor_vert_notindomain = point_in_polygon(xc, yc, polygon) vor_vert_notindomain = vor_vert_notindomain.flatten() - idx = np.where(vor_vert_notindomain == True) + idx = np.asarray(vor_vert_notindomain == True).nonzero() vor_vert_indomain[idx] = False idx_vertindex = -1 * np.ones((nvertices), int) - idx_filtered = np.where(vor_vert_indomain == True) + idx_filtered = np.asarray(vor_vert_indomain == True).nonzero() nvalid_vertices = len(idx_filtered[0]) # renumber valid vertices consecutively idx_vertindex[idx_filtered] = np.arange(nvalid_vertices) diff --git a/flopy/utils/zonbud.py b/flopy/utils/zonbud.py index 91a0a8910..39949a1b0 100644 --- a/flopy/utils/zonbud.py +++ b/flopy/utils/zonbud.py @@ -169,8 +169,6 @@ def __init__( self._zonenamedict[z] = "_".join(a.split()) seen.append(z) - # self._iflow_recnames = self._get_internal_flow_record_names() - # All record names in the cell-by-cell budget binary file self.record_names = [ n.strip() for n in self.cbc.get_unique_record_names(decode=True) @@ -550,18 +548,18 @@ def _update_budget_recordarray( try: if kstpkper is not None: for rn, cn, flux in zip(rownames, colnames, fluxes): - rowidx = np.where( + rowidx = np.asarray( (self._budget["time_step"] == kstpkper[0]) & (self._budget["stress_period"] == kstpkper[1]) & (self._budget["name"] == rn) - ) + ).nonzero() self._budget[cn][rowidx] += flux elif totim is not None: for rn, cn, flux in zip(rownames, colnames, fluxes): - rowidx = np.where( + rowidx = np.asarray( (self._budget["totim"] == totim) & (self._budget["name"] == rn) - ) + ).nonzero() self._budget[cn][rowidx] += flux except Exception as e: @@ -594,9 +592,9 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): # ZONE 4 TO 3 IS THE NEGATIVE OF FLOW FROM 3 TO 4. 
# 1ST, CALCULATE FLOW BETWEEN NODE J,I,K AND J-1,I,K - k, i, j = np.where( + k, i, j = np.asarray( self.izone[:, :, 1:] > self.izone[:, :, :-1] - ) + ).nonzero() # Adjust column values to account for the starting position of "nz" j += 1 @@ -615,9 +613,9 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): # Don't include CH to CH flow (can occur if CHTOCH option is used) # Create an iterable tuple of (from zone, to zone, flux) # Then group tuple by (from_zone, to_zone) and sum the flux values - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nzl[idx], nz[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim @@ -627,18 +625,18 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): # Don't include CH to CH flow (can occur if CHTOCH option is used) # Create an iterable tuple of (from zone, to zone, flux) # Then group tuple by (from_zone, to_zone) and sum the flux values - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nz[idx], nzl[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) # FLOW BETWEEN NODE J,I,K AND J+1,I,K - k, i, j = np.where( + k, i, j = np.asarray( self.izone[:, :, :-1] > self.izone[:, :, 1:] - ) + ).nonzero() # Define the zone from which flow is coming nz = self.izone[k, i, j] @@ -654,9 +652,9 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): # Don't include CH to CH flow (can occur if CHTOCH option is used) # Create an iterable tuple of (from zone, to zone, flux) # Then group tuple by (from_zone, to_zone) and sum the flux values - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nz[idx], nzr[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim @@ -666,24 +664,24 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): # Don't include CH to CH flow (can occur if CHTOCH option is used) # Create an iterable tuple of (from zone, to zone, flux) # Then group tuple by (from_zone, to_zone) and sum the flux values - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nzr[idx], nz[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) # CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION - k, i, j = np.where(ich == 1) + k, i, j = np.asarray(ich == 1).nonzero() k, i, j = k[j > 0], i[j > 0], j[j > 0] jl = j - 1 nzl = self.izone[k, i, jl] nz = self.izone[k, i, j] q = data[k, i, jl] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx]) fz = ["TO_CONSTANT_HEAD"] * len(tzi) tz = [self._zonenamedict[z] for z in tzi] @@ -691,9 +689,9 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx]) fz = ["FROM_CONSTANT_HEAD"] * len(fzi) tz = [self._zonenamedict[z] for z in tzi[tzi != 0]] @@ -701,7 +699,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - k, i, j = np.where(ich == 1) + 
k, i, j = np.asarray(ich == 1).nonzero() k, i, j = ( k[j < self.ncol - 1], i[j < self.ncol - 1], @@ -711,9 +709,9 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): jr = j + 1 nzr = self.izone[k, i, jr] q = data[k, i, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx]) fz = ["FROM_CONSTANT_HEAD"] * len(tzi) tz = [self._zonenamedict[z] for z in tzi] @@ -721,9 +719,9 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx]) fz = ["TO_CONSTANT_HEAD"] * len(fzi) tz = [self._zonenamedict[z] for z in tzi] @@ -734,7 +732,6 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): except Exception as e: print(e) raise - return def _accumulate_flow_fff(self, recname, ich, kstpkper, totim): """ @@ -758,64 +755,64 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim): # "FLOW FRONT FACE" # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I-1,K - k, i, j = np.where( + k, i, j = np.asarray( self.izone[:, 1:, :] < self.izone[:, :-1, :] - ) + ).nonzero() i += 1 ia = i - 1 nza = self.izone[k, ia, j] nz = self.izone[k, i, j] q = data[k, ia, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I+1,K. 
- k, i, j = np.where( + k, i, j = np.asarray( self.izone[:, :-1, :] < self.izone[:, 1:, :] - ) + ).nonzero() nz = self.izone[k, i, j] ib = i + 1 nzb = self.izone[k, ib, j] q = data[k, i, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nz[idx], nzb[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) # CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION - k, i, j = np.where(ich == 1) + k, i, j = np.asarray(ich == 1).nonzero() k, i, j = k[i > 0], i[i > 0], j[i > 0] ia = i - 1 nza = self.izone[k, ia, j] nz = self.izone[k, i, j] q = data[k, ia, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx]) fz = ["TO_CONSTANT_HEAD"] * len(tzi) tz = [self._zonenamedict[z] for z in tzi] @@ -823,9 +820,9 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx]) fz = ["FROM_CONSTANT_HEAD"] * len(fzi) tz = [self._zonenamedict[z] for z in tzi] @@ -833,7 +830,7 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - k, i, j = np.where(ich == 1) + k, i, j = np.asarray(ich == 1).nonzero() k, i, j = ( k[i < self.nrow - 1], i[i < self.nrow - 1], @@ -843,9 +840,9 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim): ib = i + 1 nzb = self.izone[k, ib, j] q = data[k, i, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) fz = ["FROM_CONSTANT_HEAD"] * len(tzi) tz = [self._zonenamedict[z] for z in tzi] @@ -853,9 +850,9 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) fz = ["TO_CONSTANT_HEAD"] * len(fzi) tz = [self._zonenamedict[z] for z in tzi] @@ -890,64 +887,64 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim): # "FLOW LOWER FACE" # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K-1 - k, i, j = np.where( + k, i, j = np.asarray( self.izone[1:, :, :] < self.izone[:-1, :, :] - ) + ).nonzero() k += 1 ka = k - 1 nza = self.izone[ka, i, j] nz = self.izone[k, i, j] q = data[ka, i, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K+1 - k, i, j = np.where( + k, i, j = np.asarray( self.izone[:-1, :, :] < self.izone[1:, :, :] - ) + ).nonzero() nz = 
self.izone[k, i, j] kb = k + 1 nzb = self.izone[kb, i, j] q = data[k, i, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nz[idx], nzb[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) # CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION - k, i, j = np.where(ich == 1) + k, i, j = np.asarray(ich == 1).nonzero() k, i, j = k[k > 0], i[k > 0], j[k > 0] ka = k - 1 nza = self.izone[ka, i, j] nz = self.izone[k, i, j] q = data[ka, i, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx]) fz = ["TO_CONSTANT_HEAD"] * len(tzi) tz = [self._zonenamedict[z] for z in tzi] @@ -955,9 +952,9 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx]) fz = ["FROM_CONSTANT_HEAD"] * len(fzi) tz = [self._zonenamedict[z] for z in tzi] @@ -965,7 +962,7 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - k, i, j = np.where(ich == 1) + k, i, j = np.asarray(ich == 1).nonzero() k, i, j = ( k[k < self.nlay - 1], i[k < self.nlay - 1], @@ -975,9 +972,9 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim): kb = k + 1 nzb = self.izone[kb, i, j] q = data[k, i, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) fz = ["FROM_CONSTANT_HEAD"] * len(tzi) tz = [self._zonenamedict[z] for z in tzi] @@ -985,9 +982,9 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) fz = ["TO_CONSTANT_HEAD"] * len(fzi) tz = [self._zonenamedict[z] for z in tzi] @@ -998,7 +995,6 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim): except Exception as e: print(e) raise - return def _accumulate_flow_ssst(self, recname, kstpkper, totim): # NOT AN INTERNAL FLOW TERM, SO MUST BE A SOURCE TERM OR STORAGE @@ -1051,9 +1047,9 @@ def _accumulate_flow_ssst(self, recname, kstpkper, totim): # 1-LAYER ARRAY THAT DEFINES LAYER 1 qin = np.ma.zeros(self.cbc_shape, self.float_type) qout = np.ma.zeros(self.cbc_shape, self.float_type) - r, c = np.where(data > 0) + r, c = np.asarray(data > 0).nonzero() qin[0, r, c] = data[r, c] - r, c = np.where(data < 0) + r, c = np.asarray(data < 0).nonzero() qout[0, r, c] = data[r, c] else: # Should not happen @@ -1105,16 +1101,16 @@ def _compute_mass_balance(self, kstpkper, totim): innames = [n for n in recnames if n.startswith("FROM_")] outnames = [n for n in recnames if n.startswith("TO_")] if kstpkper is not None: - rowidx = np.where( + rowidx = np.asarray( (self._budget["time_step"] == kstpkper[0]) & (self._budget["stress_period"] == kstpkper[1]) & np.in1d(self._budget["name"], innames) - ) + ).nonzero() elif totim is not 
None:
-            rowidx = np.where(
+            rowidx = np.asarray(
                 (self._budget["totim"] == totim)
                 & np.in1d(self._budget["name"], innames)
-            )
+            ).nonzero()
         a = _numpyvoid2numeric(
             self._budget[list(self._zonenamedict.values())][rowidx]
         )
@@ -1127,16 +1123,16 @@ def _compute_mass_balance(self, kstpkper, totim):
         # Compute outflows
         if kstpkper is not None:
-            rowidx = np.where(
+            rowidx = np.asarray(
                 (self._budget["time_step"] == kstpkper[0])
                 & (self._budget["stress_period"] == kstpkper[1])
                 & np.in1d(self._budget["name"], outnames)
-            )
+            ).nonzero()
         elif totim is not None:
-            rowidx = np.where(
+            rowidx = np.asarray(
                 (self._budget["totim"] == totim)
                 & np.in1d(self._budget["name"], outnames)
-            )
+            ).nonzero()
         a = _numpyvoid2numeric(
             self._budget[list(self._zonenamedict.values())][rowidx]
         )
@@ -1716,7 +1712,7 @@ def __mul__(self, other):
         newbud = self._budget.copy()
         for f in self._zonenamedict.values():
             newbud[f] = np.array([r for r in newbud[f]]) * other
-        idx = np.in1d(self._budget["name"], "PERCENT_DISCREPANCY")
+        idx = np.isin(self._budget["name"], "PERCENT_DISCREPANCY")
         newbud[:][idx] = self._budget[:][idx]
         newobj = self.copy()
         newobj._budget = newbud
@@ -1726,7 +1722,7 @@ def __truediv__(self, other):
         newbud = self._budget.copy()
         for f in self._zonenamedict.values():
             newbud[f] = np.array([r for r in newbud[f]]) / float(other)
-        idx = np.in1d(self._budget["name"], "PERCENT_DISCREPANCY")
+        idx = np.isin(self._budget["name"], "PERCENT_DISCREPANCY")
         newbud[:][idx] = self._budget[:][idx]
         newobj = self.copy()
         newobj._budget = newbud
@@ -1736,7 +1732,7 @@ def __div__(self, other):
         newbud = self._budget.copy()
         for f in self._zonenamedict.values():
             newbud[f] = np.array([r for r in newbud[f]]) / float(other)
-        idx = np.in1d(self._budget["name"], "PERCENT_DISCREPANCY")
+        idx = np.isin(self._budget["name"], "PERCENT_DISCREPANCY")
         newbud[:][idx] = self._budget[:][idx]
         newobj = self.copy()
         newobj._budget = newbud
@@ -1746,7 +1742,7 @@ def __add__(self, other):
         newbud = self._budget.copy()
         for f in self._zonenamedict.values():
             newbud[f] = np.array([r for r in newbud[f]]) + other
-        idx = np.in1d(self._budget["name"], "PERCENT_DISCREPANCY")
+        idx = np.isin(self._budget["name"], "PERCENT_DISCREPANCY")
         newbud[:][idx] = self._budget[:][idx]
         newobj = self.copy()
         newobj._budget = newbud
@@ -1756,7 +1752,7 @@ def __sub__(self, other):
         newbud = self._budget.copy()
         for f in self._zonenamedict.values():
             newbud[f] = np.array([r for r in newbud[f]]) - other
-        idx = np.in1d(self._budget["name"], "PERCENT_DISCREPANCY")
+        idx = np.isin(self._budget["name"], "PERCENT_DISCREPANCY")
         newbud[:][idx] = self._budget[:][idx]
         newobj = self.copy()
         newobj._budget = newbud
@@ -2423,7 +2419,7 @@ def _recarray_to_dataframe(
     else:
         index_cols = ["time_step", "stress_period", "name"]

-    df = df.set_index(index_cols)  # .sort_index(level=0)
+    df = df.set_index(index_cols)
    if zones is not None:
         keep_cols = zones
     else:
@@ -2464,7 +2460,7 @@ def _get_budget(recarray, zonenamedict, names=None, zones=None, net=False):
     if "totim" in recarray.dtype.names:
         standard_fields.insert(0, "totim")
     select_fields = standard_fields + list(zonenamedict.values())
-    select_records = np.where(recarray["name"] == recarray["name"])
+    select_records = np.asarray(recarray["name"] == recarray["name"]).nonzero()
     if zones is not None:
         for idx, z in enumerate(zones):
             if isinstance(z, int):
@@ -2473,7 +2469,7 @@
     if names is not None:
         names = _clean_budget_names(recarray, names)
-        select_records = np.in1d(recarray["name"], names)
+        select_records = np.isin(recarray["name"], names)
     if net:
         if names is None:
             names = _clean_budget_names(recarray, _get_record_names(recarray))
@@ -2489,7 +2485,7 @@ def _get_budget(recarray, zonenamedict, names=None, zones=None, net=False):
                 seen.append(iname)
             else:
                 net_names.append(iname)
-        select_records = np.in1d(net_budget["name"], net_names)
+        select_records = np.isin(net_budget["name"], net_names)
         return net_budget[select_fields][select_records]
     else:
         return recarray[select_fields][select_records]
@@ -2581,8 +2577,8 @@ def _compute_net_budget(recarray, zonenamedict):
     if "totim" not in recarray.dtype.names:
         select_fields.pop(0)

-    select_records_in = np.in1d(recarray["name"], innames)
-    select_records_out = np.in1d(recarray["name"], outnames)
+    select_records_in = np.isin(recarray["name"], innames)
+    select_records_out = np.isin(recarray["name"], outnames)
     in_budget = recarray[select_fields][select_records_in]
     out_budget = recarray[select_fields][select_records_out]
     net_budget = in_budget.copy()
@@ -2947,10 +2943,10 @@ def _pivot_recarray(recarray):
     pvt_rec = np.recarray((1,), dtype=dtype)
     n = 0
     for kstp, kper in kstp_kper:
-        idxs = np.where(
+        idxs = np.asarray(
             (recarray["time_step"] == kstp)
             & (recarray["stress_period"] == kper)
-        )
+        ).nonzero()
         if len(idxs) == 0:
             pass
         else:
@@ -3010,7 +3006,7 @@ def _volumetric_flux(recarray, modeltime, extrapolate_kper=False):
         perlen = modeltime.perlen
         totim = np.add.accumulate(perlen)
         for per in range(nper):
-            idx = np.where(recarray["kper"] == per)[0]
+            idx = np.asarray(recarray["kper"] == per).nonzero()[0]
             if len(idx) == 0:
                 continue

@@ -3021,7 +3017,7 @@ def _volumetric_flux(recarray, modeltime, extrapolate_kper=False):
                 if zone == 0:
                     continue

-                zix = np.where(temp["zone"] == zone)[0]
+                zix = np.asarray(temp["zone"] == zone).nonzero()[0]
                 if len(zix) == 0:
                     raise Exception
@@ -3054,9 +3050,9 @@ def _volumetric_flux(recarray, modeltime, extrapolate_kper=False):
         totim = modeltime.totim
         for ix, nstp in enumerate(modeltime.nstp):
             for stp in range(nstp):
-                idx = np.where(
+                idx = np.asarray(
                     (recarray["kper"] == ix) & (recarray["kstp"] == stp)
-                )
+                ).nonzero()
                 if len(idx[0]) == 0:
                     continue
                 elif n == 0:
diff --git a/flopy/version.py b/flopy/version.py
index f2ed5789f..f0447fffa 100644
--- a/flopy/version.py
+++ b/flopy/version.py
@@ -1,4 +1,4 @@
 # flopy version file automatically created using
-# update_version.py on May 23, 2024 20:49:48
+# update_version.py on August 08, 2024 13:58:49

-__version__ = "3.7.0"
+__version__ = "3.8.0"
diff --git a/pyproject.toml b/pyproject.toml
index 3d9e0607e..f18d5cd16 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -29,7 +29,7 @@ classifiers = [
 ]
 requires-python = ">=3.8"
 dependencies = [
-    "numpy >=1.15.0,<2.0.0",
+    "numpy>=1.20.3",
     "matplotlib >=1.4.0",
     "pandas >=2.0.0"
 ]
@@ -47,6 +47,7 @@ test = [
     "flaky",
     "filelock",
     "jupyter",
+    "jupyter_client >=8.4.0",  # avoid datetime.utcnow() deprecation warning
     "jupytext",
     "modflow-devtools",
     "pytest !=8.1.0",
@@ -130,12 +131,18 @@ extend-include = [
 ]

 [tool.ruff.lint]
-select = ["F", "E", "I001"]
+select = [
+    "D409",  # pydocstyle - section-underline-matches-section-length
+    "E",  # pycodestyle error
+    "F",  # Pyflakes
+    "I001",  # isort - unsorted-imports
+]
 ignore = [
     "E402",  # module level import not at top of file
     "E501",  # line too long TODO FIXME
     "E712",  # Avoid equality comparisons to `True`
     "E722",  # do not use bare `except`
+    "E721",  # use `is`/`is not` for type comparisons
     "E741",  # ambiguous variable name
     "F401",  # unused import
     "F403",  # unable to detect undefined names (star imports)
diff --git a/scripts/README.md b/scripts/README.md
index 71ed667f2..64b0536af 100644
--- a/scripts/README.md
+++ b/scripts/README.md
@@ -33,7 +33,6 @@ For instance, `e689af57e7439b9005749d806248897ad550eab5_20150811_041632_uncommit
 The `update_version.py` script can be used to update FloPy version numbers. Running the script first updates the version in `version.txt`, then propagates the change to various other places version strings or timestamps are embedded in the repository:

 - `flopy/version.py`
-- `flopy/DISCLAIMER.md`
 - `CITATION.cff`
 - `README.md`
 - `docs/PyPI_release.md`
@@ -46,12 +45,10 @@ If the script is run with no arguments, the version number is not changed, but u
 python scripts/update_version.py -v 3.3.6
 ```

-To get the current version number, use the `--get` flag:
+To get the current version number, use the `--get` flag (short `-g`):

 ```shell
-python scripts/update_version.py
+python scripts/update_version.py -g
 ```

 This simply returns the contents of `version.txt` and does not write any changes to the repository's files.
-
-By default, the script assumes a local development version of FloPy. The `--approve` flag should be used prior to releasing a new FloPy version. This will alter the `DISCLAIMER.md` file, substituting wording to indicate the version is no longer preliminary but approved for official release. See [the release docs](../docs/make_release.md) for more information.
diff --git a/scripts/process_benchmarks.py b/scripts/process_benchmarks.py
index 1b29da52e..b934c5b33 100644
--- a/scripts/process_benchmarks.py
+++ b/scripts/process_benchmarks.py
@@ -16,7 +16,6 @@

 json_paths = list(Path(indir).rglob("*.json"))
 print(f"Found {len(json_paths)} JSON files")
-# pprint([str(p) for p in json_paths])


 def get_benchmarks(paths):
@@ -37,8 +36,6 @@ def get_benchmarks(paths):
         fullname = benchmark["fullname"]
         included = [
             "min",
-            # 'max',
-            # 'median',
             "mean",
         ]
         for stat, value in benchmark["stats"].items():
@@ -175,6 +172,5 @@ def seaborn_plot(stats):
     stats = pd.DataFrame(case).groupby("stat")
     case_name = str(case_name).replace("/", "_").replace(":", "_")

-    # fig = matplotlib_plot(stats)
     fig = seaborn_plot(stats)
     plt.savefig(str(outdir / f"{case_name}.png"))
diff --git a/scripts/update_version.py b/scripts/update_version.py
index 3f309f19c..619a9e133 100644
--- a/scripts/update_version.py
+++ b/scripts/update_version.py
@@ -1,5 +1,4 @@
 import argparse
-import json
 import re
 import textwrap
 from datetime import datetime
@@ -9,6 +8,14 @@
 from filelock import FileLock
 from packaging.version import Version

+_epilog = """\
+Update version information stored in version.txt in the project root,
+as well as several other files in the repository. If --version is not
+provided, the version number will not be changed. A file lock is held
+to synchronize file access. The version tag must comply with standard
+'<major>.<minor>.<patch>' format conventions for semantic versioning.
+To show the version without changing anything, use --get (short -g).
+"""
 _project_name = "flopy"
 _project_root_path = Path(__file__).parent.parent
 _version_txt_path = _project_root_path / "version.txt"
@@ -17,53 +24,21 @@
 # file names and the path to the file relative to the repo root directory
 file_paths_list = [
     _project_root_path / "CITATION.cff",
-    _project_root_path / "code.json",
     _project_root_path / "README.md",
     _project_root_path / "docs" / "PyPI_release.md",
     _project_root_path / "flopy" / "version.py",
-    _project_root_path / "flopy" / "DISCLAIMER.md",
 ]
 file_paths = {pth.name: pth for pth in file_paths_list}  # keys for each file

-approved_disclaimer = """Disclaimer
-----------
-
-This software is provided "as is" and "as-available", and makes no
-representations or warranties of any kind concerning the software, whether
-express, implied, statutory, or other. This includes, without limitation,
-warranties of title, merchantability, fitness for a particular purpose,
-non-infringement, absence of latent or other defects, accuracy, or the
-presence or absence of errors, whether or not known or discoverable.
-"""
-
-preliminary_disclaimer = """Disclaimer
-----------
-
-This software is preliminary or provisional and is subject to revision. It is
-being provided to meet the need for timely best science. This software is
-provided "as is" and "as-available", and makes no representations or warranties
-of any kind concerning the software, whether express, implied, statutory, or
-other. This includes, without limitation, warranties of title,
-merchantability, fitness for a particular purpose, non-infringement, absence
-of latent or other defects, accuracy, or the presence or absence of errors,
-whether or not known or discoverable.
-"""
-
-
 def split_nonnumeric(s):
     match = re.compile("[^0-9]").search(s)
     return [s[: match.start()], s[match.start() :]] if match else s


-_initial_version = Version("0.0.1")
 _current_version = Version(_version_txt_path.read_text().strip())


-def get_disclaimer(approved: bool = False):
-    return approved_disclaimer if approved else preliminary_disclaimer
-
-
 def update_version_txt(version: Version):
     with open(_version_txt_path, "w") as f:
         f.write(str(version))
@@ -81,15 +56,10 @@ def update_version_py(timestamp: datetime, version: Version):
     print(f"Updated {_version_py_path} to version {version}")


-def get_software_citation(
-    timestamp: datetime, version: Version, approved: bool = False
-):
+def get_software_citation(timestamp: datetime, version: Version):
     # get data Software/Code citation for FloPy
     citation = yaml.safe_load(file_paths["CITATION.cff"].read_text())

-    sb = ""
-    if not approved:
-        sb = " (preliminary)"
     # format author names
     authors = []
     for author in citation["authors"]:
@@ -116,7 +86,7 @@ def get_software_citation(

     # add the rest of the citation
     line += (
-        f", {timestamp.year}, FloPy v{version}{sb}: "
+        f", {timestamp.year}, FloPy v{version}: "
         f"U.S. Geological Survey Software Release, {timestamp:%d %B %Y}, "
         "https://doi.org/10.5066/F7BK19FH]"
         "(https://doi.org/10.5066/F7BK19FH)"
@@ -125,83 +95,44 @@
     return line


-def update_codejson(
-    timestamp: datetime, version: Version, approved: bool = False
-):
-    # define json filename
-    json_fname = file_paths["code.json"]
-
-    # load and modify json file
-    data = json.loads(json_fname.read_text())
-
-    # modify the json file data
-    data[0]["date"]["metadataLastUpdated"] = timestamp.strftime("%Y-%m-%d")
-    data[0]["version"] = str(version)
-    data[0]["status"] = "Release" if approved else "Preliminary"
-
-    # rewrite the json file
-    with open(json_fname, "w") as f:
-        json.dump(data, f, indent=4)
-        f.write("\n")
-
-    print(f"Updated {json_fname} to version {version}")
-
-
-def update_readme_markdown(
-    timestamp: datetime, version: Version, approved: bool = False
-):
-    # create disclaimer text
-    disclaimer = get_disclaimer(approved)
-
+def update_readme_markdown(timestamp: datetime, version: Version):
     # read README.md into memory
     fpth = file_paths["README.md"]
     lines = fpth.read_text().rstrip().split("\n")

     # rewrite README.md
-    terminate = False
-    f = open(fpth, "w")
-    for line in lines:
-        if "### Version " in line:
-            line = f"### Version {version}"
-            if not approved:
-                line += " (preliminary)"
-        elif "[flopy continuous integration]" in line:
-            line = (
-                "[![flopy continuous integration](https://github.com/"
-                "modflowpy/flopy/actions/workflows/commit.yml/badge.svg?"
-                "branch=develop)](https://github.com/modflowpy/flopy/actions/"
-                "workflows/commit.yml)"
-            )
-        elif "[Read the Docs]" in line:
-            line = (
-                "[![Read the Docs](https://github.com/modflowpy/flopy/"
-                "actions/workflows/rtd.yml/badge.svg?branch=develop)]"
-                "(https://github.com/modflowpy/flopy/actions/"
-                "workflows/rtd.yml)"
-            )
-        elif "[Coverage Status]" in line:
-            line = (
-                "[![Coverage Status](https://coveralls.io/repos/github/"
-                "modflowpy/flopy/badge.svg?branch=develop)]"
-                "(https://coveralls.io/github/modflowpy/"
-                "flopy?branch=develop)"
-            )
-        elif "doi.org/10.5066/F7BK19FH" in line:
-            line = get_software_citation(timestamp, version, approved)
-        elif "Disclaimer" in line:
-            line = disclaimer
-            terminate = True
-        f.write(f"{line}\n")
-        if terminate:
-            break
+    with open(fpth, "w") as f:
+        for line in lines:
+            if "### Version " in line:
+                line = f"### Version {version}"
+            elif "[flopy continuous integration]" in line:
+                line = (
+                    "[![flopy continuous integration](https://github.com/"
+                    "modflowpy/flopy/actions/workflows/commit.yml/badge.svg?"
+                    "branch=develop)](https://github.com/modflowpy/flopy/actions/"
+                    "workflows/commit.yml)"
+                )
+            elif "[Read the Docs]" in line:
+                line = (
+                    "[![Read the Docs](https://github.com/modflowpy/flopy/"
+                    "actions/workflows/rtd.yml/badge.svg?branch=develop)]"
+                    "(https://github.com/modflowpy/flopy/actions/"
+                    "workflows/rtd.yml)"
+                )
+            elif "[Coverage Status]" in line:
+                line = (
+                    "[![Coverage Status](https://coveralls.io/repos/github/"
+                    "modflowpy/flopy/badge.svg?branch=develop)]"
+                    "(https://coveralls.io/github/modflowpy/"
+                    "flopy?branch=develop)"
+                )
+            elif "doi.org/10.5066/F7BK19FH" in line:
+                line = get_software_citation(timestamp, version)
+
+            f.write(f"{line}\n")

-    f.close()
     print(f"Updated {fpth} to version {version}")

-    # write disclaimer markdown file
-    file_paths["DISCLAIMER.md"].write_text(disclaimer)
-    print(f"Updated {file_paths['DISCLAIMER.md']} to version {version}")
-

 def update_citation_cff(timestamp: datetime, version: Version):
     # read CITATION.cff to modify
@@ -225,28 +156,18 @@ def update_citation_cff(timestamp: datetime, version: Version):
     print(f"Updated {fpth} to version {version}")


-def update_PyPI_release(
-    timestamp: datetime, version: Version, approved: bool = False
-):
-    # create disclaimer text
-    disclaimer = get_disclaimer(approved)
-
+def update_pypi_release(timestamp: datetime, version: Version):
     # read PyPI_release.md into memory
     fpth = file_paths["PyPI_release.md"]
     lines = fpth.read_text().rstrip().split("\n")

     # rewrite PyPI_release.md
-    terminate = False
     f = open(fpth, "w")
     for line in lines:
         if "doi.org/10.5066/F7BK19FH" in line:
-            line = get_software_citation(timestamp, version, approved)
-        elif "Disclaimer" in line:
-            line = disclaimer
-            terminate = True
+            line = get_software_citation(timestamp, version)
+
         f.write(f"{line}\n")
-        if terminate:
-            break

     f.close()
     print(f"Updated {fpth} to version {version}")
@@ -255,7 +176,6 @@ def update_PyPI_release(
 def update_version(
     timestamp: datetime = datetime.now(),
     version: Version = None,
-    approved: bool = False,
 ):
     lock_path = Path(_version_txt_path.name + ".lock")
     try:
@@ -270,10 +190,9 @@ def update_version(
         with lock:
             update_version_txt(version)
             update_version_py(timestamp, version)
-            update_readme_markdown(timestamp, version, approved)
+            update_readme_markdown(timestamp, version)
             update_citation_cff(timestamp, version)
-            update_codejson(timestamp, version, approved)
-            update_PyPI_release(timestamp, version, approved)
+            update_pypi_release(timestamp, version)
     finally:
         try:
             lock_path.unlink()
@@ -285,15 +204,7 @@
     parser = argparse.ArgumentParser(
         prog=f"Update {_project_name} version",
         formatter_class=argparse.RawDescriptionHelpFormatter,
-        epilog=textwrap.dedent(
-            """\
-            Update version information stored in version.txt in the project root,
-            as well as several other files in the repository. If --version is not
-            provided, the version number will not be changed. A file lock is held
-            to synchronize file access. The version tag must comply with standard
-            '<major>.<minor>.<patch>' format conventions for semantic versioning.
-            """
-        ),
+        epilog=textwrap.dedent(_epilog),
     )
     parser.add_argument(
         "-v",
@@ -301,13 +212,6 @@ update_version(
         required=False,
         help="Specify the release version",
     )
-    parser.add_argument(
-        "-a",
-        "--approve",
-        required=False,
-        action="store_true",
-        help="Approve the release (defaults false)",
-    )
     parser.add_argument(
         "-g",
         "--get",
@@ -318,14 +222,11 @@
     args = parser.parse_args()

     if args.get:
-        print(
-            Version((_project_root_path / "version.txt").read_text().strip())
-        )
+        print(_current_version)
     else:
         update_version(
             timestamp=datetime.now(),
             version=(
                 Version(args.version) if args.version else _current_version
             ),
-            approved=args.approve,
         )
diff --git a/version.txt b/version.txt
index 240bba906..0be1fc7d2 100644
--- a/version.txt
+++ b/version.txt
@@ -1 +1 @@
-3.7.0
\ No newline at end of file
+3.8.0
\ No newline at end of file