From 9715022c3f04e2d18a60b54f554cdda5983e409f Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Thu, 23 May 2024 17:11:01 -0400 Subject: [PATCH 01/57] ci(release): update version to 3.8.0.dev0 --- CITATION.cff | 2 +- README.md | 18 ++++++++++-------- code.json | 4 ++-- docs/PyPI_release.md | 16 +++++++++------- flopy/DISCLAIMER.md | 14 ++++++++------ flopy/version.py | 4 ++-- version.txt | 2 +- 7 files changed, 33 insertions(+), 27 deletions(-) diff --git a/CITATION.cff b/CITATION.cff index 08c4554c4..6b51992fb 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -3,7 +3,7 @@ message: If you use this software, please cite both the article from preferred-c references, and the software itself. type: software title: FloPy -version: 3.7.0 +version: 3.8.0.dev0 date-released: '2024-05-23' doi: 10.5066/F7BK19FH abstract: A Python package to create, run, and post-process MODFLOW-based models. diff --git a/README.md b/README.md index 646964b7f..dfa55933c 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ flopy3 -### Version 3.7.0 +### Version 3.8.0.dev0 (preliminary) [![flopy continuous integration](https://github.com/modflowpy/flopy/actions/workflows/commit.yml/badge.svg?branch=develop)](https://github.com/modflowpy/flopy/actions/workflows/commit.yml) [![Read the Docs](https://github.com/modflowpy/flopy/actions/workflows/rtd.yml/badge.svg?branch=develop)](https://github.com/modflowpy/flopy/actions/workflows/rtd.yml) @@ -150,7 +150,7 @@ How to Cite ##### ***Software/Code citation for FloPy:*** -[Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.7.0: U.S. Geological Survey Software Release, 23 May 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH) +[Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.8.0.dev0 (preliminary): U.S. Geological Survey Software Release, 23 May 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH) Additional FloPy Related Publications @@ -175,10 +175,12 @@ MODFLOW Resources Disclaimer ---------- -This software is provided "as is" and "as-available", and makes no -representations or warranties of any kind concerning the software, whether -express, implied, statutory, or other. This includes, without limitation, -warranties of title, merchantability, fitness for a particular purpose, -non-infringement, absence of latent or other defects, accuracy, or the -presence or absence of errors, whether or not known or discoverable. +This software is preliminary or provisional and is subject to revision. It is +being provided to meet the need for timely best science. This software is +provided "as is" and "as-available", and makes no representations or warranties +of any kind concerning the software, whether express, implied, statutory, or +other. This includes, without limitation, warranties of title, +merchantability, fitness for a particular purpose, non-infringement, absence +of latent or other defects, accuracy, or the presence or absence of errors, +whether or not known or discoverable. 
diff --git a/code.json b/code.json index 89e967b7f..1471ab55d 100644 --- a/code.json +++ b/code.json @@ -1,6 +1,6 @@ [ { - "status": "Release", + "status": "Preliminary", "languages": [ "python" ], @@ -29,7 +29,7 @@ "downloadURL": "https://code.usgs.gov/usgs/modflow/flopy/archive/master.zip", "vcs": "git", "laborHours": -1, - "version": "3.7.0", + "version": "3.8.0.dev0", "date": { "metadataLastUpdated": "2024-05-23" }, diff --git a/docs/PyPI_release.md b/docs/PyPI_release.md index 0c8acdce5..fac5c4fbb 100644 --- a/docs/PyPI_release.md +++ b/docs/PyPI_release.md @@ -30,16 +30,18 @@ How to Cite *Software/Code citation for FloPy:* -[Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.7.0: U.S. Geological Survey Software Release, 23 May 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH) +[Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.8.0.dev0 (preliminary): U.S. Geological Survey Software Release, 23 May 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH) Disclaimer ---------- -This software is provided "as is" and "as-available", and makes no -representations or warranties of any kind concerning the software, whether -express, implied, statutory, or other. This includes, without limitation, -warranties of title, merchantability, fitness for a particular purpose, -non-infringement, absence of latent or other defects, accuracy, or the -presence or absence of errors, whether or not known or discoverable. +This software is preliminary or provisional and is subject to revision. It is +being provided to meet the need for timely best science. This software is +provided "as is" and "as-available", and makes no representations or warranties +of any kind concerning the software, whether express, implied, statutory, or +other. This includes, without limitation, warranties of title, +merchantability, fitness for a particular purpose, non-infringement, absence +of latent or other defects, accuracy, or the presence or absence of errors, +whether or not known or discoverable. diff --git a/flopy/DISCLAIMER.md b/flopy/DISCLAIMER.md index 0e68af88f..81ba20d03 100644 --- a/flopy/DISCLAIMER.md +++ b/flopy/DISCLAIMER.md @@ -1,9 +1,11 @@ Disclaimer ---------- -This software is provided "as is" and "as-available", and makes no -representations or warranties of any kind concerning the software, whether -express, implied, statutory, or other. This includes, without limitation, -warranties of title, merchantability, fitness for a particular purpose, -non-infringement, absence of latent or other defects, accuracy, or the -presence or absence of errors, whether or not known or discoverable. +This software is preliminary or provisional and is subject to revision. It is +being provided to meet the need for timely best science. This software is +provided "as is" and "as-available", and makes no representations or warranties +of any kind concerning the software, whether express, implied, statutory, or +other. 
This includes, without limitation, warranties of title, +merchantability, fitness for a particular purpose, non-infringement, absence +of latent or other defects, accuracy, or the presence or absence of errors, +whether or not known or discoverable. diff --git a/flopy/version.py b/flopy/version.py index f2ed5789f..aaf8011d1 100644 --- a/flopy/version.py +++ b/flopy/version.py @@ -1,4 +1,4 @@ # flopy version file automatically created using -# update_version.py on May 23, 2024 20:49:48 +# update_version.py on May 23, 2024 17:10:43 -__version__ = "3.7.0" +__version__ = "3.8.0.dev0" diff --git a/version.txt b/version.txt index 240bba906..ae664ee4d 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -3.7.0 \ No newline at end of file +3.8.0.dev0 \ No newline at end of file From 31955a7536b1f53d2a572580e05ff282a933716e Mon Sep 17 00:00:00 2001 From: Mike Taves Date: Fri, 31 May 2024 09:27:28 +1200 Subject: [PATCH 02/57] refactor(expired deprecation): raise AttributeError with to_shapefile (#2200) This PR changes deprecated .to_shapefile() functions to raise AttributeError with a helpful message. These are the affected functions: flopy.utils.util_list.MfList.to_shapefile flopy.pakbase.Package.to_shapefile flopy.mbase.BaseModel.to_shapefile While these functions have been deprecated since 3.2.4 released in 2016, they were never documented as such. Thus a helpful exception message and cross-referenced docstring is used instead of removing these functions. --- autotest/test_export.py | 18 ++++++++++++++++++ flopy/mbase.py | 32 ++++---------------------------- flopy/pakbase.py | 35 ++++------------------------------- flopy/utils/util_list.py | 38 ++++---------------------------------- 4 files changed, 30 insertions(+), 93 deletions(-) diff --git a/autotest/test_export.py b/autotest/test_export.py index fbfac0937..43d70685d 100644 --- a/autotest/test_export.py +++ b/autotest/test_export.py @@ -2061,3 +2061,21 @@ def test_vtk_export_disu_model(function_tmpdir): strt_vtk = vtk_to_numpy(grid.GetCellData().GetArray("strt")) if not np.allclose(gwf.ic.strt.array, strt_vtk): raise AssertionError("'strt' array not written in proper node order") + + +def test_to_shapefile_raises_attributeerror(): + # deprecated 3.2.4, changed to raise AttributeError version 3.8 + # these attributes and this test may eventually be removed + m = flopy.modflow.Modflow() + assert isinstance(m, flopy.mbase.BaseModel) + with pytest.raises(AttributeError, match="was removed"): + m.to_shapefile("nope.shp") + dis = flopy.modflow.ModflowDis(m) + assert isinstance(dis, flopy.pakbase.Package) + with pytest.raises(AttributeError, match="was removed"): + dis.to_shapefile("nope.shp") + wel = flopy.modflow.ModflowWel(m) + spd = wel.stress_period_data + assert isinstance(spd, flopy.utils.MfList) + with pytest.raises(AttributeError, match="was removed"): + spd.to_shapefile("nope.shp", kper=1) diff --git a/flopy/mbase.py b/flopy/mbase.py index ee7bbd63f..3cddbe829 100644 --- a/flopy/mbase.py +++ b/flopy/mbase.py @@ -1703,34 +1703,10 @@ def plot(self, SelPackList=None, **kwargs): ) return axes - def to_shapefile( - self, filename: Union[str, os.PathLike], package_names=None, **kwargs - ): - """ - Wrapper function for writing a shapefile for the model grid. If - package_names is not None, then search through the requested packages - looking for arrays that can be added to the shapefile as attributes - - Parameters - ---------- - filename : str or PathLike - Path of the shapefile to write - package_names : list of package names (e.g. 
["dis","lpf"]) - Packages to export data arrays to shapefile. (default is None) - - Returns - ------- - None - - Examples - -------- - >>> import flopy - >>> m = flopy.modflow.Modflow() - >>> m.to_shapefile('model.shp', SelPackList) - - """ - warnings.warn("to_shapefile() is deprecated. use .export()") - self.export(filename, package_names=package_names) + def to_shapefile(self, *args, **kwargs): + """Raises AttributeError, use :meth:`export`.""" + # deprecated 3.2.4, changed to raise AttributeError version 3.8 + raise AttributeError(".to_shapefile() was removed; use .export()") def run_model( diff --git a/flopy/pakbase.py b/flopy/pakbase.py index 31720c849..2e69c7096 100644 --- a/flopy/pakbase.py +++ b/flopy/pakbase.py @@ -838,37 +838,10 @@ def plot(self, **kwargs): axes = PlotUtilities._plot_package_helper(self, **kwargs) return axes - def to_shapefile(self, filename, **kwargs): - """ - Export 2-D, 3-D, and transient 2-D model data to shapefile (polygons). - Adds an attribute for each layer in each data array - - Parameters - ---------- - filename : str - Shapefile name to write - - Returns - ---------- - None - - See Also - -------- - - Notes - ----- - - Examples - -------- - >>> import flopy - >>> ml = flopy.modflow.Modflow.load('test.nam') - >>> ml.lpf.to_shapefile('test_hk.shp') - - """ - import warnings - - warnings.warn("to_shapefile() is deprecated. use .export()") - self.export(filename) + def to_shapefile(self, *args, **kwargs): + """Raises AttributeError, use :meth:`export`.""" + # deprecated 3.2.4, changed to raise AttributeError version 3.8 + raise AttributeError(".to_shapefile() was removed; use .export()") def webdoc(self): """Open the web documentation.""" diff --git a/flopy/utils/util_list.py b/flopy/utils/util_list.py index 66d2baf35..2aa3e5a5f 100644 --- a/flopy/utils/util_list.py +++ b/flopy/utils/util_list.py @@ -998,40 +998,10 @@ def plot( return axes - def to_shapefile(self, filename, kper=None): - """ - Export stress period boundary condition (MfList) data for a specified - stress period - - Parameters - ---------- - filename : str - Shapefile name to write - kper : int - MODFLOW zero-based stress period number to return. (default is None) - - Returns - ---------- - None - - See Also - -------- - - Notes - ----- - - Examples - -------- - >>> import flopy - >>> ml = flopy.modflow.Modflow.load('test.nam') - >>> ml.wel.to_shapefile('test_hk.shp', kper=1) - """ - import warnings - - warnings.warn( - "Deprecation warning: to_shapefile() is deprecated. 
use .export()" - ) - self.export(filename, kper=kper) + def to_shapefile(self, *args, **kwargs): + """Raises AttributeError, use :meth:`export`.""" + # deprecated 3.2.4, changed to raise AttributeError version 3.8 + raise AttributeError(".to_shapefile() was removed; use .export()") def to_array(self, kper=0, mask=False): """ From bbabf86c0292ed2b237f89371afba01140050592 Mon Sep 17 00:00:00 2001 From: Mike Taves Date: Fri, 31 May 2024 22:33:46 +1200 Subject: [PATCH 03/57] refactor: deprecate unused flopy.utils.binaryfile.binaryread_struct (#2201) --- autotest/test_binaryfile.py | 32 ++++++++++++++++++++++++++++++++ flopy/utils/binaryfile.py | 32 +++++++++++++++++++++++++------- 2 files changed, 57 insertions(+), 7 deletions(-) diff --git a/autotest/test_binaryfile.py b/autotest/test_binaryfile.py index a4e5a400e..1dff66332 100644 --- a/autotest/test_binaryfile.py +++ b/autotest/test_binaryfile.py @@ -39,6 +39,38 @@ def zonbud_model_path(example_data_path): return example_data_path / "zonbud_examples" +def test_binaryread(example_data_path): + # test low-level binaryread() method + pth = example_data_path / "freyberg" / "freyberg.githds" + with open(pth, "rb") as fp: + res = flopy.utils.binaryfile.binaryread(fp, np.int32, 2) + np.testing.assert_array_equal(res, np.array([1, 1], np.int32)) + res = flopy.utils.binaryfile.binaryread(fp, np.float32, 2) + np.testing.assert_array_equal(res, np.array([10, 10], np.float32)) + res = flopy.utils.binaryfile.binaryread(fp, str) + assert res == b" HEAD" + res = flopy.utils.binaryfile.binaryread(fp, np.int32) + assert res == 20 + + +def test_deprecated_binaryread_struct(example_data_path): + # similar to test_binaryread(), but check the calls are deprecated + pth = example_data_path / "freyberg" / "freyberg.githds" + with open(pth, "rb") as fp: + with pytest.deprecated_call(): + res = flopy.utils.binaryfile.binaryread_struct(fp, np.int32, 2) + np.testing.assert_array_equal(res, np.array([1, 1], np.int32)) + with pytest.deprecated_call(): + res = flopy.utils.binaryfile.binaryread_struct(fp, np.float32, 2) + np.testing.assert_array_equal(res, np.array([10, 10], np.float32)) + with pytest.deprecated_call(): + res = flopy.utils.binaryfile.binaryread_struct(fp, str) + assert res == b" HEAD" + with pytest.deprecated_call(): + res = flopy.utils.binaryfile.binaryread_struct(fp, np.int32) + assert res == 20 + + def test_binaryfile_writeread(function_tmpdir, nwt_model_path): model = "Pr3_MFNWT_lower.nam" ml = flopy.modflow.Modflow.load( diff --git a/flopy/utils/binaryfile.py b/flopy/utils/binaryfile.py index 0efce1c49..fa70ad254 100644 --- a/flopy/utils/binaryfile.py +++ b/flopy/utils/binaryfile.py @@ -274,10 +274,16 @@ def binaryread_struct(file, vartype, shape=(1,), charlen=16): cannot be returned, only multi-character strings. Shape has no affect on strings. + .. deprecated:: 3.8.0 + Use :meth:`binaryread` instead. + """ import struct - import numpy as np + warnings.warn( + "binaryread_struct() is deprecated; use binaryread() instead.", + DeprecationWarning, + ) # store the mapping from type to struct format (fmt) typefmtd = {np.int32: "i", np.float32: "f", np.float64: "d"} @@ -306,21 +312,33 @@ def binaryread_struct(file, vartype, shape=(1,), charlen=16): def binaryread(file, vartype, shape=(1,), charlen=16): """ - Uses numpy to read from binary file. This was found to be faster than the - struct approach and is used as the default. + Read text, a scalar value, or an array of values from a binary file. 
+
+    Parameters
+    ----------
+    file : file object
+        is an open file object
+    vartype : type
+        is the return variable type: str, numpy.int32, numpy.float32,
+        or numpy.float64
+    shape : tuple, default (1,)
+        is the shape of the returned array (shape(1, ) returns a single
+        value) for example, shape = (nlay, nrow, ncol)
+    charlen : int, default 16
+        is the length of the text string. Note that string arrays
+        cannot be returned, only multi-character strings. Shape has no
+        affect on strings.
 
     """
     # read a string variable of length charlen
     if vartype == str:
-        result = file.read(charlen * 1)
+        result = file.read(charlen)
     else:
         # find the number of values
         nval = np.prod(shape)
         result = np.fromfile(file, vartype, nval)
-        if nval == 1:
-            result = result  # [0]
-        else:
+        if nval != 1:
             result = np.reshape(result, shape)
     return result

From 50492ad15306f08e18c909646237d51d9e9a0996 Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Tue, 4 Jun 2024 07:32:54 -0400
Subject: [PATCH 04/57] chore(deps): bump dawidd6/action-download-artifact from
 3 to 4 (#2204)

---
 .github/workflows/release.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index df9c1b1bb..a6019dcdf 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -173,7 +173,7 @@ jobs:
 
       # actions/download-artifact won't look at previous workflow runs but we need to in order to get changelog
       - name: Download artifacts
-        uses: dawidd6/action-download-artifact@v3
+        uses: dawidd6/action-download-artifact@v4
 
       - name: Draft release
         env:

From 1e4e55198a7ebc9ff524c2357ae857c67fcc3ff5 Mon Sep 17 00:00:00 2001
From: Mike Taves
Date: Wed, 5 Jun 2024 23:54:21 +1200
Subject: [PATCH 05/57] ci(rtd): build only html and htmlzip, disable pdf and
 epub (#2206)

The RTD builds have been failing for a few weeks now, with a fatal error
due to insufficient memory on the worker node. Troubleshooting docs
suggest reducing the number of output formats, so this PR disables the
following output formats:
- PDF. For flopy 3.6.0, this document is 2145 pages in US Letter page
  size (hopefully never printed). Is this document ever read or checked?
  For example, pages 733 to 753 simply list numbers 1 to 1000.
- EPUB is an e-book file format. Is this document ever read or checked?

This PR keeps the following output formats:
- HTML is always built, and is the primary document most people use.
- Downloadable HTML ("htmlzip"). For flopy 3.6.0, this archive is 47 MB,
  which is expected. It is plausible that this version of the docs may
  need to be downloaded for "offline use" or for archiving, so I feel it
  is appropriate to keep this format option.
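
For reference, the resulting formats setting in .readthedocs.yml reduces
to the short sketch below (it simply mirrors the change shown in the diff
that follows):

    # Build only html and htmlzip (skip others, including PDF and EPUB)
    formats:
      - htmlzip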
--- .readthedocs.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index 7e3afd5ea..43dde5ed0 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -25,8 +25,9 @@ build: sphinx: configuration: .docs/conf.py -# Build docs in additional formats such as PDF and ePub -formats: all +# Build only html and htmlzip (skip others, including PDF and EPUB) +formats: + - htmlzip # Set the Python version and requirements python: From 04c08604c74337c222e3b7af2d92ab61fbbd110b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 5 Jun 2024 08:00:45 -0400 Subject: [PATCH 06/57] chore(deps): bump dawidd6/action-download-artifact from 4 to 5 (#2207) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a6019dcdf..3a92a56ac 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -173,7 +173,7 @@ jobs: # actions/download-artifact won't look at previous workflow runs but we need to in order to get changelog - name: Download artifacts - uses: dawidd6/action-download-artifact@v4 + uses: dawidd6/action-download-artifact@v5 - name: Draft release env: From 378f35436b02bb9d194109b3186d99449597bb87 Mon Sep 17 00:00:00 2001 From: martclanor Date: Thu, 6 Jun 2024 17:18:28 +0200 Subject: [PATCH 07/57] chore: remove unused flake8 and pylint config (#2211) --- .flake8 | 29 -- .pylintrc | 586 --------------------------------------- _config.yml | 1 - flopy/mfusg/mfusgcln.py | 1 - flopy/mfusg/mfusgsms.py | 1 - flopy/utils/utils_def.py | 1 - 6 files changed, 619 deletions(-) delete mode 100644 .flake8 delete mode 100644 .pylintrc delete mode 100644 _config.yml diff --git a/.flake8 b/.flake8 deleted file mode 100644 index d7d2fc9dd..000000000 --- a/.flake8 +++ /dev/null @@ -1,29 +0,0 @@ -[flake8] -exclude = - .git - __pycache__ - build - dist - examples - autotest -ignore = - # https://flake8.pycqa.org/en/latest/user/error-codes.html - F401, - # https://pycodestyle.readthedocs.io/en/latest/intro.html#error-codes - # Indentation - E121, E122, E126, E127, E128, - # Whitespace - E203, E221, E222, E226, E231, E241, - # Import - E402, - # Line length - E501, E502, - # Statement - E722, E741, - # Whitespace warning - W291, W292, W293, - # Blank line warning - W391, - # Line break warning - W503, W504 -statistics = True diff --git a/.pylintrc b/.pylintrc deleted file mode 100644 index a41a6deb5..000000000 --- a/.pylintrc +++ /dev/null @@ -1,586 +0,0 @@ -[MASTER] - -# A comma-separated list of package or module names from where C extensions may -# be loaded. Extensions are loading into the active Python interpreter and may -# run arbitrary code. -extension-pkg-whitelist= - -# Add files or directories to the blacklist. They should be base names, not -# paths. -ignore=CVS - -# Add files or directories matching the regex patterns to the blacklist. The -# regex matches against base names, not paths. -ignore-patterns= - -# Python code to execute, usually for sys.path manipulation such as -# pygtk.require(). -#init-hook= - -# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the -# number of processors available to use. -jobs=1 - -# Control the amount of potential inferred values when inferring a single -# object. This can help the performance when dealing with large functions or -# complex, nested conditions. 
-limit-inference-results=100 - -# List of plugins (as comma separated values of python module names) to load, -# usually to register additional checkers. -load-plugins= - -# Pickle collected data for later comparisons. -persistent=yes - -# Specify a configuration file. -#rcfile= - -# When enabled, pylint would attempt to guess common misconfiguration and emit -# user-friendly hints instead of false-positive error messages. -suggestion-mode=yes - -# Allow loading of arbitrary C extensions. Extensions are imported into the -# active Python interpreter and may run arbitrary code. -unsafe-load-any-extension=no - - -[MESSAGES CONTROL] - -# Only show warnings with the listed confidence levels. Leave empty to show -# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED. -confidence= - -# Disable the message, report, category or checker with the given id(s). You -# can either give multiple identifiers separated by comma (,) or put this -# option multiple times (only on the command line, not in the configuration -# file where it should appear only once). You can also use "--disable=all" to -# disable everything first and then reenable specific checks. For example, if -# you want to run only the similarities checker, you can use "--disable=all -# --enable=similarities". If you want to run only the classes checker, but have -# no Warning level messages displayed, use "--disable=all --enable=classes -# --disable=W". -disable=print-statement, - parameter-unpacking, - unpacking-in-except, - old-raise-syntax, - backtick, - long-suffix, - old-ne-operator, - old-octal-literal, - import-star-module-level, - non-ascii-bytes-literal, - raw-checker-failed, - bad-inline-option, - locally-disabled, - file-ignored, - suppressed-message, - useless-suppression, - deprecated-pragma, - use-symbolic-message-instead, - apply-builtin, - basestring-builtin, - buffer-builtin, - cmp-builtin, - coerce-builtin, - execfile-builtin, - file-builtin, - long-builtin, - raw_input-builtin, - reduce-builtin, - standarderror-builtin, - unicode-builtin, - xrange-builtin, - coerce-method, - delslice-method, - getslice-method, - setslice-method, - no-absolute-import, - old-division, - dict-iter-method, - dict-view-method, - next-method-called, - metaclass-assignment, - indexing-exception, - raising-string, - reload-builtin, - oct-method, - hex-method, - nonzero-method, - cmp-method, - input-builtin, - round-builtin, - intern-builtin, - unichr-builtin, - map-builtin-not-iterating, - zip-builtin-not-iterating, - range-builtin-not-iterating, - filter-builtin-not-iterating, - using-cmp-argument, - eq-without-hash, - div-method, - idiv-method, - rdiv-method, - exception-message-attribute, - invalid-str-codec, - sys-max-int, - bad-python3-import, - deprecated-string-function, - deprecated-str-translate-call, - deprecated-itertools-function, - deprecated-types-field, - next-method-defined, - dict-items-not-iterating, - dict-keys-not-iterating, - dict-values-not-iterating, - deprecated-operator-function, - deprecated-urllib-function, - xreadlines-attribute, - deprecated-sys-function, - exception-escape, - comprehension-escape, - C0330 - - -# Enable the message, report, category or checker with the given id(s). You can -# either give multiple identifier separated by comma (,) or put this option -# multiple time (only on the command line, not in the configuration file where -# it should appear only once). See also the "--disable" option for examples. 
-enable=c-extension-no-member - - -[REPORTS] - -# Python expression which should return a score less than or equal to 10. You -# have access to the variables 'error', 'warning', 'refactor', and 'convention' -# which contain the number of messages in each category, as well as 'statement' -# which is the total number of statements analyzed. This score is used by the -# global evaluation report (RP0004). -evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10) - -# Template used to display messages. This is a python new-style format string -# used to format the message information. See doc for all details. -#msg-template= - -# Set the output format. Available formats are text, parseable, colorized, json -# and msvs (visual studio). You can also give a reporter class, e.g. -# mypackage.mymodule.MyReporterClass. -output-format=text - -# Tells whether to display a full report or only the messages. -reports=no - -# Activate the evaluation score. -score=yes - - -[REFACTORING] - -# Maximum number of nested blocks for function / method body -max-nested-blocks=5 - -# Complete name of functions that never returns. When checking for -# inconsistent-return-statements if a never returning function is called then -# it will be considered as an explicit return statement and no message will be -# printed. -never-returning-functions=sys.exit - - -[LOGGING] - -# Format style used to check logging format string. `old` means using % -# formatting, `new` is for `{}` formatting,and `fstr` is for f-strings. -logging-format-style=old - -# Logging modules to check that the string format arguments are in logging -# function parameter format. -logging-modules=logging - - -[SPELLING] - -# Limits count of emitted suggestions for spelling mistakes. -max-spelling-suggestions=4 - -# Spelling dictionary name. Available dictionaries: none. To make it work, -# install the python-enchant package. -spelling-dict= - -# List of comma separated words that should not be checked. -spelling-ignore-words= - -# A path to a file that contains the private dictionary; one word per line. -spelling-private-dict-file= - -# Tells whether to store unknown words to the private dictionary (see the -# --spelling-private-dict-file option) instead of raising a message. -spelling-store-unknown-words=no - - -[MISCELLANEOUS] - -# List of note tags to take in consideration, separated by a comma. -notes=FIXME, - XXX, - TODO - - -[TYPECHECK] - -# List of decorators that produce context managers, such as -# contextlib.contextmanager. Add to this list to register other decorators that -# produce valid context managers. -contextmanager-decorators=contextlib.contextmanager - -# List of members which are set dynamically and missed by pylint inference -# system, and so shouldn't trigger E1101 when accessed. Python regular -# expressions are accepted. -generated-members=graph.*,requests.* - -# Tells whether missing members accessed in mixin class should be ignored. A -# mixin class is detected if its name ends with "mixin" (case insensitive). -ignore-mixin-members=yes - -# Tells whether to warn about missing members when the owner of the attribute -# is inferred to be None. -ignore-none=yes - -# This flag controls whether pylint should warn about no-member and similar -# checks whenever an opaque object is returned when inferring. The inference -# can return multiple potential results while evaluating a Python object, but -# some branches might not be evaluated, which results in partial inference. 
In -# that case, it might be useful to still emit no-member and other checks for -# the rest of the inferred objects. -ignore-on-opaque-inference=yes - -# List of class names for which member attributes should not be checked (useful -# for classes with dynamically set attributes). This supports the use of -# qualified names. -ignored-classes=optparse.Values,thread._local,_thread._local - -# List of module names for which member attributes should not be checked -# (useful for modules/projects where namespaces are manipulated during runtime -# and thus existing member attributes cannot be deduced by static analysis). It -# supports qualified module names, as well as Unix pattern matching. -ignored-modules= - -# Show a hint with possible names when a member name was not found. The aspect -# of finding the hint is based on edit distance. -missing-member-hint=yes - -# The minimum edit distance a name should have in order to be considered a -# similar match for a missing member name. -missing-member-hint-distance=1 - -# The total number of similar names that should be taken in consideration when -# showing a hint for a missing member. -missing-member-max-choices=1 - -# List of decorators that change the signature of a decorated function. -signature-mutators= - - -[VARIABLES] - -# List of additional names supposed to be defined in builtins. Remember that -# you should avoid defining new builtins when possible. -additional-builtins= - -# Tells whether unused global variables should be treated as a violation. -allow-global-unused-variables=yes - -# List of strings which can identify a callback function by name. A callback -# name must start or end with one of those strings. -callbacks=cb_, - _cb - -# A regular expression matching the name of dummy variables (i.e. expected to -# not be used). -dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_ - -# Argument names that match this expression will be ignored. Default to name -# with leading underscore. -ignored-argument-names=_.*|^ignored_|^unused_ - -# Tells whether we should check for unused import in __init__ files. -init-import=no - -# List of qualified module names which can have objects that can redefine -# builtins. -redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io - - -[FORMAT] - -# Expected format of line ending, e.g. empty (any line ending), LF or CRLF. -expected-line-ending-format= - -# Regexp for a line that is allowed to be longer than the limit. -ignore-long-lines=^\s*(# )??$ - -# Number of spaces of indent required inside a hanging or continued line. -indent-after-paren=4 - -# String used as indentation unit. This is usually " " (4 spaces) or "\t" (1 -# tab). -indent-string=' ' - -# Maximum number of characters on a single line. -max-line-length=100 - -# Maximum number of lines in a module. -max-module-lines=1000 - -# List of optional constructs for which whitespace checking is disabled. `dict- -# separator` is used to allow tabulation in dicts, etc.: {1 : 1,\n222: 2}. -# `trailing-comma` allows a space between comma and closing bracket: (a, ). -# `empty-line` allows space-only lines. -no-space-check=trailing-comma, - dict-separator - -# Allow the body of a class to be on the same line as the declaration if body -# contains single statement. -single-line-class-stmt=no - -# Allow the body of an if to be on the same line as the test if there is no -# else. 
-single-line-if-stmt=no - -# Good variable names which should always be accepted, separated by a comma -good-names=kv,nr,nc,sy,ss,iu,hy,hk - - -[SIMILARITIES] - -# Ignore comments when computing similarities. -ignore-comments=yes - -# Ignore docstrings when computing similarities. -ignore-docstrings=yes - -# Ignore imports when computing similarities. -ignore-imports=no - -# Minimum lines number of a similarity. -min-similarity-lines=4 - - -[BASIC] - -# Naming style matching correct argument names. -argument-naming-style=snake_case - -# Regular expression matching correct argument names. Overrides argument- -# naming-style. -#argument-rgx= - -# Naming style matching correct attribute names. -attr-naming-style=snake_case - -# Regular expression matching correct attribute names. Overrides attr-naming- -# style. -#attr-rgx= - -# Bad variable names which should always be refused, separated by a comma. -bad-names=foo, - bar, - baz, - toto, - tutu, - tata - -# Naming style matching correct class attribute names. -class-attribute-naming-style=any - -# Regular expression matching correct class attribute names. Overrides class- -# attribute-naming-style. -#class-attribute-rgx= - -# Naming style matching correct class names. -class-naming-style=PascalCase - -# Regular expression matching correct class names. Overrides class-naming- -# style. -#class-rgx= - -# Naming style matching correct constant names. -const-naming-style=UPPER_CASE - -# Regular expression matching correct constant names. Overrides const-naming- -# style. -#const-rgx= - -# Minimum line length for functions/classes that require docstrings, shorter -# ones are exempt. -docstring-min-length=-1 - -# Naming style matching correct function names. -function-naming-style=snake_case - -# Regular expression matching correct function names. Overrides function- -# naming-style. -#function-rgx= - -# Good variable names which should always be accepted, separated by a comma. -good-names=i, - j, - k, - ex, - Run, - _ - -# Include a hint for the correct naming format with invalid-name. -include-naming-hint=no - -# Naming style matching correct inline iteration names. -inlinevar-naming-style=any - -# Regular expression matching correct inline iteration names. Overrides -# inlinevar-naming-style. -#inlinevar-rgx= - -# Naming style matching correct method names. -method-naming-style=snake_case - -# Regular expression matching correct method names. Overrides method-naming- -# style. -#method-rgx= - -# Naming style matching correct module names. -module-naming-style=snake_case - -# Regular expression matching correct module names. Overrides module-naming- -# style. -#module-rgx= - -# Colon-delimited sets of names that determine each other's naming style when -# the name regexes allow several styles. -name-group= - -# Regular expression which should only match function or class names that do -# not require a docstring. -no-docstring-rgx=^_ - -# List of decorators that produce properties, such as abc.abstractproperty. Add -# to this list to register other decorators that produce valid properties. -# These decorators are taken in consideration only for invalid-name. -property-classes=abc.abstractproperty - -# Naming style matching correct variable names. -variable-naming-style=snake_case - -# Regular expression matching correct variable names. Overrides variable- -# naming-style. 
-#variable-rgx= - - -[STRING] - -# This flag controls whether the implicit-str-concat-in-sequence should -# generate a warning on implicit string concatenation in sequences defined over -# several lines. -check-str-concat-over-line-jumps=no - - -[IMPORTS] - -# List of modules that can be imported at any level, not just the top level -# one. -allow-any-import-level= - -# Allow wildcard imports from modules that define __all__. -allow-wildcard-with-all=no - -# Analyse import fallback blocks. This can be used to support both Python 2 and -# 3 compatible code, which means that the block might have code that exists -# only in one or another interpreter, leading to false positives when analysed. -analyse-fallback-blocks=no - -# Deprecated modules which should not be used, separated by a comma. -deprecated-modules=optparse,tkinter.tix - -# Create a graph of external dependencies in the given file (report RP0402 must -# not be disabled). -ext-import-graph= - -# Create a graph of every (i.e. internal and external) dependencies in the -# given file (report RP0402 must not be disabled). -import-graph= - -# Create a graph of internal dependencies in the given file (report RP0402 must -# not be disabled). -int-import-graph= - -# Force import order to recognize a module as part of the standard -# compatibility libraries. -known-standard-library= - -# Force import order to recognize a module as part of a third party library. -known-third-party=enchant - -# Couples of modules and preferred modules, separated by a comma. -preferred-modules= - - -[CLASSES] - -# List of method names used to declare (i.e. assign) instance attributes. -defining-attr-methods=__init__, - __new__, - setUp, - __post_init__ - -# List of member names, which should be excluded from the protected access -# warning. -exclude-protected=_asdict, - _fields, - _replace, - _source, - _make - -# List of valid names for the first argument in a class method. -valid-classmethod-first-arg=cls - -# List of valid names for the first argument in a metaclass class method. -valid-metaclass-classmethod-first-arg=cls - - -[DESIGN] - -# Maximum number of arguments for function / method. -max-args=5 - -# Maximum number of attributes for a class (see R0902). -max-attributes=7 - -# Maximum number of boolean expressions in an if statement (see R0916). -max-bool-expr=5 - -# Maximum number of branch for function / method body. -max-branches=12 - -# Maximum number of locals for function / method body. -max-locals=15 - -# Maximum number of parents for a class (see R0901). -max-parents=7 - -# Maximum number of public methods for a class (see R0904). -max-public-methods=20 - -# Maximum number of return / yield for function / method body. -max-returns=6 - -# Maximum number of statements in function / method body. -max-statements=50 - -# Minimum number of public methods for a class (see R0903). -min-public-methods=2 - - -[EXCEPTIONS] - -# Exceptions that will emit a warning when being caught. Defaults to -# "BaseException, Exception". -overgeneral-exceptions=BaseException, - Exception diff --git a/_config.yml b/_config.yml deleted file mode 100644 index 2f7efbeab..000000000 --- a/_config.yml +++ /dev/null @@ -1 +0,0 @@ -theme: jekyll-theme-minimal \ No newline at end of file diff --git a/flopy/mfusg/mfusgcln.py b/flopy/mfusg/mfusgcln.py index 63af3f7f7..04b372381 100644 --- a/flopy/mfusg/mfusgcln.py +++ b/flopy/mfusg/mfusgcln.py @@ -1,4 +1,3 @@ -# pylint: disable=E1101 """ Mfusgcln module. 
diff --git a/flopy/mfusg/mfusgsms.py b/flopy/mfusg/mfusgsms.py index 0866a7ae3..865e9d5f4 100644 --- a/flopy/mfusg/mfusgsms.py +++ b/flopy/mfusg/mfusgsms.py @@ -1,4 +1,3 @@ -# pylint: disable=too-many-instance-attributes """ mfusgsms module. This is the solver for MODFLOW-USG. diff --git a/flopy/utils/utils_def.py b/flopy/utils/utils_def.py index c157fd563..1a4cd56dd 100644 --- a/flopy/utils/utils_def.py +++ b/flopy/utils/utils_def.py @@ -1,4 +1,3 @@ -# pylint: disable=E1101 """ Generic classes and utility functions """ From 4a26cab4e0af4f49775fd0dc327c8f5ff51843f6 Mon Sep 17 00:00:00 2001 From: Mike Taves Date: Fri, 7 Jun 2024 03:20:09 +1200 Subject: [PATCH 08/57] fix(docs): section underline matches section title (#2208) This PR fixes the doc section headers to have consistent lengths. It is a cause of many errors while building docs with sphinx. This was mostly automated with ruff check --select=D409 --fix with the rule kept in pyproject.toml for future-proofing. --- flopy/discretization/grid.py | 2 +- flopy/discretization/structuredgrid.py | 2 +- flopy/discretization/vertexgrid.py | 2 +- flopy/export/netcdf.py | 2 +- flopy/export/utils.py | 13 ++++++----- flopy/mbase.py | 2 +- flopy/mf6/coordinates/modeldimensions.py | 6 ++--- flopy/mf6/coordinates/modelgrid.py | 8 +++---- flopy/mf6/data/mfdataarray.py | 4 ++-- flopy/mf6/data/mfdatalist.py | 6 ++--- flopy/mf6/data/mfdataplist.py | 6 ++--- flopy/mf6/data/mfdatascalar.py | 5 +++-- flopy/mf6/data/mfdatautil.py | 2 +- flopy/mf6/data/mfstructure.py | 6 ++--- flopy/mf6/mfpackage.py | 2 +- flopy/mf6/mfsimbase.py | 28 ++++++++++++------------ flopy/mf6/modflow/mfgnc.py | 2 +- flopy/mf6/modflow/mfgwemve.py | 2 +- flopy/mf6/modflow/mfgwfgnc.py | 2 +- flopy/mf6/modflow/mfgwfmvr.py | 2 +- flopy/mf6/modflow/mfgwtmvt.py | 2 +- flopy/mf6/modflow/mfmvr.py | 2 +- flopy/mf6/modflow/mfmvt.py | 2 +- flopy/mf6/modflow/mfutlats.py | 2 +- flopy/mf6/modflow/mfutlobs.py | 2 +- flopy/mf6/modflow/mfutltas.py | 2 +- flopy/mf6/modflow/mfutlts.py | 2 +- flopy/mf6/modflow/mfutltvk.py | 2 +- flopy/mf6/modflow/mfutltvs.py | 2 +- flopy/mf6/utils/binaryfile_utils.py | 6 ++--- flopy/mf6/utils/mfobservation.py | 15 +++++++------ flopy/mfusg/mfusg.py | 2 +- flopy/mfusg/mfusgwel.py | 10 +++++---- flopy/modflow/mfwel.py | 6 +++-- flopy/pakbase.py | 2 +- flopy/utils/binaryfile.py | 22 +++++++++---------- flopy/utils/datafile.py | 10 ++++----- flopy/utils/formattedfile.py | 4 ++-- flopy/utils/geometry.py | 14 ++++++------ flopy/utils/mflistfile.py | 4 ++-- flopy/utils/modpathfile.py | 4 ++-- flopy/utils/observationfile.py | 10 ++++----- flopy/utils/particletrackfile.py | 8 +++---- flopy/utils/swroutputfile.py | 16 +++++++------- flopy/utils/util_array.py | 6 ++--- flopy/utils/util_list.py | 4 ++-- flopy/utils/utils_def.py | 6 ++--- pyproject.toml | 7 +++++- 48 files changed, 145 insertions(+), 133 deletions(-) diff --git a/flopy/discretization/grid.py b/flopy/discretization/grid.py index 87e24d457..fe5af9970 100644 --- a/flopy/discretization/grid.py +++ b/flopy/discretization/grid.py @@ -128,7 +128,7 @@ class Grid: ndarrays for the x, y, and z coordinates Methods - ---------- + ------- get_coords(x, y) transform point or array of points x, y from model coordinates to spatial coordinates diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py index befe50ded..ba5a1a430 100644 --- a/flopy/discretization/structuredgrid.py +++ b/flopy/discretization/structuredgrid.py @@ -140,7 +140,7 @@ class for a structured model grid y-location points for 
the edges of the model grid Methods - ---------- + ------- get_cell_vertices(i, j) returns vertices for a single cell at row, column i, j. """ diff --git a/flopy/discretization/vertexgrid.py b/flopy/discretization/vertexgrid.py index 580f6f9b4..a043e6d3d 100644 --- a/flopy/discretization/vertexgrid.py +++ b/flopy/discretization/vertexgrid.py @@ -61,7 +61,7 @@ class for a vertex model grid returns list of cells and their vertices Methods - ---------- + ------- get_cell_vertices(cellid) returns vertices for a single cell at cellid. diff --git a/flopy/export/netcdf.py b/flopy/export/netcdf.py index 6ad1daffb..02916ad06 100644 --- a/flopy/export/netcdf.py +++ b/flopy/export/netcdf.py @@ -976,7 +976,7 @@ def initialize_group( Method to initialize a new group within a netcdf file. This group can have independent dimensions from the global dimensions - Parameters: + Parameters ---------- name : str name of the netcdf group diff --git a/flopy/export/utils.py b/flopy/export/utils.py index 1c3ee4d55..b82f934f4 100644 --- a/flopy/export/utils.py +++ b/flopy/export/utils.py @@ -324,10 +324,11 @@ def output_helper( Returns ------- - None - Note: + None + + Note ---- - casts down double precision to single precision for netCDF files + casts down double precision to single precision for netCDF files """ assert isinstance(ml, (BaseModel, ModelInterface)) @@ -867,7 +868,7 @@ def mflist_export(f: Union[str, os.PathLike, NetCdf], mfl, **kwargs): export helper for MfList instances Parameters - ----------- + ---------- f : str or PathLike or NetCdf file path or existing export instance type (NetCdf only for now) mfl : MfList instance @@ -1024,7 +1025,7 @@ def transient2d_export(f: Union[str, os.PathLike], t2d, fmt=None, **kwargs): export helper for Transient2d instances Parameters - ----------- + ---------- f : str or PathLike filename or existing export instance type (NetCdf only for now) t2d : Transient2d instance @@ -1184,7 +1185,7 @@ def array3d_export(f: Union[str, os.PathLike], u3d, fmt=None, **kwargs): export helper for Transient2d instances Parameters - ----------- + ---------- f : str or PathLike filename or existing export instance type (NetCdf only for now) u3d : Util3d instance diff --git a/flopy/mbase.py b/flopy/mbase.py index 3cddbe829..b36cf2555 100644 --- a/flopy/mbase.py +++ b/flopy/mbase.py @@ -1678,7 +1678,7 @@ def plot(self, SelPackList=None, **kwargs): MfList dictionary key. (default is None) Returns - ---------- + ------- axes : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis are returned. 
diff --git a/flopy/mf6/coordinates/modeldimensions.py b/flopy/mf6/coordinates/modeldimensions.py index 878f74a71..b80113624 100644 --- a/flopy/mf6/coordinates/modeldimensions.py +++ b/flopy/mf6/coordinates/modeldimensions.py @@ -28,7 +28,7 @@ class DataDimensions: (optional) Methods - ---------- + ------- get_model_grid : () returns a model grid based on the current simulation data @@ -151,7 +151,7 @@ class PackageDimensions: Tuple representing the path to this package Methods - ---------- + ------- get_aux_variables : (model_num=0) returns the package's aux variables boundnames : (model_num=0) @@ -322,7 +322,7 @@ class ModelDimensions: object containing simulation time information Methods - ---------- + ------- get_model_grid : () returns a model grid based on the current simulation data diff --git a/flopy/mf6/coordinates/modelgrid.py b/flopy/mf6/coordinates/modelgrid.py index 7b3f69854..50a612511 100644 --- a/flopy/mf6/coordinates/modelgrid.py +++ b/flopy/mf6/coordinates/modelgrid.py @@ -20,7 +20,7 @@ class ModelCell: id of model cell Methods - ---------- + ------- See Also -------- @@ -50,7 +50,7 @@ class UnstructuredModelCell(ModelCell): name of the model Methods - ---------- + ------- get_cellid : () returns the cellid get_top : () @@ -341,7 +341,7 @@ class ModelGrid: DiscretizationType.DISU) Methods - ---------- + ------- grid_type : () returns the grid type grid_type_consistent : () @@ -789,7 +789,7 @@ class UnstructuredModelGrid(ModelGrid): contains all simulation related data Methods - ---------- + ------- get_unstruct_jagged_array_list : {} returns a dictionary of jagged arrays used in the unstructured grid diff --git a/flopy/mf6/data/mfdataarray.py b/flopy/mf6/data/mfdataarray.py index 786838083..d3c02dbdb 100644 --- a/flopy/mf6/data/mfdataarray.py +++ b/flopy/mf6/data/mfdataarray.py @@ -1531,7 +1531,7 @@ def plot( List of unique values to be excluded from the plot. Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. @@ -2100,7 +2100,7 @@ def plot( extracted. (default is zero). Returns - ---------- + ------- axes : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. diff --git a/flopy/mf6/data/mfdatalist.py b/flopy/mf6/data/mfdatalist.py index f35147206..ab5a2aa66 100644 --- a/flopy/mf6/data/mfdatalist.py +++ b/flopy/mf6/data/mfdatalist.py @@ -136,7 +136,7 @@ def to_array(self, kper=0, mask=False): return array with np.nan instead of zero Returns - ---------- + ------- out : dict of numpy.ndarrays Dictionary of 3-D numpy arrays containing the stress period data for a selected stress period. The dictionary keys are the @@ -1451,7 +1451,7 @@ def plot( List of unique values to be excluded from the plot. Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. @@ -2122,7 +2122,7 @@ def plot( List of unique values to be excluded from the plot. Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. 
diff --git a/flopy/mf6/data/mfdataplist.py b/flopy/mf6/data/mfdataplist.py index dd397699d..507795e70 100644 --- a/flopy/mf6/data/mfdataplist.py +++ b/flopy/mf6/data/mfdataplist.py @@ -815,7 +815,7 @@ def to_array(self, kper=0, mask=False): return array with np.nan instead of zero Returns - ---------- + ------- out : dict of numpy.ndarrays Dictionary of 3-D numpy arrays containing the stress period data for a selected stress period. The dictionary keys are the @@ -1882,7 +1882,7 @@ def plot( List of unique values to be excluded from the plot. Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. @@ -2604,7 +2604,7 @@ def plot( List of unique values to be excluded from the plot. Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. diff --git a/flopy/mf6/data/mfdatascalar.py b/flopy/mf6/data/mfdatascalar.py index 8eb482428..94c4f1e3f 100644 --- a/flopy/mf6/data/mfdatascalar.py +++ b/flopy/mf6/data/mfdatascalar.py @@ -668,7 +668,8 @@ def plot(self, filename_base=None, file_extension=None, **kwargs): """ Helper method to plot scalar objects - Parameters: + Parameters + ---------- scalar : flopy.mf6.data.mfscalar object filename_base : str Base file name that will be used to automatically generate file @@ -976,7 +977,7 @@ def plot( extracted. (default is zero). Returns - ---------- + ------- axes : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. diff --git a/flopy/mf6/data/mfdatautil.py b/flopy/mf6/data/mfdatautil.py index 9a4d6cdbf..ec244c3fe 100644 --- a/flopy/mf6/data/mfdatautil.py +++ b/flopy/mf6/data/mfdatautil.py @@ -155,7 +155,7 @@ def list_to_array(sarr, model_grid, kper=0, mask=False): return array with np.nan instead of zero Returns - ---------- + ------- out : dict of numpy.ndarrays Dictionary of 3-D numpy arrays containing the stress period data for a selected stress period. The dictionary keys are the diff --git a/flopy/mf6/data/mfstructure.py b/flopy/mf6/data/mfstructure.py index f45625ed5..0b4c65969 100644 --- a/flopy/mf6/data/mfstructure.py +++ b/flopy/mf6/data/mfstructure.py @@ -63,7 +63,7 @@ class Dfn: ----- Examples - ---- + -------- """ def __init__(self): @@ -179,7 +179,7 @@ class DfnPackage(Dfn): ----- Examples - ---- + -------- """ def __init__(self, package): @@ -447,7 +447,7 @@ class DfnFile(Dfn): ----- Examples - ---- + -------- """ def __init__(self, file): diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index 43a49e5c3..f16d7be09 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -3279,7 +3279,7 @@ def plot(self, **kwargs): MfList dictionary key. (default is None) Returns - ---------- + ------- axes : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis are returned. diff --git a/flopy/mf6/mfsimbase.py b/flopy/mf6/mfsimbase.py index e924bfebf..793bee6b9 100644 --- a/flopy/mf6/mfsimbase.py +++ b/flopy/mf6/mfsimbase.py @@ -603,7 +603,7 @@ def __repr__(self): Override __repr__ to print custom string. Returns - -------- + ------- repr string : str string describing object @@ -615,7 +615,7 @@ def __str__(self): Override __str__ to print custom string. Returns - -------- + ------- str string : str string describing object @@ -671,7 +671,7 @@ def model_names(self): Return a list of model names associated with this simulation. 
Returns - -------- + ------- list: list of model names """ @@ -683,7 +683,7 @@ def exchange_files(self): Return list of exchange files associated with this simulation. Returns - -------- + ------- list: list of exchange names """ @@ -1699,7 +1699,7 @@ def run_simulation( default is None, i.e. use the builtion print Returns - -------- + ------- success : bool buff : list of lines of stdout @@ -1786,7 +1786,7 @@ def model_dict(self): Return a dictionary of models associated with this simulation. Returns - -------- + ------- model dict : dict dictionary of models @@ -1805,7 +1805,7 @@ def get_model(self, model_name=None): will get the first model. Returns - -------- + ------- model : MFModel """ @@ -1833,7 +1833,7 @@ def get_exchange_file(self, filename): Name of exchange file to get Returns - -------- + ------- exchange package : MFPackage """ @@ -1853,7 +1853,7 @@ def get_file(self, filename): Name of mover file to get Returns - -------- + ------- mover package : MFPackage """ @@ -2076,7 +2076,7 @@ def register_package( Produce a filename for this package Returns - -------- + ------- (path : tuple, package structure : MFPackageStructure) """ @@ -2213,7 +2213,7 @@ def register_model(self, model, model_type, model_name, model_namefile): Solution group of model Returns - -------- + ------- model_structure_object : MFModelStructure """ @@ -2262,7 +2262,7 @@ def get_solution_package(self, key): solution package file name Returns - -------- + ------- solution_package : MFPackage """ @@ -2318,7 +2318,7 @@ def is_valid(self): Returns - -------- + ------- valid : bool Whether this is a valid simulation @@ -2556,7 +2556,7 @@ def plot( MFList dictionary key. (default is None) Returns - -------- + ------- axes: (list) matplotlib.pyplot.axes objects diff --git a/flopy/mf6/modflow/mfgnc.py b/flopy/mf6/modflow/mfgnc.py index 434f39752..b40a6493a 100644 --- a/flopy/mf6/modflow/mfgnc.py +++ b/flopy/mf6/modflow/mfgnc.py @@ -229,7 +229,7 @@ class GncPackages(mfpackage.MFChildPackages): GncPackages is a container class for the ModflowGnc class. Methods - ---------- + ------- initialize Initializes a new ModflowGnc package removing any sibling child packages attached to the same parent package. See ModflowGnc init diff --git a/flopy/mf6/modflow/mfgwemve.py b/flopy/mf6/modflow/mfgwemve.py index e57ef4646..744439c1e 100644 --- a/flopy/mf6/modflow/mfgwemve.py +++ b/flopy/mf6/modflow/mfgwemve.py @@ -197,7 +197,7 @@ class GwemvePackages(mfpackage.MFChildPackages): GwemvePackages is a container class for the ModflowGwemve class. Methods - ---------- + ------- initialize Initializes a new ModflowGwemve package removing any sibling child packages attached to the same parent package. See ModflowGwemve init diff --git a/flopy/mf6/modflow/mfgwfgnc.py b/flopy/mf6/modflow/mfgwfgnc.py index 445737947..7f2a16ec1 100644 --- a/flopy/mf6/modflow/mfgwfgnc.py +++ b/flopy/mf6/modflow/mfgwfgnc.py @@ -229,7 +229,7 @@ class GwfgncPackages(mfpackage.MFChildPackages): GwfgncPackages is a container class for the ModflowGwfgnc class. Methods - ---------- + ------- initialize Initializes a new ModflowGwfgnc package removing any sibling child packages attached to the same parent package. See ModflowGwfgnc init diff --git a/flopy/mf6/modflow/mfgwfmvr.py b/flopy/mf6/modflow/mfgwfmvr.py index 73949339d..1423f652a 100644 --- a/flopy/mf6/modflow/mfgwfmvr.py +++ b/flopy/mf6/modflow/mfgwfmvr.py @@ -406,7 +406,7 @@ class GwfmvrPackages(mfpackage.MFChildPackages): GwfmvrPackages is a container class for the ModflowGwfmvr class. 
Methods - ---------- + ------- initialize Initializes a new ModflowGwfmvr package removing any sibling child packages attached to the same parent package. See ModflowGwfmvr init diff --git a/flopy/mf6/modflow/mfgwtmvt.py b/flopy/mf6/modflow/mfgwtmvt.py index 9bfdf2438..d67b24494 100644 --- a/flopy/mf6/modflow/mfgwtmvt.py +++ b/flopy/mf6/modflow/mfgwtmvt.py @@ -197,7 +197,7 @@ class GwtmvtPackages(mfpackage.MFChildPackages): GwtmvtPackages is a container class for the ModflowGwtmvt class. Methods - ---------- + ------- initialize Initializes a new ModflowGwtmvt package removing any sibling child packages attached to the same parent package. See ModflowGwtmvt init diff --git a/flopy/mf6/modflow/mfmvr.py b/flopy/mf6/modflow/mfmvr.py index 2d6841226..2d30d3e13 100644 --- a/flopy/mf6/modflow/mfmvr.py +++ b/flopy/mf6/modflow/mfmvr.py @@ -406,7 +406,7 @@ class MvrPackages(mfpackage.MFChildPackages): MvrPackages is a container class for the ModflowMvr class. Methods - ---------- + ------- initialize Initializes a new ModflowMvr package removing any sibling child packages attached to the same parent package. See ModflowMvr init diff --git a/flopy/mf6/modflow/mfmvt.py b/flopy/mf6/modflow/mfmvt.py index 8c16eea4a..a4994f084 100644 --- a/flopy/mf6/modflow/mfmvt.py +++ b/flopy/mf6/modflow/mfmvt.py @@ -197,7 +197,7 @@ class MvtPackages(mfpackage.MFChildPackages): MvtPackages is a container class for the ModflowMvt class. Methods - ---------- + ------- initialize Initializes a new ModflowMvt package removing any sibling child packages attached to the same parent package. See ModflowMvt init diff --git a/flopy/mf6/modflow/mfutlats.py b/flopy/mf6/modflow/mfutlats.py index c8752d9f6..09925eb87 100644 --- a/flopy/mf6/modflow/mfutlats.py +++ b/flopy/mf6/modflow/mfutlats.py @@ -177,7 +177,7 @@ class UtlatsPackages(mfpackage.MFChildPackages): UtlatsPackages is a container class for the ModflowUtlats class. Methods - ---------- + ------- initialize Initializes a new ModflowUtlats package removing any sibling child packages attached to the same parent package. See ModflowUtlats init diff --git a/flopy/mf6/modflow/mfutlobs.py b/flopy/mf6/modflow/mfutlobs.py index a917c35d8..8464e6d89 100644 --- a/flopy/mf6/modflow/mfutlobs.py +++ b/flopy/mf6/modflow/mfutlobs.py @@ -217,7 +217,7 @@ class UtlobsPackages(mfpackage.MFChildPackages): UtlobsPackages is a container class for the ModflowUtlobs class. Methods - ---------- + ------- initialize Initializes a new ModflowUtlobs package removing any sibling child packages attached to the same parent package. See ModflowUtlobs init diff --git a/flopy/mf6/modflow/mfutltas.py b/flopy/mf6/modflow/mfutltas.py index 078f1a8f2..b00d56dbc 100644 --- a/flopy/mf6/modflow/mfutltas.py +++ b/flopy/mf6/modflow/mfutltas.py @@ -196,7 +196,7 @@ class UtltasPackages(mfpackage.MFChildPackages): UtltasPackages is a container class for the ModflowUtltas class. Methods - ---------- + ------- initialize Initializes a new ModflowUtltas package removing any sibling child packages attached to the same parent package. See ModflowUtltas init diff --git a/flopy/mf6/modflow/mfutlts.py b/flopy/mf6/modflow/mfutlts.py index 38e70b352..3113e423e 100644 --- a/flopy/mf6/modflow/mfutlts.py +++ b/flopy/mf6/modflow/mfutlts.py @@ -272,7 +272,7 @@ class UtltsPackages(mfpackage.MFChildPackages): UtltsPackages is a container class for the ModflowUtlts class. Methods - ---------- + ------- initialize Initializes a new ModflowUtlts package removing any sibling child packages attached to the same parent package. 
See ModflowUtlts init diff --git a/flopy/mf6/modflow/mfutltvk.py b/flopy/mf6/modflow/mfutltvk.py index 4f72614eb..1d7740351 100644 --- a/flopy/mf6/modflow/mfutltvk.py +++ b/flopy/mf6/modflow/mfutltvk.py @@ -236,7 +236,7 @@ class UtltvkPackages(mfpackage.MFChildPackages): UtltvkPackages is a container class for the ModflowUtltvk class. Methods - ---------- + ------- initialize Initializes a new ModflowUtltvk package removing any sibling child packages attached to the same parent package. See ModflowUtltvk init diff --git a/flopy/mf6/modflow/mfutltvs.py b/flopy/mf6/modflow/mfutltvs.py index a21b93b8d..d9fe1111c 100644 --- a/flopy/mf6/modflow/mfutltvs.py +++ b/flopy/mf6/modflow/mfutltvs.py @@ -236,7 +236,7 @@ class UtltvsPackages(mfpackage.MFChildPackages): UtltvsPackages is a container class for the ModflowUtltvs class. Methods - ---------- + ------- initialize Initializes a new ModflowUtltvs package removing any sibling child packages attached to the same parent package. See ModflowUtltvs init diff --git a/flopy/mf6/utils/binaryfile_utils.py b/flopy/mf6/utils/binaryfile_utils.py index aba094a04..91bcd0abd 100644 --- a/flopy/mf6/utils/binaryfile_utils.py +++ b/flopy/mf6/utils/binaryfile_utils.py @@ -57,7 +57,7 @@ class MFOutputRequester: binary data from the SimulationDict() object on the fly without actually storing it in the SimulationDict() object. - Parameters: + Parameters ---------- mfdict: dict local instance of the SimulationDict() object @@ -66,12 +66,12 @@ class MFOutputRequester: key: tuple user requested data key - Methods: + Methods ------- MFOutputRequester.querybinarydata returns: Xarray object - Examples: + Examples -------- >>> data = MFOutputRequester(mfdict, path, key) >>> data.querybinarydata diff --git a/flopy/mf6/utils/mfobservation.py b/flopy/mf6/utils/mfobservation.py index 4699e7d45..33e3eefe1 100644 --- a/flopy/mf6/utils/mfobservation.py +++ b/flopy/mf6/utils/mfobservation.py @@ -53,15 +53,16 @@ class Observations: Simple class to extract and view Observation files for Uzf models (possibly all obs/hobs)? - Input: - ------ - fi = (string) name of the observation binary output file + Parameters + ---------- + fi : str + name of the observation binary output file - Methods: - -------- + Methods + ------- get_data(): (np.array) returns array of observation data parameters: - ----------- + ---------- text = (str) specific modflow record name contained in Obs.out file idx = (int), (slice(start, stop)) integer or slice of data to be returned. corresponds to kstp*kper - 1 @@ -478,7 +479,7 @@ def _get_obsfile_names(self, partial_key, OBS8, obstype): obstype: (string) SINGLE or CONTINUOUS Returns: - -------- + ------- sets key: path to self.obs_dataDict """ diff --git a/flopy/mfusg/mfusg.py b/flopy/mfusg/mfusg.py index 9c01130a0..6f84d2b40 100644 --- a/flopy/mfusg/mfusg.py +++ b/flopy/mfusg/mfusg.py @@ -307,7 +307,7 @@ def _load_packages( Option to raise exceptions on package load failure. Returns - ---------- + ------- files_successfully_loaded : list of loaded files files_not_loaded : list of files that were not loaded """ diff --git a/flopy/mfusg/mfusgwel.py b/flopy/mfusg/mfusgwel.py index bb3ca0bbe..493c7a19c 100644 --- a/flopy/mfusg/mfusgwel.py +++ b/flopy/mfusg/mfusgwel.py @@ -249,9 +249,10 @@ def __init__( def _check_for_aux(self, options, cln=False): """Check dtype for auxiliary variables, and add to options. 
- Parameters: + Parameters ---------- - options: (list) package options + options: list + package options Returns ------- @@ -278,9 +279,10 @@ def _check_for_aux(self, options, cln=False): def write_file(self, f=None): """Write the package file. - Parameters: + Parameters ---------- - f: (str) optional file name + f : str, optional + file name Returns ------- diff --git a/flopy/modflow/mfwel.py b/flopy/modflow/mfwel.py index b6532331e..e211c949a 100644 --- a/flopy/modflow/mfwel.py +++ b/flopy/modflow/mfwel.py @@ -251,8 +251,10 @@ def write_file(self, f=None): """ Write the package file. - Parameters: - f: (str) optional file name + Parameters + ---------- + f : str, optional + file name Returns ------- diff --git a/flopy/pakbase.py b/flopy/pakbase.py index 2e69c7096..e71463021 100644 --- a/flopy/pakbase.py +++ b/flopy/pakbase.py @@ -812,7 +812,7 @@ def plot(self, **kwargs): MfList dictionary key. (default is None) Returns - ---------- + ------- axes : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis are returned. diff --git a/flopy/utils/binaryfile.py b/flopy/utils/binaryfile.py index fa70ad254..a7d57c4e4 100644 --- a/flopy/utils/binaryfile.py +++ b/flopy/utils/binaryfile.py @@ -554,7 +554,7 @@ def get_ts(self, idx): row, and column values must be zero based. Returns - ---------- + ------- out : numpy array Array has size (ntimes, ncells + 1). The first column in the data array will contain time (totim). @@ -937,7 +937,7 @@ def get_ts(self, idx): values must be zero based. Returns - ---------- + ------- out : numpy array Array has size (ntimes, ncells + 1). The first column in the data array will contain time (totim). @@ -1474,7 +1474,7 @@ def get_unique_record_names(self, decode=False): Optional boolean used to decode byte strings (default is False). Returns - ---------- + ------- names : list of strings List of unique text names in the binary file. @@ -1499,7 +1499,7 @@ def get_unique_package_names(self, decode=False, to=False): Optional boolean used to decode byte strings (default is False). Returns - ---------- + ------- names : list of strings List of unique package names in the binary file. @@ -1520,7 +1520,7 @@ def _unique_package_names(self, to=False): Get a list of unique package names in the file Returns - ---------- + ------- out : list of strings List of unique package names in the binary file. @@ -1551,7 +1551,7 @@ def get_indices(self, text=None): 'RIVER LEAKAGE', 'STORAGE', 'FLOW RIGHT FACE', etc. Returns - ---------- + ------- out : tuple indices of selected record name in budget file. @@ -1632,7 +1632,7 @@ def get_data( 'COMPACT BUDGET' MODFLOW budget file. (Default is False.) Returns - ---------- + ------- recordlist : list of records A list of budget objects. The structure of the returned object depends on the structure of the data in the cbb file. @@ -1738,7 +1738,7 @@ def get_ts(self, idx, text=None, times=None): List of times to from which to get time series. Returns - ---------- + ------- out : numpy array Array has size (ntimes, ncells + 1). The first column in the data array will contain time (totim). @@ -1885,7 +1885,7 @@ def get_record(self, idx, full3D=False): 'COMPACT BUDGET' MODFLOW budget file. (Default is False.) Returns - ---------- + ------- record : a single data record The structure of the returned object depends on the structure of the data in the cbb file. Compact list data are returned as @@ -2065,7 +2065,7 @@ def __create3D(self, data): Dictionary with node keywords and flows (q) items. 
Returns - ---------- + ------- out : numpy masked array List contains unique simulation times (totim) in binary file. @@ -2083,7 +2083,7 @@ def get_times(self): Get a list of unique times in the file Returns - ---------- + ------- out : list of floats List contains unique simulation times (totim) in binary file. diff --git a/flopy/utils/datafile.py b/flopy/utils/datafile.py index 6477ecfe0..d5043f2f0 100644 --- a/flopy/utils/datafile.py +++ b/flopy/utils/datafile.py @@ -252,7 +252,7 @@ def to_shapefile( Whether to print verbose output Returns - ---------- + ------- None See Also @@ -341,7 +341,7 @@ def plot( if filename_base is not None. (default is 'png') Returns - ---------- + ------- None See Also @@ -468,7 +468,7 @@ def get_times(self): Get a list of unique times in the file Returns - ---------- + ------- out : list of floats List contains unique simulation times (totim) in binary file. @@ -505,7 +505,7 @@ def get_data(self, kstpkper=None, idx=None, totim=None, mflay=None): all layers will be included. (Default is None.) Returns - ---------- + ------- data : numpy array Array has size (nlay, nrow, ncol) if mflay is None or it has size (nrow, ncol) if mlay is specified. @@ -556,7 +556,7 @@ def get_alldata(self, mflay=None, nodata=-9999): nodata value will be assigned np.nan. Returns - ---------- + ------- data : numpy array Array has size (ntimes, nlay, nrow, ncol) if mflay is None or it has size (ntimes, nrow, ncol) if mlay is specified. diff --git a/flopy/utils/formattedfile.py b/flopy/utils/formattedfile.py index dd4c795ba..784265037 100644 --- a/flopy/utils/formattedfile.py +++ b/flopy/utils/formattedfile.py @@ -54,7 +54,7 @@ def read_header(self, text_file): the header Returns - ---------- + ------- out : numpy array of header information also stores the header's format string as self.format_string @@ -257,7 +257,7 @@ def get_ts(self, idx): row, and column values must be zero based. Returns - ---------- + ------- out : numpy array Array has size (ntimes, ncells + 1). The first column in the data array will contain time (totim). diff --git a/flopy/utils/geometry.py b/flopy/utils/geometry.py index d22990f8a..dac27c144 100644 --- a/flopy/utils/geometry.py +++ b/flopy/utils/geometry.py @@ -11,7 +11,7 @@ class Shape: """ Parent class for handling geo interfacing, do not instantiate directly - Parameters: + Parameters ---------- type : str shapetype string @@ -233,9 +233,9 @@ class MultiPolygon(Collection): Container for housing and describing multipolygon geometries (e.g. to be read or written to shapefiles or other geographic data formats) - Parameters: + Parameters ---------- - polygons : list + polygons : list, tuple, default () list of flopy.utils.geometry.Polygon objects """ @@ -261,9 +261,9 @@ class MultiLineString(Collection): Container for housing and describing multilinestring geometries (e.g. to be read or written to shapefiles or other geographic data formats) - Parameters: + Parameters ---------- - polygons : list + linestrings : list, tuple, default () list of flopy.utils.geometry.LineString objects """ @@ -289,9 +289,9 @@ class MultiPoint(Collection): Container for housing and describing multipoint geometries (e.g. 
to be read or written to shapefiles or other geographic data formats) - Parameters: + Parameters ---------- - polygons : list + points : list, tuple, default () list of flopy.utils.geometry.Point objects """ diff --git a/flopy/utils/mflistfile.py b/flopy/utils/mflistfile.py index 38d4b2193..df952c985 100644 --- a/flopy/utils/mflistfile.py +++ b/flopy/utils/mflistfile.py @@ -183,7 +183,7 @@ def get_kstpkper(self): water budgets. Returns - ---------- + ------- out : list of (kstp, kper) tuples List of unique kstp, kper combinations in list file. kstp and kper values are zero-based. @@ -551,7 +551,7 @@ def get_reduced_pumping(self): file. Example - -------- + ------- >>> objLST = MfListBudget("my_model.lst") >>> raryReducedPpg = objLST.get_reduced_pumping() >>> dfReducedPpg = pd.DataFrame.from_records(raryReducedPpg) diff --git a/flopy/utils/modpathfile.py b/flopy/utils/modpathfile.py index 813160c5d..8a4dc9347 100644 --- a/flopy/utils/modpathfile.py +++ b/flopy/utils/modpathfile.py @@ -585,7 +585,7 @@ def get_maxtraveltime(self): Get the maximum travel time. Returns - ---------- + ------- out : float Maximum travel time. @@ -600,7 +600,7 @@ def get_alldata(self): ---------- Returns - ---------- + ------- data : numpy record array A numpy recarray with the endpoint particle data diff --git a/flopy/utils/observationfile.py b/flopy/utils/observationfile.py index f2b612f8f..73325e113 100644 --- a/flopy/utils/observationfile.py +++ b/flopy/utils/observationfile.py @@ -18,7 +18,7 @@ def get_times(self): Get a list of unique times in the file Returns - ---------- + ------- out : list of floats List contains unique simulation times (totim) in binary file. @@ -30,7 +30,7 @@ def get_ntimes(self): Get the number of times in the file Returns - ---------- + ------- out : int The number of simulation times (totim) in binary file. @@ -42,7 +42,7 @@ def get_nobs(self): Get the number of observations in the file Returns - ---------- + ------- out : tuple of int A tuple with the number of records and number of flow items in the file. The number of flow items is non-zero only if @@ -56,7 +56,7 @@ def get_obsnames(self): Get a list of observation names in the file Returns - ---------- + ------- out : list of strings List of observation names in the binary file. totim is not included in the list of observation names. @@ -82,7 +82,7 @@ def get_data(self, idx=None, obsname=None, totim=None): data for all simulation times are returned. (default is None) Returns - ---------- + ------- data : numpy record array Array has size (ntimes, nitems). totim is always returned. nitems is 2 if idx or obsname is not None or nobs+1. diff --git a/flopy/utils/particletrackfile.py b/flopy/utils/particletrackfile.py index 5029c5860..5cc7da452 100644 --- a/flopy/utils/particletrackfile.py +++ b/flopy/utils/particletrackfile.py @@ -60,7 +60,7 @@ def get_maxid(self) -> int: Get the maximum particle ID. Returns - ---------- + ------- out : int Maximum particle ID. @@ -72,7 +72,7 @@ def get_maxtime(self) -> float: Get the maximum tracking time. Returns - ---------- + ------- out : float Maximum tracking time. @@ -99,7 +99,7 @@ def get_data( Whether to return only the minimal, canonical fields. Default is False. Returns - ---------- + ------- data : np.recarray Recarray with dtype ParticleTrackFile.outdtype @@ -136,7 +136,7 @@ def get_alldata(self, totim=None, ge=True, minimal=False): Whether to return only the minimal, canonical fields. Default is False. 
Returns - ---------- + ------- data : list of numpy record arrays List of recarrays with dtype ParticleTrackFile.outdtype diff --git a/flopy/utils/swroutputfile.py b/flopy/utils/swroutputfile.py index a75beeb7a..e15948de2 100644 --- a/flopy/utils/swroutputfile.py +++ b/flopy/utils/swroutputfile.py @@ -109,7 +109,7 @@ def get_connectivity(self): ---------- Returns - ---------- + ------- data : numpy array Array has size (nrecord, 3). None is returned if swrtype is not 'flow' @@ -134,7 +134,7 @@ def get_nrecords(self): Get the number of records in the file Returns - ---------- + ------- out : tuple of int A tuple with the number of records and number of flow items in the file. The number of flow items is non-zero only if @@ -149,7 +149,7 @@ def get_kswrkstpkper(self): in the file Returns - ---------- + ------- out : list of (kswr, kstp, kper) tuples List of unique kswr, kstp, kper combinations in binary file. kswr, kstp, and kper values are zero-based. @@ -162,7 +162,7 @@ def get_ntimes(self): Get the number of times in the file Returns - ---------- + ------- out : int The number of simulation times (totim) in binary file. @@ -174,7 +174,7 @@ def get_times(self): Get a list of unique times in the file Returns - ---------- + ------- out : list of floats List contains unique simulation times (totim) in binary file. @@ -186,7 +186,7 @@ def get_record_names(self): Get a list of unique record names in the file Returns - ---------- + ------- out : list of strings List of unique text names in the binary file. @@ -210,7 +210,7 @@ def get_data(self, idx=None, kswrkstpkper=None, totim=None): The simulation time. (default is None) Returns - ---------- + ------- data : numpy record array Array has size (nitems). @@ -288,7 +288,7 @@ def get_ts(self, irec=0, iconn=0, klay=0, istr=0): (default is 0) Returns - ---------- + ------- out : numpy recarray Array has size (ntimes, nitems). The first column in the data array will contain time (totim). nitems is 2 for stage diff --git a/flopy/utils/util_array.py b/flopy/utils/util_array.py index b307a2353..637036281 100644 --- a/flopy/utils/util_array.py +++ b/flopy/utils/util_array.py @@ -708,7 +708,7 @@ def plot( List of unique values to be excluded from the plot. Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. @@ -1520,7 +1520,7 @@ def plot( extracted. (default is zero). Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. @@ -1974,7 +1974,7 @@ def plot( List of unique values to be excluded from the plot. Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. diff --git a/flopy/utils/util_list.py b/flopy/utils/util_list.py index 2aa3e5a5f..968547bb6 100644 --- a/flopy/utils/util_list.py +++ b/flopy/utils/util_list.py @@ -964,7 +964,7 @@ def plot( List of unique values to be excluded from the plot. Returns - ---------- + ------- out : list Empty list is returned if filename_base is not None. Otherwise a list of matplotlib.pyplot.axis is returned. @@ -1015,7 +1015,7 @@ def to_array(self, kper=0, mask=False): mask : boolean return array with np.nan instead of zero Returns - ---------- + ------- out : dict of numpy.ndarrays Dictionary of 3-D numpy arrays containing the stress period data for a selected stress period. 
The dictionary keys are the MfList dtype diff --git a/flopy/utils/utils_def.py b/flopy/utils/utils_def.py index 1a4cd56dd..421222394 100644 --- a/flopy/utils/utils_def.py +++ b/flopy/utils/utils_def.py @@ -151,7 +151,7 @@ def get_util2d_shape_for_layer(model, layer=0): layer (base 0) for which Util2d shape is sought. Returns - --------- + ------- (nrow,ncol) : tuple of ints util2d shape for the given layer """ @@ -185,7 +185,7 @@ def get_unitnumber_from_ext_unit_dict( Default is 0, in which case the returned output file is None. Returns - --------- + ------- unitnumber : int file unit number for the given modflow package (or None) filenames : list @@ -218,7 +218,7 @@ def type_from_iterable(_iter, index=0, _type=int, default_val=0): default_val : default value (0) Returns - ---------- + ------- val : value of type _type, or default_val """ try: diff --git a/pyproject.toml b/pyproject.toml index 3d9e0607e..067aeddc8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -130,7 +130,12 @@ extend-include = [ ] [tool.ruff.lint] -select = ["F", "E", "I001"] +select = [ + "D409", # pydocstyle - section-underline-matches-section-length + "E", # pycodestyle error + "F", # Pyflakes + "I001", # isort - unsorted-imports +] ignore = [ "E402", # module level import not at top of file "E501", # line too long TODO FIXME From d81d7c089f0688173f25c1f6d1e860e08c3a17ba Mon Sep 17 00:00:00 2001 From: mickey-tsai <126633857+mickey-tsai@users.noreply.github.com> Date: Fri, 7 Jun 2024 03:03:48 +0800 Subject: [PATCH 09/57] fix(vtk): fix __transient_vector access (#2209) the key should be `per` not `d` --- flopy/export/vtk.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flopy/export/vtk.py b/flopy/export/vtk.py index 07024a5b8..96703dc67 100644 --- a/flopy/export/vtk.py +++ b/flopy/export/vtk.py @@ -1489,7 +1489,7 @@ def write(self, f: Union[str, os.PathLike], kper=None): self.add_array(array, name) if per in self.__transient_vector: - d = self.__transient_vector[d] + d = self.__transient_vector[per] for name, vector in d.items(): self.add_vector(vector, name) From ea3e475c2a566872670d067b7c611861d4be8c54 Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Fri, 7 Jun 2024 07:24:35 -0400 Subject: [PATCH 10/57] docs(gridgen): fix smoothing level kwarg names in docstring (#2212) --- flopy/utils/gridgen.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/flopy/utils/gridgen.py b/flopy/utils/gridgen.py index 9fb46f4fc..d93e46c10 100644 --- a/flopy/utils/gridgen.py +++ b/flopy/utils/gridgen.py @@ -196,11 +196,11 @@ class Gridgen: where intermediate layers are inactive. (default is False) **kwargs - verical_smoothing_level : int + smoothing_level_vertical : int maximum level difference between two vertically adjacent cells. Adjust with caution, as adjustments can cause unexpected results to simulated flows - horizontal_smoothing_level : int + smoothing_level_horizontal : int maximum level difference between two horizontally adjacent cells. Adjust with caution, as adjustments can cause unexpected results to simulated flows From 0d9947eb8301561569676d4e3bdbc28a869e5bad Mon Sep 17 00:00:00 2001 From: Mike Taves Date: Sat, 8 Jun 2024 00:32:20 +1200 Subject: [PATCH 11/57] refactor(exceptions): raise NotImplementedError where appropriate (#2213) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR raises NotImplementedError where appropriate, for example abstract methods. 
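For illustration, the pattern looks like this (a minimal sketch only; `Base`
and `Derived` are hypothetical names, not classes touched by this PR):

    class Base:
        def write_file(self):
            # abstract: every package needs its own write_file
            raise NotImplementedError("write_file must be overloaded")

    class Derived(Base):
        def write_file(self):
            print("writing package file")  # concrete override

    Derived().write_file()  # fine
    Base().write_file()     # now raises NotImplementedError, not a bare Exception
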
Several instances where a comment or message printing "not implemented" are changed to raise this exception instead. A few other exceptions from flopy/mbase.py are reclassified to raise TypeError where some of the expected types are missing. --- flopy/mbase.py | 35 ++++++++++++++-------------------- flopy/modflow/mf.py | 6 ++---- flopy/modflow/mfmlt.py | 8 ++++---- flopy/modflow/mfpval.py | 8 ++++---- flopy/modflow/mfswr1.py | 11 ++++------- flopy/modflow/mfzon.py | 8 ++++---- flopy/modpath/mp6.py | 3 +-- flopy/modpath/mp7.py | 3 +-- flopy/mt3d/mt.py | 3 +-- flopy/pakbase.py | 3 +-- flopy/seawat/swt.py | 3 +-- flopy/utils/datafile.py | 4 ++-- flopy/utils/formattedfile.py | 2 +- flopy/utils/observationfile.py | 4 ++-- flopy/utils/util_list.py | 2 -- 15 files changed, 42 insertions(+), 61 deletions(-) diff --git a/flopy/mbase.py b/flopy/mbase.py index b36cf2555..525d754e3 100644 --- a/flopy/mbase.py +++ b/flopy/mbase.py @@ -956,8 +956,8 @@ def remove_output( self.output_binflag.pop(i) self.output_packages.pop(i) else: - msg = " either fname or unit must be passed to remove_output()" - raise Exception(msg) + msg = "either fname or unit must be passed to remove_output()" + raise TypeError(msg) def get_output( self, fname: Optional[Union[str, os.PathLike]] = None, unit=None @@ -985,8 +985,8 @@ def get_output( return self.output_fnames[i] return None else: - msg = " either fname or unit must be passed to get_output()" - raise Exception(msg) + msg = "either fname or unit must be passed to get_output()" + raise TypeError(msg) def set_output_attribute( self, @@ -1020,11 +1020,10 @@ def set_output_attribute( idx = i break else: - msg = ( - " either fname or unit must be passed " + raise TypeError( + "either fname or unit must be passed " "to set_output_attribute()" ) - raise Exception(msg) if attr is not None: if idx is not None: for key, value in attr.items: @@ -1065,8 +1064,8 @@ def get_output_attribute( idx = i break else: - raise Exception( - " either fname or unit must be passed " + raise TypeError( + "either fname or unit must be passed " "to set_output_attribute()" ) v = None @@ -1147,8 +1146,8 @@ def remove_external( if u == unit: plist.append(i) else: - msg = " either fname or unit must be passed to remove_external()" - raise Exception(msg) + msg = "either fname or unit must be passed to remove_external()" + raise TypeError(msg) # remove external file j = 0 for i in plist: @@ -1476,10 +1475,8 @@ def run_model( normal_msg=normal_msg, ) - def load_results(self): - print("load_results not implemented") - - return None + def load_results(self, **kwargs): + raise NotImplementedError("load_results not implemented") def write_input(self, SelPackList=False, check=False): """ @@ -1543,18 +1540,14 @@ def write_name_file(self): Every Package needs its own writenamefile function """ - raise Exception( - "IMPLEMENTATION ERROR: writenamefile must be overloaded" - ) + raise NotImplementedError("write_name_file must be overloaded") def set_model_units(self): """ Every model needs its own set_model_units method """ - raise Exception( - "IMPLEMENTATION ERROR: set_model_units must be overloaded" - ) + raise NotImplementedError("set_model_units must be overloaded") @property def name(self): diff --git a/flopy/modflow/mf.py b/flopy/modflow/mf.py index c02ecf839..40f279095 100644 --- a/flopy/modflow/mf.py +++ b/flopy/modflow/mf.py @@ -35,8 +35,7 @@ def __repr__(self): return "Global Package class" def write_file(self): - # Not implemented for global class - return + raise NotImplementedError class 
ModflowList(Package): @@ -53,8 +52,7 @@ def __repr__(self): return "List Package class" def write_file(self): - # Not implemented for list class - return + raise NotImplementedError class Modflow(BaseModel): diff --git a/flopy/modflow/mfmlt.py b/flopy/modflow/mfmlt.py index 6a84519b1..5073420fd 100644 --- a/flopy/modflow/mfmlt.py +++ b/flopy/modflow/mfmlt.py @@ -91,16 +91,16 @@ def write_file(self): """ Write the package file. - Returns - ------- - None + Raises + ------ + NotImplementedError Notes ----- Not implemented because parameters are only supported on load """ - pass + raise NotImplementedError @classmethod def load(cls, f, model, nrow=None, ncol=None, ext_unit_dict=None): diff --git a/flopy/modflow/mfpval.py b/flopy/modflow/mfpval.py index 151cf46a3..ff65054de 100644 --- a/flopy/modflow/mfpval.py +++ b/flopy/modflow/mfpval.py @@ -92,16 +92,16 @@ def write_file(self): """ Write the package file. - Returns - ------- - None + Raises + ------ + NotImplementedError Notes ----- Not implemented because parameters are only supported on load """ - pass + raise NotImplementedError def __getitem__(self, item): """ diff --git a/flopy/modflow/mfswr1.py b/flopy/modflow/mfswr1.py index 9e6c815cc..ae628a9c5 100644 --- a/flopy/modflow/mfswr1.py +++ b/flopy/modflow/mfswr1.py @@ -87,15 +87,12 @@ def write_file(self): """ Write the package file. - Returns - ------- - None + Raises + ------ + NotImplementedError """ - print("SWR1 write method not implemented yet") - # f = open(self.fn_path, 'w') - # f.write('{0}\n'.format(self.heading)) - # f.close() + raise NotImplementedError("SWR1 write method not implemented yet") @classmethod def load(cls, f, model, ext_unit_dict=None): diff --git a/flopy/modflow/mfzon.py b/flopy/modflow/mfzon.py index 891840d6b..ca63a8efd 100644 --- a/flopy/modflow/mfzon.py +++ b/flopy/modflow/mfzon.py @@ -95,16 +95,16 @@ def write_file(self): """ Write the package file. 
- Returns - ------- - None + Raises + ------ + NotImplementedError Notes ----- Not implemented because parameters are only supported on load """ - return + raise NotImplementedError @classmethod def load(cls, f, model, nrow=None, ncol=None, ext_unit_dict=None): diff --git a/flopy/modpath/mp6.py b/flopy/modpath/mp6.py index fe4c96868..e1c4b2c70 100644 --- a/flopy/modpath/mp6.py +++ b/flopy/modpath/mp6.py @@ -21,8 +21,7 @@ def __init__(self, model, extension="list", listunit=7): return def write_file(self): - # Not implemented for list class - return + raise NotImplementedError class Modpath6(BaseModel): diff --git a/flopy/modpath/mp7.py b/flopy/modpath/mp7.py index 618ea20aa..be810c233 100644 --- a/flopy/modpath/mp7.py +++ b/flopy/modpath/mp7.py @@ -34,8 +34,7 @@ def __init__(self, model, extension="list", unitnumber=None): return def write_file(self): - # Not implemented for list class - return + raise NotImplementedError class Modpath7(BaseModel): diff --git a/flopy/mt3d/mt.py b/flopy/mt3d/mt.py index b31c5c74b..ce295791a 100644 --- a/flopy/mt3d/mt.py +++ b/flopy/mt3d/mt.py @@ -36,8 +36,7 @@ def __repr__(self): return "List package class" def write_file(self): - # Not implemented for list class - return + raise NotImplementedError class Mt3dms(BaseModel): diff --git a/flopy/pakbase.py b/flopy/pakbase.py index e71463021..bf153da5d 100644 --- a/flopy/pakbase.py +++ b/flopy/pakbase.py @@ -861,8 +861,7 @@ def write_file(self, f=None, check=False): Every Package needs its own write_file function """ - print("IMPLEMENTATION ERROR: write_file must be overloaded") - return + raise NotImplementedError("write_file must be overloaded") @staticmethod def load( diff --git a/flopy/seawat/swt.py b/flopy/seawat/swt.py index 689ff5af0..82763bf7b 100644 --- a/flopy/seawat/swt.py +++ b/flopy/seawat/swt.py @@ -24,8 +24,7 @@ def __repr__(self): return "List package class" def write_file(self): - # Not implemented for list class - return + raise NotImplementedError class Seawat(BaseModel): diff --git a/flopy/utils/datafile.py b/flopy/utils/datafile.py index d5043f2f0..ae6263cc7 100644 --- a/flopy/utils/datafile.py +++ b/flopy/utils/datafile.py @@ -409,7 +409,7 @@ def _build_index(self): Build the recordarray and iposarray, which maps the header information to the position in the formatted file. """ - raise Exception( + raise NotImplementedError( "Abstract method _build_index called in LayerFile. " "This method needs to be overridden." ) @@ -584,7 +584,7 @@ def _read_data(self, shp): Read data from file """ - raise Exception( + raise NotImplementedError( "Abstract method _read_data called in LayerFile. " "This method needs to be overridden." ) diff --git a/flopy/utils/formattedfile.py b/flopy/utils/formattedfile.py index 784265037..29de5d5fa 100644 --- a/flopy/utils/formattedfile.py +++ b/flopy/utils/formattedfile.py @@ -175,7 +175,7 @@ def _get_text_header(self): Return a text header object containing header formatting information """ - raise Exception( + raise NotImplementedError( "Abstract method _get_text_header called in FormattedLayerFile. " "This method needs to be overridden." ) diff --git a/flopy/utils/observationfile.py b/flopy/utils/observationfile.py index 73325e113..c4fc30710 100644 --- a/flopy/utils/observationfile.py +++ b/flopy/utils/observationfile.py @@ -236,7 +236,7 @@ def _build_dtype(self): Build the recordarray and iposarray, which maps the header information to the position in the formatted file. 
""" - raise Exception( + raise NotImplementedError( "Abstract method _build_dtype called in BinaryFiles. " "This method needs to be overridden." ) @@ -246,7 +246,7 @@ def _build_index(self): Build the recordarray and iposarray, which maps the header information to the position in the formatted file. """ - raise Exception( + raise NotImplementedError( "Abstract method _build_index called in BinaryFiles. " "This method needs to be overridden." ) diff --git a/flopy/utils/util_list.py b/flopy/utils/util_list.py index 968547bb6..0ce2916b6 100644 --- a/flopy/utils/util_list.py +++ b/flopy/utils/util_list.py @@ -611,8 +611,6 @@ def __setitem__(self, kper, data): f"MfList error: unsupported data type: {type(data)}" ) - # raise NotImplementedError("MfList.__setitem__() not implemented") - def __fromfile(self, f): # d = np.fromfile(f,dtype=self.dtype,count=count) try: From 1b430bb52186c38dc5efb97a92d220b0b75cf5ac Mon Sep 17 00:00:00 2001 From: martclanor Date: Sat, 8 Jun 2024 04:38:00 +0200 Subject: [PATCH 12/57] test(gridintersect): clarify intersection of line with grid (#2216) This PR addresses the following issue: #2214 --- autotest/test_gridintersect.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/autotest/test_gridintersect.py b/autotest/test_gridintersect.py index dc7a7656f..43d8237fb 100644 --- a/autotest/test_gridintersect.py +++ b/autotest/test_gridintersect.py @@ -1214,10 +1214,10 @@ def test_point_offset_rot_structured_grid(): @requires_pkg("shapely") def test_linestring_offset_rot_structured_grid(): sgr = get_rect_grid(angrot=45.0, xyoffset=10.0) - ls = LineString([(5, 10.0 + np.sqrt(200.0)), (15, 10.0 + np.sqrt(200.0))]) + ls = LineString([(5, 25), (15, 25)]) ix = GridIntersect(sgr, method="structured") result = ix.intersect(ls) - assert len(result) == 2 + assert len(result) == 3 # check empty result when using local model coords ix = GridIntersect(sgr, method="structured", local=True) result = ix.intersect(ls) From 7a10205b8595f3324c4688a031d4cb096884af15 Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Mon, 10 Jun 2024 07:59:41 -0400 Subject: [PATCH 13/57] chore(deps): require jupyter_client>=8.4.0 for now (#2217) * avoid datetime.utcnow() deprecation warning * motivation: fix broken nbsphinx galleries on RTD * alternative: deprecation warning filter in examples? 
--- etc/environment.yml | 1 + pyproject.toml | 1 + 2 files changed, 2 insertions(+) diff --git a/etc/environment.yml b/etc/environment.yml index c16a75b5b..b3a489451 100644 --- a/etc/environment.yml +++ b/etc/environment.yml @@ -18,6 +18,7 @@ dependencies: - flaky - filelock - jupyter + - jupyter_client>=8.4.0 - jupytext - pip: - git+https://github.com/MODFLOW-USGS/modflow-devtools.git diff --git a/pyproject.toml b/pyproject.toml index 067aeddc8..066b93b74 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -47,6 +47,7 @@ test = [ "flaky", "filelock", "jupyter", + "jupyter_client >=8.4.0", # avoid datetime.utcnow() deprecation warning "jupytext", "modflow-devtools", "pytest !=8.1.0", From 5dabe6d955b28bf3b627fcc707d35d51132ced17 Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Mon, 10 Jun 2024 15:51:57 -0400 Subject: [PATCH 14/57] test: reenable parametrization in gridgen tests, update markers (#2219) * devtools now replaces square brackets from parametrization in path names * also need to name_map pyshp -> shapefile since requires_pkg is strict now --- autotest/regression/test_mf6.py | 2 +- autotest/test_export.py | 30 +++--- autotest/test_geospatial_util.py | 2 +- autotest/test_gridgen.py | 166 +++++++++++++++---------------- autotest/test_modpathfile.py | 2 +- autotest/test_mp6.py | 2 +- autotest/test_particledata.py | 2 +- autotest/test_sfr.py | 2 +- autotest/test_shapefile_utils.py | 4 +- 9 files changed, 101 insertions(+), 111 deletions(-) diff --git a/autotest/regression/test_mf6.py b/autotest/regression/test_mf6.py index 17824801a..51c79dc00 100644 --- a/autotest/regression/test_mf6.py +++ b/autotest/regression/test_mf6.py @@ -2317,7 +2317,7 @@ def test035_create_tests_fhb(function_tmpdir, example_data_path): @requires_exe("mf6") -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) @pytest.mark.regression def test006_create_tests_gwf3_disv(function_tmpdir, example_data_path): # init paths diff --git a/autotest/test_export.py b/autotest/test_export.py index 43d70685d..6b935527f 100644 --- a/autotest/test_export.py +++ b/autotest/test_export.py @@ -177,7 +177,7 @@ def unstructured_grid(example_data_path): ) -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) @pytest.mark.parametrize("pathlike", (True, False)) def test_output_helper_shapefile_export( pathlike, function_tmpdir, example_data_path @@ -202,7 +202,7 @@ def test_output_helper_shapefile_export( ) -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) @pytest.mark.slow def test_freyberg_export(function_tmpdir, example_data_path): # steady state @@ -296,7 +296,7 @@ def test_freyberg_export(function_tmpdir, example_data_path): assert part.read_text() == wkt -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) @pytest.mark.parametrize("missing_arrays", [True, False]) @pytest.mark.slow def test_disu_export(function_tmpdir, missing_arrays): @@ -353,7 +353,7 @@ def test_export_output(crs, function_tmpdir, example_data_path): assert read_crs == get_authority_crs(4326) -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) def test_write_gridlines_shapefile(function_tmpdir): import shapefile @@ -379,7 +379,7 @@ def test_write_gridlines_shapefile(function_tmpdir): assert len(sf) == 22 -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) def test_export_shapefile_polygon_closed(function_tmpdir): from shapefile import Reader @@ -501,7 +501,7 @@ def 
test_netcdf_classmethods(function_tmpdir, example_data_path): new_f.nc.close() -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) def test_shapefile_ibound(function_tmpdir, example_data_path): from shapefile import Reader @@ -524,7 +524,7 @@ def test_shapefile_ibound(function_tmpdir, example_data_path): shape.close() -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) @pytest.mark.slow @pytest.mark.parametrize("namfile", namfiles()) def test_shapefile(function_tmpdir, namfile): @@ -549,7 +549,7 @@ def test_shapefile(function_tmpdir, namfile): ), f"wrong number of records in shapefile {fnc_name}" -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) @pytest.mark.slow @pytest.mark.parametrize("namfile", namfiles()) def test_shapefile_export_modelgrid_override(function_tmpdir, namfile): @@ -616,7 +616,7 @@ def test_export_netcdf(function_tmpdir, namfile): nc.close() -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) def test_export_array2(function_tmpdir): nrow = 7 ncol = 11 @@ -650,7 +650,7 @@ def test_export_array2(function_tmpdir): assert os.path.isfile(filename), "did not create array shapefile" -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_export_array_contours_structured(function_tmpdir): nrow = 7 ncol = 11 @@ -686,7 +686,7 @@ def test_export_array_contours_structured(function_tmpdir): assert os.path.isfile(filename), "did not create contour shapefile" -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_export_array_contours_unstructured( function_tmpdir, unstructured_grid ): @@ -712,7 +712,7 @@ def test_export_array_contours_unstructured( from autotest.test_gridgen import sim_disu_diff_layers -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_export_array_contours_unstructured_diff_layers( function_tmpdir, sim_disu_diff_layers ): @@ -741,7 +741,7 @@ def test_export_array_contours_unstructured_diff_layers( # plt.show() -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_export_contourf(function_tmpdir, example_data_path): from shapefile import Reader @@ -784,7 +784,7 @@ def test_export_contourf(function_tmpdir, example_data_path): @pytest.mark.mf6 -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_export_contours(function_tmpdir, example_data_path): from shapefile import Reader @@ -952,7 +952,7 @@ def cellid(k, i, j, nrow, ncol): assert np.abs(it - it6) < 1e-6 -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) @pytest.mark.slow def test_export_huge_shapefile(function_tmpdir): nlay = 2 diff --git a/autotest/test_geospatial_util.py b/autotest/test_geospatial_util.py index 3783fde2b..9132e1d1b 100644 --- a/autotest/test_geospatial_util.py +++ b/autotest/test_geospatial_util.py @@ -153,7 +153,7 @@ def test_import_geospatial_utils(): ) -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_geospatial_collection_load_shpfile(example_data_path): # with Path shp = example_data_path / "freyberg" / "gis" / "bedrock_outcrop_hole.shp" diff --git a/autotest/test_gridgen.py b/autotest/test_gridgen.py index 064bd7064..dc13a9204 100644 --- 
a/autotest/test_gridgen.py +++ b/autotest/test_gridgen.py @@ -59,98 +59,88 @@ def get_structured_grid(): @requires_exe("gridgen") -@requires_pkg("shapefile") -# GRIDGEN seems not to like paths containing "[" or "]", as -# function_tmpdir does with parametrization, do it manually -# @pytest.mark.parametrize("grid_type", ["vertex", "unstructured"]) -def test_add_active_domain(function_tmpdir): # , grid_type): +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) +@pytest.mark.parametrize("grid_type", ["vertex", "unstructured"]) +def test_add_active_domain(function_tmpdir, grid_type): bgrid = get_structured_grid() - # test providing active domain various ways - for grid_type in ["vertex", "unstructured"]: - grids = [] - for feature in [ - [[[(0, 0), (0, 60), (40, 80), (60, 0), (0, 0)]]], - function_tmpdir / "ad0.shp", - function_tmpdir / "ad0", - "ad0.shp", - "ad0", - ]: - print( - "Testing add_active_domain() for", - grid_type, - "grid with features", - feature, - ) - gridgen = Gridgen(bgrid, model_ws=function_tmpdir) - gridgen.add_active_domain( - feature, - range(bgrid.nlay), - ) - gridgen.build() - grid = ( - VertexGrid(**gridgen.get_gridprops_vertexgrid()) - if grid_type == "vertex" - else UnstructuredGrid( - **gridgen.get_gridprops_unstructuredgrid() - ) - ) - grid.plot() - grids.append(grid) - # plt.show() - - assert grid.nnodes < bgrid.nnodes - assert not np.array_equal(grid.ncpl, bgrid.ncpl) - assert all(np.array_equal(grid.ncpl, g.ncpl) for g in grids) - assert all(grid.nnodes == g.nnodes for g in grids) + # test providing active domain in various ways + grids = [] + for feature in [ + [[[(0, 0), (0, 60), (40, 80), (60, 0), (0, 0)]]], + function_tmpdir / "ad0.shp", + function_tmpdir / "ad0", + "ad0.shp", + "ad0", + ]: + print( + "Testing add_active_domain() for", + grid_type, + "grid with features", + feature, + ) + gridgen = Gridgen(bgrid, model_ws=function_tmpdir) + gridgen.add_active_domain( + feature, + range(bgrid.nlay), + ) + gridgen.build() + grid = ( + VertexGrid(**gridgen.get_gridprops_vertexgrid()) + if grid_type == "vertex" + else UnstructuredGrid(**gridgen.get_gridprops_unstructuredgrid()) + ) + grid.plot() + grids.append(grid) + # plt.show() + + assert grid.nnodes < bgrid.nnodes + assert not np.array_equal(grid.ncpl, bgrid.ncpl) + assert all(np.array_equal(grid.ncpl, g.ncpl) for g in grids) + assert all(grid.nnodes == g.nnodes for g in grids) @requires_exe("gridgen") -@requires_pkg("shapefile") -# GRIDGEN seems not to like paths containing "[" or "]", as -# function_tmpdir does with parametrization, do it manually -# @pytest.mark.parametrize("grid_type", ["vertex", "unstructured"]) -def test_add_refinement_feature(function_tmpdir): # , grid_type): +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) +@pytest.mark.parametrize("grid_type", ["vertex", "unstructured"]) +def test_add_refinement_feature(function_tmpdir, grid_type): bgrid = get_structured_grid() - # test providing refinement feature various ways - for grid_type in ["vertex", "unstructured"]: - grids = [] - for features in [ - [[[(0, 0), (0, 60), (40, 80), (60, 0), (0, 0)]]], - function_tmpdir / "rf0.shp", - function_tmpdir / "rf0", - "rf0.shp", - "rf0", - ]: - print( - "Testing add_refinement_feature() for", - grid_type, - "grid with features", - features, - ) - gridgen = Gridgen(bgrid, model_ws=function_tmpdir) - gridgen.add_refinement_features( - features, - "polygon", - 1, - range(bgrid.nlay), - ) - gridgen.build() - grid = ( - VertexGrid(**gridgen.get_gridprops_vertexgrid()) - if grid_type == 
"vertex" - else UnstructuredGrid( - **gridgen.get_gridprops_unstructuredgrid() - ) - ) - grid.plot() - # plt.show() - - assert grid.nnodes > bgrid.nnodes - assert not np.array_equal(grid.ncpl, bgrid.ncpl) - assert all(np.array_equal(grid.ncpl, g.ncpl) for g in grids) - assert all(grid.nnodes == g.nnodes for g in grids) + # test providing refinement features in various ways + grids = [] + for features in [ + [[[(0, 0), (0, 60), (40, 80), (60, 0), (0, 0)]]], + function_tmpdir / "rf0.shp", + function_tmpdir / "rf0", + "rf0.shp", + "rf0", + ]: + print( + "Testing add_refinement_feature() for", + grid_type, + "grid with features", + features, + ) + gridgen = Gridgen(bgrid, model_ws=function_tmpdir) + gridgen.add_refinement_features( + features, + "polygon", + 1, + range(bgrid.nlay), + ) + gridgen.build() + grid = ( + VertexGrid(**gridgen.get_gridprops_vertexgrid()) + if grid_type == "vertex" + else UnstructuredGrid(**gridgen.get_gridprops_unstructuredgrid()) + ) + grid.plot() + # plt.show() + + assert grid.nnodes > bgrid.nnodes + assert not np.array_equal(grid.ncpl, bgrid.ncpl) + assert all(np.array_equal(grid.ncpl, g.ncpl) for g in grids) + assert all(grid.nnodes == g.nnodes for g in grids) @pytest.mark.slow @@ -364,7 +354,7 @@ def sim_disu_diff_layers(function_tmpdir): @pytest.mark.slow @requires_exe("mf6", "gridgen") -@requires_pkg("shapely", "shapefile") +@requires_pkg("shapely", "pyshp", name_map={"pyshp": "shapefile"}) def test_mf6disu(sim_disu_diff_layers): sim = sim_disu_diff_layers ws = sim.sim_path @@ -474,7 +464,7 @@ def test_mf6disu(sim_disu_diff_layers): @pytest.mark.slow @requires_exe("mfusg", "gridgen") -@requires_pkg("shapely", "shapefile") +@requires_pkg("shapely", "pyshp", name_map={"pyshp": "shapefile"}) def test_mfusg(function_tmpdir): from shapely.geometry import Polygon @@ -855,7 +845,7 @@ def test_gridgen(function_tmpdir): @requires_exe("mf6", "gridgen") -@requires_pkg("shapely", "shapefile") +@requires_pkg("shapely", "pyshp", name_map={"pyshp": "shapefile"}) def test_flopy_issue_1492(function_tmpdir): """ Submitted by David Brakenhoff in diff --git a/autotest/test_modpathfile.py b/autotest/test_modpathfile.py index 32a0531ef..e2e46bae1 100644 --- a/autotest/test_modpathfile.py +++ b/autotest/test_modpathfile.py @@ -313,7 +313,7 @@ def test_get_destination_endpoint_data( @pytest.mark.parametrize("longfieldname", [True, False]) @requires_exe("mf6", "mp7") -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_write_shapefile(function_tmpdir, mp7_small, longfieldname): from shapefile import Reader diff --git a/autotest/test_mp6.py b/autotest/test_mp6.py index 73935a1d4..962cc1074 100644 --- a/autotest/test_mp6.py +++ b/autotest/test_mp6.py @@ -131,7 +131,7 @@ def test_mpsim(function_tmpdir, mp6_test_path): assert stllines[6].strip().split()[-1] == "p2" -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_get_destination_data(function_tmpdir, mp6_test_path): copy_modpath_files(mp6_test_path, function_tmpdir, "EXAMPLE.") copy_modpath_files(mp6_test_path, function_tmpdir, "EXAMPLE-3.") diff --git a/autotest/test_particledata.py b/autotest/test_particledata.py index ed1f167ab..d7fab204c 100644 --- a/autotest/test_particledata.py +++ b/autotest/test_particledata.py @@ -658,7 +658,7 @@ def test_nodeparticledata_to_prp_dis_1_per_face(): assert len(rpts) == num_cells * 6 -@requires_pkg("shapefile") +@requires_pkg("pyshp", name_map={"pyshp": "shapefile"}) def 
test_nodeparticledata_prp_disv_big(function_tmpdir): Lx = 10000.0 Ly = 10500.0 diff --git a/autotest/test_sfr.py b/autotest/test_sfr.py index 34a35ddc8..de08b964c 100644 --- a/autotest/test_sfr.py +++ b/autotest/test_sfr.py @@ -373,7 +373,7 @@ def test_const(sfr_data): assert True -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_export(function_tmpdir, sfr_data): m = Modflow() dis = ModflowDis(m, 1, 10, 10, lenuni=2, itmuni=4) diff --git a/autotest/test_shapefile_utils.py b/autotest/test_shapefile_utils.py index b347806f1..5d1292c7d 100644 --- a/autotest/test_shapefile_utils.py +++ b/autotest/test_shapefile_utils.py @@ -17,7 +17,7 @@ from .test_grid import minimal_unstructured_grid_info, minimal_vertex_grid_info -@requires_pkg("shapefile", "shapely") +@requires_pkg("pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_model_attributes_to_shapefile(example_data_path, function_tmpdir): # freyberg mf2005 model name = "freyberg" @@ -53,7 +53,7 @@ def test_model_attributes_to_shapefile(example_data_path, function_tmpdir): assert shpfile_path.exists() -@requires_pkg("pyproj", "shapefile", "shapely") +@requires_pkg("pyproj", "pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_write_grid_shapefile( minimal_unstructured_grid_info, minimal_vertex_grid_info, function_tmpdir ): From f26cbd4a0b4daf17538ef448f843561730136153 Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Mon, 10 Jun 2024 17:59:46 -0400 Subject: [PATCH 15/57] ci: add concurrency groups to commit-triggered workflows (#2220) * only allow one concurrent workflow run per branch --- .github/workflows/commit.yml | 3 +++ .github/workflows/mf6.yml | 4 +++- .github/workflows/rtd.yml | 4 +++- 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml index 29da17835..0cc6dcbd3 100644 --- a/.github/workflows/commit.yml +++ b/.github/workflows/commit.yml @@ -5,6 +5,9 @@ on: branches: - master - develop +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true jobs: build: name: Build diff --git a/.github/workflows/mf6.yml b/.github/workflows/mf6.yml index 4489c41b2..389109e1a 100644 --- a/.github/workflows/mf6.yml +++ b/.github/workflows/mf6.yml @@ -8,7 +8,9 @@ on: branches: - master - develop - +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true jobs: test: diff --git a/.github/workflows/rtd.yml b/.github/workflows/rtd.yml index 4192123a9..7b7a406fb 100644 --- a/.github/workflows/rtd.yml +++ b/.github/workflows/rtd.yml @@ -6,7 +6,9 @@ on: branches: - master - develop - +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true jobs: rtd_build: From 3b95eec12b5c5bf85b4250f3e0c9568a2b657bff Mon Sep 17 00:00:00 2001 From: scottrp <45947939+scottrp@users.noreply.github.com> Date: Tue, 11 Jun 2024 08:03:10 -0700 Subject: [PATCH 16/57] feat(exg pkg binary file support) (#2205) * feat(exg pkg binary file support) * feat(binary file) * feat(exchange binary file) * fix(workflows): use latest version of modflow with notebook tests * fix(workflows) --- .docs/Notebooks/lgr_tutorial01.py | 39 +++++---- .github/workflows/rtd.yml | 7 +- autotest/regression/test_mf6.py | 16 +++- autotest/regression/test_mf6_pandas.py | 36 ++++---- flopy/mf6/coordinates/modeldimensions.py | 23 +++-- flopy/mf6/data/mfdataplist.py | 107 +++++++++++++---------- flopy/mf6/data/mfdatastorage.py | 40 +++------ flopy/mf6/data/mffileaccess.py 
| 71 +++++++++------ 8 files changed, 192 insertions(+), 147 deletions(-) diff --git a/.docs/Notebooks/lgr_tutorial01.py b/.docs/Notebooks/lgr_tutorial01.py index 72b9c12dd..172756ccf 100644 --- a/.docs/Notebooks/lgr_tutorial01.py +++ b/.docs/Notebooks/lgr_tutorial01.py @@ -462,23 +462,11 @@ # retrieve the exchange data from the lgr object exchangedata = lgr.get_exchange_data(angldegx=True, cdist=True) nexg = len(exchangedata) - - # When creating the exchange, which couples the child and parent - # models, use the xt3d option, which is an alternative to the - # ghost-node correction. This xt3d option was added as a new - # capability for the gwt-gwt and gwf-gwf exchanges in MODFLOW version 6.3.0. - exg = flopy.mf6.ModflowGwtgwt( - sim, - exgtype="GWT6-GWT6", - gwfmodelname1=gwfp.name, - gwfmodelname2=gwfc.name, - # xt3d=True, - auxiliary=["angldegx", "cdist"], - exgmnamea=pname, - exgmnameb=cname, - nexg=nexg, - exchangedata=exchangedata, - ) + exg_data = { + "filename": "exg_data.bin", + "data": exchangedata, + "binary": True, + } # Set up the parent model and use the lgr.parent object to # help provide the necessary information. @@ -522,6 +510,23 @@ ) sim.register_ims_package(ims_tran, [gwtp.name, gwtc.name]) + # When creating the exchange, which couples the child and parent + # models, use the xt3d option, which is an alternative to the + # ghost-node correction. This xt3d option was added as a new + # capability for the gwt-gwt and gwf-gwf exchanges in MODFLOW version 6.3.0. + exg = flopy.mf6.ModflowGwtgwt( + sim, + exgtype="GWT6-GWT6", + gwfmodelname1=gwfp.name, + gwfmodelname2=gwfc.name, + # xt3d=True, + auxiliary=["angldegx", "cdist"], + exgmnamea=pname, + exgmnameb=cname, + nexg=nexg, + exchangedata=exg_data, + ) + # couple flow and transport models gwfgwt_p = flopy.mf6.ModflowGwfgwt( sim, diff --git a/.github/workflows/rtd.yml b/.github/workflows/rtd.yml index 7b7a406fb..0096cc2ee 100644 --- a/.github/workflows/rtd.yml +++ b/.github/workflows/rtd.yml @@ -74,9 +74,14 @@ jobs: $PSDefaultParameterValues['*:ErrorAction']='Stop' powershell .github/install_opengl.ps1 - - name: Install MODFLOW executables + - name: Install Modflow-related executables uses: modflowpy/install-modflow-action@v1 + - name: Install Modflow dev build executables + uses: modflowpy/install-modflow-action@v1 + with: + repo: modflow6-nightly-build + - name: Run tutorial and example notebooks working-directory: autotest run: pytest -v -n auto test_notebooks.py diff --git a/autotest/regression/test_mf6.py b/autotest/regression/test_mf6.py index 51c79dc00..6dad5ce4c 100644 --- a/autotest/regression/test_mf6.py +++ b/autotest/regression/test_mf6.py @@ -2853,6 +2853,11 @@ def test006_create_tests_2models_gnc(function_tmpdir, example_data_path): ) sim.remove_package(exg_package.package_type) + exg_data = { + "filename": "exg_data.txt", + "data": exgrecarray, + "binary": True, + } exg_package = ModflowGwfgwf( sim, print_input=True, @@ -2860,7 +2865,7 @@ def test006_create_tests_2models_gnc(function_tmpdir, example_data_path): save_flows=True, auxiliary="testaux", nexg=36, - exchangedata=exgrecarray, + exchangedata=exg_data, exgtype="gwf6-gwf6", exgmnamea=model_name_1, exgmnameb=model_name_2, @@ -2881,6 +2886,7 @@ def test006_create_tests_2models_gnc(function_tmpdir, example_data_path): # change folder to save simulation sim.set_sim_path(function_tmpdir) + exg_package.exchangedata.set_record(exg_data) # write simulation to new location sim.write_simulation() @@ -4039,6 +4045,11 @@ def 
test006_2models_different_dis(function_tmpdir, example_data_path): exgrecarray = testutils.read_exchangedata( os.path.join(pth, "exg.txt"), 3, 2 ) + exg_data = { + "filename": "exg_data.bin", + "data": exgrecarray, + "binary": True, + } # build obs dictionary gwf_obs = { @@ -4055,7 +4066,7 @@ def test006_2models_different_dis(function_tmpdir, example_data_path): save_flows=True, auxiliary="testaux", nexg=9, - exchangedata=exgrecarray, + exchangedata=exg_data, exgtype="gwf6-gwf6", exgmnamea=model_name_1, exgmnameb=model_name_2, @@ -4077,6 +4088,7 @@ def test006_2models_different_dis(function_tmpdir, example_data_path): # change folder to save simulation sim.set_sim_path(function_tmpdir) + exg_package.exchangedata.set_record(exg_data) # write simulation to new location sim.write_simulation() diff --git a/autotest/regression/test_mf6_pandas.py b/autotest/regression/test_mf6_pandas.py index 183ed2ff0..7ef875ada 100644 --- a/autotest/regression/test_mf6_pandas.py +++ b/autotest/regression/test_mf6_pandas.py @@ -182,13 +182,13 @@ def test_pandas_001(function_tmpdir, example_data_path): assert well_data_pd.iloc[0, 1] == 0 assert well_data_pd.iloc[0, 2] == 4 assert well_data_pd.iloc[0, 3] == -2000.0 - assert well_data_pd["layer"][0] == 0 - assert well_data_pd["row"][0] == 0 - assert well_data_pd["column"][0] == 4 + assert well_data_pd["cellid_layer"][0] == 0 + assert well_data_pd["cellid_row"][0] == 0 + assert well_data_pd["cellid_column"][0] == 4 assert well_data_pd["q"][0] == -2000.0 - assert well_data_pd["layer"][1] == 0 - assert well_data_pd["row"][1] == 0 - assert well_data_pd["column"][1] == 7 + assert well_data_pd["cellid_layer"][1] == 0 + assert well_data_pd["cellid_row"][1] == 0 + assert well_data_pd["cellid_column"][1] == 7 assert well_data_pd["q"][1] == -2.0 well_data_rec = wel_package.stress_period_data.get_data(0) @@ -284,13 +284,13 @@ def test_pandas_001(function_tmpdir, example_data_path): assert well_data_pd_0.iloc[0, 1] == 0 assert well_data_pd_0.iloc[0, 2] == 4 assert well_data_pd_0.iloc[0, 3] == -2000.0 - assert well_data_pd_0["layer"][0] == 0 - assert well_data_pd_0["row"][0] == 0 - assert well_data_pd_0["column"][0] == 4 + assert well_data_pd_0["cellid_layer"][0] == 0 + assert well_data_pd_0["cellid_row"][0] == 0 + assert well_data_pd_0["cellid_column"][0] == 4 assert well_data_pd_0["q"][0] == -2000.0 - assert well_data_pd_0["layer"][1] == 0 - assert well_data_pd_0["row"][1] == 0 - assert well_data_pd_0["column"][1] == 7 + assert well_data_pd_0["cellid_layer"][1] == 0 + assert well_data_pd_0["cellid_row"][1] == 0 + assert well_data_pd_0["cellid_column"][1] == 7 assert well_data_pd_0["q"][1] == -2.0 well_data_pd = test_wel.stress_period_data.get_dataframe(1) assert isinstance(well_data_pd, pd.DataFrame) @@ -298,13 +298,13 @@ def test_pandas_001(function_tmpdir, example_data_path): assert well_data_pd.iloc[0, 1] == 0 assert well_data_pd.iloc[0, 2] == 4 assert well_data_pd.iloc[0, 3] == -1000.0 - assert well_data_pd["layer"][0] == 0 - assert well_data_pd["row"][0] == 0 - assert well_data_pd["column"][0] == 4 + assert well_data_pd["cellid_layer"][0] == 0 + assert well_data_pd["cellid_row"][0] == 0 + assert well_data_pd["cellid_column"][0] == 4 assert well_data_pd["q"][0] == -1000.0 - assert well_data_pd["layer"][1] == 0 - assert well_data_pd["row"][1] == 0 - assert well_data_pd["column"][1] == 7 + assert well_data_pd["cellid_layer"][1] == 0 + assert well_data_pd["cellid_row"][1] == 0 + assert well_data_pd["cellid_column"][1] == 7 assert well_data_pd["q"][1] == -20.0 test_riv = 
test_mod.get_package("riv") riv_data_pd = test_riv.stress_period_data.get_dataframe(0) diff --git a/flopy/mf6/coordinates/modeldimensions.py b/flopy/mf6/coordinates/modeldimensions.py index b80113624..013efafd8 100644 --- a/flopy/mf6/coordinates/modeldimensions.py +++ b/flopy/mf6/coordinates/modeldimensions.py @@ -102,6 +102,15 @@ def model_subspace_size(self, subspace_string="", data_item_num=None): subspace_string ) + def get_cellid_size(self, data_item_name): + model_num = DatumUtil.cellid_model_num( + data_item_name, + self.structure.model_data, + self.package_dim.model_dim, + ) + model_grid = self.get_model_grid(model_num=model_num) + return model_grid.get_num_spatial_coordinates() + def get_model_dim(self, data_item_num, model_num=None): if ( self.package_dim.model_dim is None @@ -111,9 +120,14 @@ def get_model_dim(self, data_item_num, model_num=None): return self.package_dim.model_dim[0] else: if model_num is None: - model_num = self.structure.data_item_structures[data_item_num][ - -1 - ] + # see if the name of the data item indicates which model to use + item_name = self.structure.data_item_structures[ + data_item_num + ].name + if item_name[-2] == "m" and DatumUtil.is_int(item_name[-1]): + model_num = int(item_name[-1]) - 1 + else: + return self.package_dim.model_dim[0] if not ( len(self.structure.data_item_structures) > data_item_num ): @@ -133,8 +147,7 @@ def get_model_dim(self, data_item_num, model_num=None): f"{len(self.package_dim.model_dim)}." ) - if DatumUtil.is_int(model_num): - return self.package_dim.model_dim[int(model_num)] + return self.package_dim.model_dim[model_num] class PackageDimensions: diff --git a/flopy/mf6/data/mfdataplist.py b/flopy/mf6/data/mfdataplist.py index 507795e70..594cb4f0b 100644 --- a/flopy/mf6/data/mfdataplist.py +++ b/flopy/mf6/data/mfdataplist.py @@ -303,36 +303,42 @@ def _add_cellid_fields(self, data, keep_existing=False): columns = data.columns.tolist() if isinstance(self._mg, StructuredGrid): if ( - "layer" in columns - and "row" in columns - and "column" in columns + "cellid_layer" in columns + and "cellid_row" in columns + and "cellid_column" in columns ): data["cellid"] = data[ - ["layer", "row", "column"] + ["cellid_layer", "cellid_row", "cellid_column"] ].apply(tuple, axis=1) if not keep_existing: data = data.drop( - columns=["layer", "row", "column"] + columns=[ + "cellid_layer", + "cellid_row", + "cellid_column", + ] ) elif isinstance(self._mg, VertexGrid): cell_2 = None - if "cell" in columns: - cell_2 = "cell" + if "cellid_cell" in columns: + cell_2 = "cellid_cell" elif "ncpl" in columns: - cell_2 = "ncpl" - if cell_2 is not None and "layer" in columns: - data["cellid"] = data[["layer", cell_2]].apply( - tuple, axis=1 - ) + cell_2 = "cellid_ncpl" + if cell_2 is not None and "cellid_layer" in columns: + data["cellid"] = data[ + ["cellid_layer", cell_2] + ].apply(tuple, axis=1) if not keep_existing: - data = data.drop(columns=["layer", cell_2]) + data = data.drop( + columns=["cellid_layer", cell_2] + ) elif isinstance(self._mg, UnstructuredGrid): - if "node" in columns: - data["cellid"] = data[["node"]].apply( + if "cellid_node" in columns: + data["cellid"] = data[["cellid_node"]].apply( tuple, axis=1 ) if not keep_existing: - data = data.drop(columns=["node"]) + data = data.drop(columns=["cellid_node"]) else: raise MFDataException( "ERROR: Unrecognized model grid " @@ -408,14 +414,20 @@ def _build_data_header(self): # get the appropriate cellid column headings for the # model's discretization type if isinstance(self._mg, 
StructuredGrid): - self._append_type_list("layer", i_type, True) - self._append_type_list("row", i_type, True) - self._append_type_list("column", i_type, True) + self._append_type_list( + "cellid_layer", i_type, True + ) + self._append_type_list("cellid_row", i_type, True) + self._append_type_list( + "cellid_column", i_type, True + ) elif isinstance(self._mg, VertexGrid): - self._append_type_list("layer", i_type, True) - self._append_type_list("cell", i_type, True) + self._append_type_list( + "cellid_layer", i_type, True + ) + self._append_type_list("cellid_cell", i_type, True) elif isinstance(self._mg, UnstructuredGrid): - self._append_type_list("node", i_type, True) + self._append_type_list("cellid_node", i_type, True) else: raise MFDataException( "ERROR: Unrecognized model grid " @@ -496,42 +508,44 @@ def _untuple_cellids(self, pdata): try: pdata.insert( loc=field_idx, - column=self._unique_column_name(pdata, "layer"), + column=self._unique_column_name(pdata, "cellid_layer"), value=pdata.apply(lambda x: x[column_name][0], axis=1), ) except (ValueError, TypeError): self._untuple_manually( pdata, field_idx, - self._unique_column_name(pdata, "layer"), + self._unique_column_name(pdata, "cellid_layer"), column_name, 0, ) try: pdata.insert( loc=field_idx + 1, - column=self._unique_column_name(pdata, "row"), + column=self._unique_column_name(pdata, "cellid_row"), value=pdata.apply(lambda x: x[column_name][1], axis=1), ) except (ValueError, TypeError): self._untuple_manually( pdata, field_idx + 1, - self._unique_column_name(pdata, "row"), + self._unique_column_name(pdata, "cellid_row"), column_name, 1, ) try: pdata.insert( loc=field_idx + 2, - column=self._unique_column_name(pdata, "column"), + column=self._unique_column_name( + pdata, "cellid_column" + ), value=pdata.apply(lambda x: x[column_name][2], axis=1), ) except (ValueError, TypeError): self._untuple_manually( pdata, field_idx + 2, - self._unique_column_name(pdata, "column"), + self._unique_column_name(pdata, "cellid_column"), column_name, 2, ) @@ -539,48 +553,48 @@ def _untuple_cellids(self, pdata): try: pdata.insert( loc=field_idx, - column=self._unique_column_name(pdata, "layer"), + column=self._unique_column_name(pdata, "cellid_layer"), value=pdata.apply(lambda x: x[column_name][0], axis=1), ) except (ValueError, TypeError): self._untuple_manually( pdata, field_idx, - self._unique_column_name(pdata, "layer"), + self._unique_column_name(pdata, "cellid_layer"), column_name, 0, ) try: pdata.insert( loc=field_idx + 1, - column=self._unique_column_name(pdata, "cell"), + column=self._unique_column_name(pdata, "cellid_cell"), value=pdata.apply(lambda x: x[column_name][1], axis=1), ) except (ValueError, TypeError): self._untuple_manually( pdata, field_idx + 1, - self._unique_column_name(pdata, "cell"), + self._unique_column_name(pdata, "cellid_cell"), column_name, 1, ) elif isinstance(self._mg, UnstructuredGrid): - if column_name == "node": + if column_name == "cellid_node": # fixing a problem where node was specified as a tuple # make sure new column is named properly - column_name = "node_2" - pdata = pdata.rename(columns={"node": column_name}) + column_name = "cellid_node_2" + pdata = pdata.rename(columns={"cellid_node": column_name}) try: pdata.insert( loc=field_idx, - column=self._unique_column_name(pdata, "node"), + column=self._unique_column_name(pdata, "cellid_node"), value=pdata.apply(lambda x: x[column_name][0], axis=1), ) except (ValueError, TypeError): self._untuple_manually( pdata, field_idx, - self._unique_column_name(pdata, 
"node"), + self._unique_column_name(pdata, "cellid_node"), column_name, 0, ) @@ -1239,7 +1253,6 @@ def _save_binary_data(self, fd_data_file, data): file_access.write_binary_file( self._dataframe_to_recarray(data), fd_data_file, - self._model_or_sim.modeldiscrit, ) data_storage = self._get_storage_obj() data_storage.internal_data = None @@ -1281,13 +1294,12 @@ def _load_external_data(self, data_storage): ) np_data = file_access.read_binary_data_from_file( file_path, - self._model_or_sim.modeldiscrit, build_cellid=False, ) pd_data = pandas.DataFrame(np_data) if "col" in pd_data: # keep layer/row/column names consistent - pd_data = pd_data.rename(columns={"col": "column"}) + pd_data = pd_data.rename(columns={"col": "cellid_column"}) self._decrement_id_fields(pd_data) else: with open(file_path, "r") as fd_data_file: @@ -1439,16 +1451,17 @@ def _update_id_fields(self, id_fields, data_item_struct, data_frame): an item in the expected data structure and the data provided. """ if data_item_struct.numeric_index or data_item_struct.is_cellid: - if data_item_struct.name.lower() == "cellid": + name = data_item_struct.name.lower() + if name.startswith("cellid"): if isinstance(self._mg, StructuredGrid): - id_fields.append("layer") - id_fields.append("row") - id_fields.append("column") + id_fields.append(f"{name}_layer") + id_fields.append(f"{name}_row") + id_fields.append(f"{name}_column") elif isinstance(self._mg, VertexGrid): - id_fields.append("layer") - id_fields.append("cell") + id_fields.append(f"{name}_layer") + id_fields.append(f"{name}_cell") elif isinstance(self._mg, UnstructuredGrid): - id_fields.append("node") + id_fields.append(f"{name}_node") else: raise MFDataException( "ERROR: Unrecognized model grid " diff --git a/flopy/mf6/data/mfdatastorage.py b/flopy/mf6/data/mfdatastorage.py index 86fc50e24..8a5741353 100644 --- a/flopy/mf6/data/mfdatastorage.py +++ b/flopy/mf6/data/mfdatastorage.py @@ -1597,15 +1597,6 @@ def _build_recarray(self, data, key, autofill): self._verify_list(new_data) return new_data - def _get_cellid_size(self, data_item_name): - model_num = DatumUtil.cellid_model_num( - data_item_name, - self.data_dimensions.structure.model_data, - self.data_dimensions.package_dim.model_dim, - ) - model_grid = self.data_dimensions.get_model_grid(model_num=model_num) - return model_grid.get_num_spatial_coordinates() - def make_tuple_cellids(self, data): # convert cellids from individual layer, row, column fields into # tuples (layer, row, column) @@ -1616,7 +1607,7 @@ def make_tuple_cellids(self, data): new_line = [] for item, is_cellid in zip(line, self.recarray_cellid_list_ex): if is_cellid: - cellid_size = self._get_cellid_size( + cellid_size = self.data_dimensions.get_cellid_size( self._recarray_type_list[data_idx][0], ) current_cellid += (item,) @@ -1761,10 +1752,7 @@ def store_external( self._stress_period, ) file_access.write_binary_file( - self.layer_storage.first_item().internal_data, - fp, - self._model_or_sim.modeldiscrit, - precision="double", + self.layer_storage.first_item().internal_data, fp ) else: # make sure folder exists @@ -1802,15 +1790,6 @@ def store_external( # set as external data self.layer_storage.first_item().internal_data = None else: - # if self.layer_storage.in_shape(layer_new): - # factor = self.layer_storage[layer_new].factor - # if preserve_record: - # adjustment = multiplier / factor - # if adjustment != 1.0: - # convert numbers to be multiplied by the - # original factor - # data = data * adjustment - # store data externally in file data_size = 
self.get_data_size(layer_new) data_type = data_dim.structure.data_item_structures[0].type @@ -2030,9 +2009,7 @@ def external_to_internal( self._stress_period, ) if self.layer_storage[layer].binary: - data = file_access.read_binary_data_from_file( - read_file, self._model_or_sim.modeldiscrit - ) + data = file_access.read_binary_data_from_file(read_file) data_out = self._build_recarray(data, layer, False) else: with open(read_file) as fd_read_file: @@ -2144,7 +2121,7 @@ def _validate_cellid(self, arr_line, data_index, data_item): return False if arr_line is None: return False - cellid_size = self._get_cellid_size(data_item.name) + cellid_size = self.data_dimensions.get_cellid_size(data_item.name) model_grid = self.data_dimensions.get_model_grid() if cellid_size + data_index > len(arr_line): return False @@ -2291,7 +2268,7 @@ def _verify_list(self, data): # this is a cell id. verify that it contains the # correct number of integers if cellid_size is None: - cellid_size = self._get_cellid_size( + cellid_size = self.data_dimensions.get_cellid_size( self._recarray_type_list[index][0] ) if ( @@ -2833,6 +2810,7 @@ def build_type_list( data_item, data_set, data, + data_item_num=index, repeating_key=key, min_size=min_size, ) @@ -2873,7 +2851,9 @@ def build_type_list( ): # A cellid is a single entry (tuple) in the # recarray. Adjust dimensions accordingly. - size = self._get_cellid_size(data_item.name) + size = self.data_dimensions.get_cellid_size( + data_item.name + ) data_item.remove_cellid(resolved_shape, size) if not data_item.optional or not min_size: for index in range(0, resolved_shape[0]): @@ -2910,7 +2890,7 @@ def _append_type_lists(self, name, data_type, iscellid): if iscellid and self._model_or_sim.model_type is not None: # write each part of the cellid out as a separate entry # to _recarray_list_list_ex - cellid_size = self._get_cellid_size(name) + cellid_size = self.data_dimensions.get_cellid_size(name) # determine header for different grid types if cellid_size == 1: self._do_ex_list_append(name, int, iscellid) diff --git a/flopy/mf6/data/mffileaccess.py b/flopy/mf6/data/mffileaccess.py index 73f19f92b..e64ed695d 100644 --- a/flopy/mf6/data/mffileaccess.py +++ b/flopy/mf6/data/mffileaccess.py @@ -1046,18 +1046,14 @@ def __init__( self._last_line_info = [] self.simple_line = False - def read_binary_data_from_file( - self, read_file, modelgrid, precision="double", build_cellid=True - ): + def read_binary_data_from_file(self, read_file, build_cellid=True): # read from file - header, int_cellid_indexes, ext_cellid_indexes = self._get_header( - modelgrid, precision - ) + header, int_cellid_indexes, ext_cellid_indexes = self._get_header() file_array = np.fromfile(read_file, dtype=header, count=-1) if not build_cellid: return file_array # build data list for recarray - cellid_size = len(self._get_cell_header(modelgrid)) + cellid_size = {} data_list = [] for record in file_array: data_record = () @@ -1067,9 +1063,18 @@ def read_binary_data_from_file( if index in ext_cellid_indexes: current_cellid += (data_item - 1,) current_cellid_size += 1 - if current_cellid_size == cellid_size: - data_record += current_cellid - data_record = (data_record,) + rec_len = len(data_record) + if rec_len not in cellid_size: + data_item_struct = self.structure.data_item_structures[ + rec_len + ] + cellid_size[rec_len] = ( + self._data_dimensions.get_cellid_size( + data_item_struct.name + ) + ) + if current_cellid_size == cellid_size[rec_len]: + data_record += (current_cellid,) current_cellid = () current_cellid_size 
= 0 else: @@ -1077,18 +1082,14 @@ def read_binary_data_from_file( data_list.append(data_record) return data_list - def write_binary_file( - self, data, fname, modelgrid=None, precision="double" - ): + def write_binary_file(self, data, fname): fd = self._open_ext_file(fname, binary=True, write=True) - data_array = self._build_data_array(data, modelgrid, precision) + data_array = self._build_data_array(data) data_array.tofile(fd) fd.close() - def _build_data_array(self, data, modelgrid, precision): - header, int_cellid_indexes, ext_cellid_indexes = self._get_header( - modelgrid, precision - ) + def _build_data_array(self, data): + header, int_cellid_indexes, ext_cellid_indexes = self._get_header() data_list = [] for record in data: new_record = () @@ -1104,7 +1105,8 @@ def _build_data_array(self, data, modelgrid, precision): data_list.append(new_record) return np.array(data_list, dtype=header) - def _get_header(self, modelgrid, precision): + def _get_header(self): + np_int_type = np.int32 np_flt_type = np.float64 header = [] int_cellid_indexes = {} @@ -1112,14 +1114,21 @@ def _get_header(self, modelgrid, precision): ext_index = 0 for index, di_struct in enumerate(self.structure.data_item_structures): if di_struct.is_cellid: - cell_header = self._get_cell_header(modelgrid) + cell_header = self._get_cell_header( + di_struct, + self.structure.data_item_structures, + index, + ) header += cell_header int_cellid_indexes[index] = True for index in range(ext_index, ext_index + len(cell_header)): ext_cellid_indexes[index] = True ext_index += len(cell_header) elif not di_struct.optional: - header.append((di_struct.name, np_flt_type)) + if di_struct.type == DatumType.integer: + header.append((di_struct.name, np_int_type)) + else: + header.append((di_struct.name, np_flt_type)) ext_index += 1 elif di_struct.name == "aux": aux_var_names = ( @@ -1132,13 +1141,21 @@ def _get_header(self, modelgrid, precision): ext_index += 1 return header, int_cellid_indexes, ext_cellid_indexes - def _get_cell_header(self, modelgrid): - if modelgrid.grid_type == "structured": - return [("layer", np.int32), ("row", np.int32), ("col", np.int32)] - elif modelgrid.grid_type == "vertex": - return [("layer", np.int32), ("ncpl", np.int32)] + def _get_cell_header(self, data_item, data_set, index): + cellid_size = self._data_dimensions.get_cellid_size(data_item.name) + if cellid_size == 3: + return [ + (f"{data_item.name}_layer", np.int32), + (f"{data_item.name}_row", np.int32), + (f"{data_item.name}_column", np.int32), + ] + elif cellid_size == 2: + return [ + (f"{data_item.name}_layer", np.int32), + (f"{data_item.name}_cell", np.int32), + ] else: - return [("nodes", np.int32)] + return [(f"{data_item.name}_nodes", np.int32)] def load_from_package( self, first_line, file_handle, storage, pre_data_comments=None From 667774231a3c3e40fb68067331ead4b8a576cbee Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Tue, 11 Jun 2024 14:19:43 -0400 Subject: [PATCH 17/57] fix(swt): pass load_only down to Mt3dms.load() (#2222) Reproduce and fix #2198 --- autotest/test_seawat.py | 26 +++++++++++++++++++------- flopy/seawat/swt.py | 1 + 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/autotest/test_seawat.py b/autotest/test_seawat.py index 53fb2c500..aba1c125c 100644 --- a/autotest/test_seawat.py +++ b/autotest/test_seawat.py @@ -121,7 +121,7 @@ def test_seawat_henry(function_tmpdir): mswt.write_input() success, buff = mswt.run_model(silent=False) - assert success + assert success, buff @pytest.mark.slow @@ -227,13 +227,25 @@ def 
test_seawat_load_and_write(function_tmpdir, namfile, binary): m.write_input() - # TODO: run models in separate CI workflow? - # with regression testing & benchmarking? - run = False - if run: - success, buff = m.run_model(silent=False) - assert success +@requires_exe("swtv4") +def test_seawat_load_only(function_tmpdir): + namfile = swt4_namfiles()[0] + model_name = Path(namfile).name + m = Seawat.load(model_name, model_ws=Path(namfile).parent, verbose=True) + m.change_model_ws(function_tmpdir, reset_external=True) + m.write_input() + + files = function_tmpdir.glob("*.adv") + adv_file = next(files) + assert adv_file is not None + adv_file.unlink() + + load_only = ["btn", "dis", "bas6", "oc"] + m = Seawat.load( + model_name, model_ws=function_tmpdir, load_only=load_only, verbose=True + ) + assert set([pkg.upper() for pkg in load_only]) == set(m.get_package_list()) def test_vdf_vsc(function_tmpdir): diff --git a/flopy/seawat/swt.py b/flopy/seawat/swt.py index 82763bf7b..b10a41b58 100644 --- a/flopy/seawat/swt.py +++ b/flopy/seawat/swt.py @@ -495,6 +495,7 @@ def load( exe_name=None, verbose=verbose, model_ws=model_ws, + load_only=load_only, forgive=False, ) From 17bb2e95616eae65876415a10ca6fa0e394e8362 Mon Sep 17 00:00:00 2001 From: martclanor Date: Tue, 11 Jun 2024 20:21:30 +0200 Subject: [PATCH 18/57] docs: clean-up commented-out code and other comments (#2218)

This PR aims to clean up docs/comments in Python files. Primarily, it removes commented-out lines of code that appear to have been unintentionally staged for commit. The deletions include:

- Old code prior to refactoring
- Assert statements
- Print debug statements
- Warning, logging, exception messages
- Function arguments, etc.

The deletions do not include:

- .docs/Notebooks/*.py

As the changes are limited to comments, for the most part they should not affect the existing behavior of the code; hence the "docs" label for this PR.
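The one-line change to flopy/seawat/swt.py in PATCH 17 above forwards `load_only` down to `Mt3dms.load`, so the transport packages wrapped by SEAWAT now obey the same filter as the flow packages. A minimal sketch of the fixed behavior, mirroring the new `test_seawat_load_only` test; the name file and workspace here are hypothetical:

```python
import flopy

# Load only the listed packages; before the fix, load_only was silently
# dropped when Seawat.load delegated to Mt3dms.load, so transport
# packages were always loaded.
load_only = ["btn", "dis", "bas6", "oc"]
m = flopy.seawat.Seawat.load(
    "swt4.nam", model_ws="swt4_ws", load_only=load_only, verbose=True
)

# Only the requested packages should be present on the loaded model.
assert {pkg.upper() for pkg in load_only} == set(m.get_package_list())
```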
--- .../scripts/uspb_capture_par.py | 5 - autotest/test_cbc_full3D.py | 1 - autotest/test_export.py | 16 --- autotest/test_grid.py | 5 +- autotest/test_grid_cases.py | 2 - autotest/test_gridgen.py | 1 - autotest/test_gridintersect.py | 3 - autotest/test_headufile.py | 1 - autotest/test_lake_connections.py | 3 - autotest/test_mf6.py | 2 +- autotest/test_mfsimlist.py | 3 - autotest/test_modflow.py | 34 ----- autotest/test_mp6.py | 3 - autotest/test_mp7.py | 1 - autotest/test_mp7_cases.py | 2 +- autotest/test_plot_map_view.py | 1 - autotest/test_postprocessing.py | 7 - autotest/test_sfr.py | 4 - autotest/test_specific_discharge.py | 2 +- autotest/test_str.py | 2 +- autotest/test_subwt.py | 8 -- autotest/test_uzf.py | 2 - autotest/test_zonbud_utility.py | 6 - flopy/discretization/grid.py | 7 - flopy/discretization/readme.md | 1 - flopy/discretization/vertexgrid.py | 1 - flopy/export/metadata.py | 6 - flopy/export/netcdf.py | 30 +---- flopy/export/shapefile_utils.py | 9 +- flopy/export/utils.py | 34 ----- flopy/export/vtk.py | 1 - flopy/mbase.py | 49 ------- flopy/mf6/coordinates/simulationtime.py | 26 ---- flopy/mf6/data/mfstructure.py | 1 - flopy/mf6/mfpackage.py | 2 - flopy/mfusg/mfusglpf.py | 1 - flopy/modflow/mf.py | 13 -- flopy/modflow/mfbas.py | 2 - flopy/modflow/mfbct.py | 2 - flopy/modflow/mffhb.py | 1 - flopy/modflow/mfflwob.py | 11 -- flopy/modflow/mfhfb.py | 1 - flopy/modflow/mflpf.py | 4 - flopy/modflow/mfmlt.py | 1 - flopy/modflow/mfmnw1.py | 10 -- flopy/modflow/mfmnw2.py | 14 +- flopy/modflow/mfmnwi.py | 4 - flopy/modflow/mfoc.py | 6 +- flopy/modflow/mfpar.py | 2 - flopy/modflow/mfparbc.py | 3 - flopy/modflow/mfpbc.py | 12 -- flopy/modflow/mfpcg.py | 2 - flopy/modflow/mfsfr2.py | 96 +------------- flopy/modflow/mfswi2.py | 1 - flopy/modflow/mfswt.py | 3 - flopy/modpath/mp6.py | 5 +- flopy/modpath/mp6bas.py | 2 - flopy/modpath/mp6sim.py | 11 -- flopy/modpath/mp7.py | 2 +- flopy/mt3d/mt.py | 8 -- flopy/mt3d/mtcts.py | 124 ------------------ flopy/mt3d/mtdsp.py | 6 - flopy/mt3d/mtssm.py | 27 ---- flopy/mt3d/mttob.py | 3 +- flopy/plot/crosssection.py | 3 - flopy/plot/plotutil.py | 18 +-- flopy/seawat/swt.py | 7 - flopy/seawat/swtvsc.py | 8 -- flopy/utils/binaryfile.py | 2 - flopy/utils/check.py | 14 -- flopy/utils/compare.py | 2 - flopy/utils/gridgen.py | 2 +- flopy/utils/gridintersect.py | 5 - flopy/utils/lgrutil.py | 2 +- flopy/utils/mflistfile.py | 1 - flopy/utils/mfreadnam.py | 1 - flopy/utils/mtlistfile.py | 17 --- flopy/utils/observationfile.py | 4 - flopy/utils/swroutputfile.py | 1 - flopy/utils/util_array.py | 24 ---- flopy/utils/util_list.py | 13 -- flopy/utils/zonbud.py | 4 +- scripts/process_benchmarks.py | 4 - 83 files changed, 25 insertions(+), 765 deletions(-) delete mode 100644 flopy/discretization/readme.md diff --git a/.docs/groundwater_paper/scripts/uspb_capture_par.py b/.docs/groundwater_paper/scripts/uspb_capture_par.py index bce3dadcd..1803ba5fe 100644 --- a/.docs/groundwater_paper/scripts/uspb_capture_par.py +++ b/.docs/groundwater_paper/scripts/uspb_capture_par.py @@ -311,17 +311,12 @@ def doit(): # increment icnt icnt += 1 - ## test cg_model function - # t = cf_model(models[0], klay, cells[0][0], cells[0][1], Qcf, baseQ) - # sys.stdout.write(t) - # create multiprocessing pool pool = mp.Pool(processes=nproc) args = [ (cf_model, idx, len(cells), klay, i, j, Qcf, baseQ, ml.lpf.hdry) for idx, (i, j) in enumerate(cells) ] - # sys.stdout.write(args) output = pool.map(unpack_args, args, nproc) pool.close() pool.join() diff --git a/autotest/test_cbc_full3D.py 
b/autotest/test_cbc_full3D.py index e347a70fa..9df833b16 100644 --- a/autotest/test_cbc_full3D.py +++ b/autotest/test_cbc_full3D.py @@ -31,7 +31,6 @@ def load_mf2005(path, ws_out): ) # change work space - # ws_out = os.path.join(baseDir, name) ml.change_model_ws(ws_out) # save all budget data to a cell-by cell file diff --git a/autotest/test_export.py b/autotest/test_export.py index 6b935527f..df550590b 100644 --- a/autotest/test_export.py +++ b/autotest/test_export.py @@ -443,12 +443,10 @@ def test_export_array(function_tmpdir, example_data_path): assert np.abs(val - m.modelgrid.extent[0]) < 1e-6 # ascii grid origin will differ if it was unrotated # without scipy.rotate - # assert np.abs(val - m.modelgrid.xoffset) < 1e-6 if "yllcorner" in line.lower(): val = float(line.strip().split()[-1]) assert np.abs(val - m.modelgrid.extent[2]) < 1e-6 # without scipy.rotate - # assert np.abs(val - m.modelgrid.yoffset) < 1e-6 if "cellsize" in line.lower(): val = float(line.strip().split()[-1]) rot_cellsize = ( @@ -540,8 +538,6 @@ def test_shapefile(function_tmpdir, namfile): fnc_name = function_tmpdir / f"{model.name}.shp" fnc = model.export(fnc_name) - # fnc2 = m.export(fnc_name, package_names=None) - # fnc3 = m.export(fnc_name, package_names=['DIS']) s = Reader(fnc_name) assert ( @@ -899,7 +895,6 @@ def cellid(k, i, j, nrow, ncol): spd6 = flopy.mf6.ModflowGwfriv.stress_period_data.empty( gwf, maxbound=len(spd) ) - # spd6[0]['cellid'] = cellid(spd.k, spd.i, spd.j, m.nrow, m.ncol) spd6[0]["cellid"] = list(zip(spd.k, spd.i, spd.j)) for c in spd.dtype.names: if c in spd6[0].dtype.names: @@ -907,7 +902,6 @@ def cellid(k, i, j, nrow, ncol): # MFTransient list apparently requires entries for additional stress periods, # even if they are the same spd6[1] = spd6[0] - # irch = np.zeros((nrow, ncol)) riv6 = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=spd6) rch6 = flopy.mf6.ModflowGwfrcha(gwf, recharge=rech) @@ -922,7 +916,6 @@ def cellid(k, i, j, nrow, ncol): if not has_pkg("shapefile"): return - # rch6.export('{}/mf6.shp'.format(baseDir)) m.export(function_tmpdir / "mfnwt.shp") gwf.export(function_tmpdir / "mf6.shp") @@ -963,7 +956,6 @@ def test_export_huge_shapefile(function_tmpdir): perlen = 1 nstp = 1 tsmult = 1 - # perioddata = [[perlen, nstp, tsmult]] * 2 botm = np.zeros((nlay, nrow, ncol)) m = flopy.modflow.Modflow( @@ -1236,8 +1228,6 @@ def test_vtk_add_packages(function_tmpdir, example_data_path): # todo: pakbase.export() for vtk!!!! 
m.dis.export(ws, fmt="vtk", xml=True, binary=False) filetocheck = function_tmpdir / "DIS.vtk" - # totalbytes = os.path.getsize(filetocheck) - # assert(totalbytes==1019857) assert count_lines_in_file(filetocheck) == 27239 # upw with point scalar output @@ -1288,8 +1278,6 @@ def test_vtk_mf6(function_tmpdir, example_data_path): # check one filetocheck = function_tmpdir / "twrihfb2015_000000.vtk" - # totalbytes = os.path.getsize(filetocheck) - # assert(totalbytes==21609) assert count_lines_in_file(filetocheck) == 9537 @@ -1498,7 +1486,6 @@ def test_vtk_vertex(function_tmpdir, example_data_path): # disv test workspace = example_data_path / "mf6" / "test003_gwfs_disv" - # outfile = os.path.join("vtk_transient_test", "vtk_pacakages") sim = MFSimulation.load(sim_ws=workspace) gwf = sim.get_model("gwf_1") @@ -1760,7 +1747,6 @@ def test_vtk_export_disv1_model(function_tmpdir): vtk_points = grid.GetPoints() vtk_points = vtk_points.GetData() vtk_points = vtk_to_numpy(vtk_points) - # print(vtk_points) # get cell locations (ia format of point to cell relationship) cell_locations = vtk_to_numpy(grid.GetCellLocationsArray()) @@ -1825,7 +1811,6 @@ def test_vtk_export_disv2_model(function_tmpdir): vtk_points = grid.GetPoints() vtk_points = vtk_points.GetData() vtk_points = vtk_to_numpy(vtk_points) - # print(vtk_points) # get cell locations (ia format of point to cell relationship) cell_locations = vtk_to_numpy(grid.GetCellLocationsArray()) @@ -2035,7 +2020,6 @@ def test_vtk_export_disu_model(function_tmpdir): vtk_points = grid.GetPoints() vtk_points = vtk_points.GetData() vtk_points = vtk_to_numpy(vtk_points) - # print(vtk_points) # get cell locations (ia format of point to cell relationship) cell_locations = vtk_to_numpy(grid.GetCellLocationsArray())[0:9] diff --git a/autotest/test_grid.py b/autotest/test_grid.py index d27337f37..f716d20bc 100644 --- a/autotest/test_grid.py +++ b/autotest/test_grid.py @@ -136,7 +136,6 @@ def test_get_vertices(): xgrid = mg.xvertices ygrid = mg.yvertices - # a1 = np.array(mg.xyvertices) a1 = np.array( [ [xgrid[0, 0], ygrid[0, 0]], @@ -225,9 +224,7 @@ def test_get_rc_from_node_coordinates(): delr = [0.5] * 5 + [2.0] * 5 nrow = 10 ncol = 10 - mfdis = ModflowDis( - mf, nrow=nrow, ncol=ncol, delr=delr, delc=delc - ) # , xul=50, yul=1000) + mfdis = ModflowDis(mf, nrow=nrow, ncol=ncol, delr=delr, delc=delc) ygrid, xgrid, zgrid = mfdis.get_node_coordinates() for i in range(nrow): for j in range(ncol): diff --git a/autotest/test_grid_cases.py b/autotest/test_grid_cases.py index 00c778a29..5f3e748e2 100644 --- a/autotest/test_grid_cases.py +++ b/autotest/test_grid_cases.py @@ -240,7 +240,6 @@ def voronoi_polygon(): [1330.11116, 1809.788273], [399.1804436, 2998.515188], [914.7728404, 5132.494831], - # [1831.381546, 6335.543757], ] poly = np.array(domain) max_area = 100.0**2 @@ -393,7 +392,6 @@ def voronoi_many_polygons(): y = radius * np.sin(theta) + 20.0 circle_poly1 = [(x, y) for x, y in zip(x, y)] tri.add_polygon(circle_poly1) - # tri.add_hole((70, 20)) # add line through domain to force conforming cells line = [(x, x) for x in np.linspace(11, 89, 100)] diff --git a/autotest/test_gridgen.py b/autotest/test_gridgen.py index dc13a9204..66a8a3a98 100644 --- a/autotest/test_gridgen.py +++ b/autotest/test_gridgen.py @@ -829,7 +829,6 @@ def test_gridgen(function_tmpdir): ) == 0 ), msg - # ms_u.disu.write_file() # test mfusg without vertical pass-through gu.vertical_pass_through = False diff --git a/autotest/test_gridintersect.py b/autotest/test_gridintersect.py index 
43d8237fb..4497cc3a2 100644 --- a/autotest/test_gridintersect.py +++ b/autotest/test_gridintersect.py @@ -460,9 +460,6 @@ def test_rect_grid_linestring_in_and_out_of_cell2(): LineString([(5, 15), (5.0, 9), (15.0, 5.0), (5.0, 1.0)]) ) assert len(result) == 3 - # assert result.cellids[0] == (1, 0) - # assert result.cellids[1] == (1, 1) - # assert np.allclose(result.lengths.sum(), 21.540659228538015) @requires_pkg("shapely") diff --git a/autotest/test_headufile.py b/autotest/test_headufile.py index f275d5ff7..e00f5106c 100644 --- a/autotest/test_headufile.py +++ b/autotest/test_headufile.py @@ -57,7 +57,6 @@ def mfusg_model(module_tmpdir): ic = ra["nodenumber"][0] chdspd.append([ic, head, head]) - # gridprops = g.get_gridprops() gridprops = g.get_gridprops_disu5() # create the mfusg modoel diff --git a/autotest/test_lake_connections.py b/autotest/test_lake_connections.py index 40322b0ab..ae0690b55 100644 --- a/autotest/test_lake_connections.py +++ b/autotest/test_lake_connections.py @@ -241,9 +241,6 @@ def test_lake(function_tmpdir, example_data_path): gwf.dis.top = top_tm gwf.dis.botm = bot_tm.reshape(gwf.modelgrid.shape) - # v = gwf.dis.top.array - # v = gwf.dis.botm.array - k11_tm = k11.resample_to_grid( gwf.modelgrid, band=k11.bands[0], diff --git a/autotest/test_mf6.py b/autotest/test_mf6.py index 0e09c8258..34b9a25cd 100644 --- a/autotest/test_mf6.py +++ b/autotest/test_mf6.py @@ -763,7 +763,7 @@ def test_vor_binary_write(function_tmpdir, layered): "filename": "recharge.bin", "binary": True, "iprn": 1, - "data": np.full(vor.ncpl, 0.000001, dtype=float), # 0.000001, + "data": np.full(vor.ncpl, 0.000001, dtype=float), }, } chd_data = [ diff --git a/autotest/test_mfsimlist.py b/autotest/test_mfsimlist.py index 29ba1ce39..88f9a397b 100644 --- a/autotest/test_mfsimlist.py +++ b/autotest/test_mfsimlist.py @@ -160,7 +160,4 @@ def test_mfsimlist_memory_all(mem_option, function_tmpdir): total = 0.0 for key, value in mem_dict.items(): total += value["MEMORYSIZE"] - # total_ = mfsimlst.get_memory_usage(units=units) - # diff = total_ - total - # percent_diff = 100.0 * diff / total_ assert total > 0.0, "memory is not greater than zero" diff --git a/autotest/test_modflow.py b/autotest/test_modflow.py index c421869d5..5239ce4c6 100644 --- a/autotest/test_modflow.py +++ b/autotest/test_modflow.py @@ -555,8 +555,6 @@ def test_namfile_readwrite(function_tmpdir, example_data_path): delr=m.dis.delr.array, top=m.dis.top.array, botm=m.dis.botm.array, - # lenuni=3, - # length_multiplier=.3048, xoff=xll, yoff=yll, angrot=30, @@ -593,7 +591,6 @@ def test_namfile_readwrite(function_tmpdir, example_data_path): def test_read_usgs_model_reference(function_tmpdir, model_reference_path): nlay, nrow, ncol = 1, 30, 5 delr, delc = 250, 500 - # xll, yll = 272300, 5086000 mrf_path = function_tmpdir / model_reference_path.name shutil.copy(model_reference_path, mrf_path) @@ -782,32 +779,6 @@ def test_mflist_external(function_tmpdir): ml1.write_input() - # ml = Modflow( - # "mflist_test", - # model_ws=str(function_tmpdir), - # external_path=str(function_tmpdir / "ref"), - # ) - # dis = ModflowDis(ml, 1, 10, 10, nper=3, perlen=1.0) - # wel_data = { - # 0: [[0, 0, 0, -1], [1, 1, 1, -1]], - # 1: [[0, 0, 0, -2], [1, 1, 1, -1]], - # } - # wel = ModflowWel(ml, stress_period_data=wel_data) - # ml.write_input() - - # ml1 = Modflow.load( - # "mflist_test.nam", - # model_ws=ml.model_ws, - # verbose=True, - # forgive=False, - # check=False, - # ) - - # assert np.array_equal(ml.wel[0], ml1.wel[0]) - # assert np.array_equal(ml.wel[1], 
ml1.wel[1]) - - # ml1.write_input() - @excludes_platform("windows", ci_only=True) def test_single_mflist_entry_load(function_tmpdir, example_data_path): @@ -1111,7 +1082,6 @@ def test_default_oc_stress_period_data(function_tmpdir): lpf = ModflowLpf(m, ipakcb=100) wel_data = {0: [[0, 0, 0, -1000.0]]} wel = ModflowWel(m, ipakcb=101, stress_period_data=wel_data) - # spd = {(0, 0): ['save head', 'save budget']} oc = ModflowOc(m, stress_period_data=None) spd_oc = oc.stress_period_data tups = list(spd_oc.keys()) @@ -1269,10 +1239,6 @@ def test_load_with_list_reader(function_tmpdir): welra.tofile(f) welra.tofile(f) - # no need to run the model - # success, buff = m.run_model(silent=True) - # assert success, 'model did not terminate successfully' - # the m2 model will load all of these external files, possibly using sfac # and just create regular list input files for wel, drn, and ghb fname = "original.nam" diff --git a/autotest/test_mp6.py b/autotest/test_mp6.py index 962cc1074..f481fffd7 100644 --- a/autotest/test_mp6.py +++ b/autotest/test_mp6.py @@ -314,9 +314,6 @@ def test_loadtxt(function_tmpdir, mp6_test_path): ) assert np.array_equal(ra, ra2) - # epfilewithnans = os.path.join('../examples/data/mp6/', 'freybergmp.mpend') - # epd = EndpointFile(epfilewithnans) - @requires_exe("mf2005") def test_modpath(function_tmpdir, example_data_path): diff --git a/autotest/test_mp7.py b/autotest/test_mp7.py index f763e577b..ecadfd75d 100644 --- a/autotest/test_mp7.py +++ b/autotest/test_mp7.py @@ -866,7 +866,6 @@ def test_endpoint_output(function_tmpdir): f"endpoints in {os.path.basename(fpth0)} are not equal (within 1e-5) " f"to the endpoints in {os.path.basename(fpth1)}" ) - # assert not np.allclose(t0, t1), msg @requires_exe("mf6") diff --git a/autotest/test_mp7_cases.py b/autotest/test_mp7_cases.py index a85cdfe53..27a8f7ec3 100644 --- a/autotest/test_mp7_cases.py +++ b/autotest/test_mp7_cases.py @@ -65,7 +65,7 @@ class Mp7Cases: ) v = [(0,), (400,)] - pids = [1, 2] # [1000, 1001] + pids = [1, 2] part1 = ParticleData(v, structured=False, drape=1, particleids=pids) pg1 = ParticleGroup( particlegroupname="PG2", particledata=part1, filename="ex01a.pg2.sloc" diff --git a/autotest/test_plot_map_view.py b/autotest/test_plot_map_view.py index 2303d24ec..e6fc424b6 100644 --- a/autotest/test_plot_map_view.py +++ b/autotest/test_plot_map_view.py @@ -214,7 +214,6 @@ def test_map_view_contour_array_structured(function_tmpdir, ndim, rng): plt.clf() elif ndim == 2: # 1 layer as 2D - # arr[-1, :] = np.nan # add nan to test nan handling pmv = PlotMapView(modelgrid=grid, layer=l) contours = pmv.contour_array( a=arr.reshape(nlay, nrow, ncol)[l, :, :] diff --git a/autotest/test_postprocessing.py b/autotest/test_postprocessing.py index b3b6282c4..d45ab84f3 100644 --- a/autotest/test_postprocessing.py +++ b/autotest/test_postprocessing.py @@ -231,13 +231,6 @@ def test_get_structured_faceflows_freyberg( # plt.show() plt.close("all") - # uv0 = np.column_stack((q0.U, q0.V)) - # uv1 = np.column_stack((q1.U, q1.V)) - # diff = uv1 - uv0 - # assert ( - # np.allclose(uv0, uv1) - # ), "get_faceflows quivers are not equal to specific discharge vectors" - @pytest.mark.mf6 @requires_exe("mf6") diff --git a/autotest/test_sfr.py b/autotest/test_sfr.py index de08b964c..452e87123 100644 --- a/autotest/test_sfr.py +++ b/autotest/test_sfr.py @@ -162,8 +162,6 @@ def sfr_process(mfnam, sfrfile, model_ws, outfolder): "test1tr.nam", "test1tr.sfr", mf2005_model_path, function_tmpdir ) - # assert list(sfr.dataset_5.keys()) == [0, 1] - m, sfr = 
sfr_process( "testsfr2_tab.nam", "testsfr2_tab_ICALC1.sfr", @@ -429,7 +427,6 @@ def test_example(mf2005_model_path): delimiter=",", names=True, ) - # segment_data = {0: ss_segment_data} channel_flow_data = { 0: { @@ -480,7 +477,6 @@ def test_example(mf2005_model_path): dataset_5=dataset_5, ) - # assert istcb2 in m.package_units assert istcb2 in m.output_units assert True diff --git a/autotest/test_specific_discharge.py b/autotest/test_specific_discharge.py index 8a9703059..cd218ab30 100644 --- a/autotest/test_specific_discharge.py +++ b/autotest/test_specific_discharge.py @@ -430,7 +430,7 @@ def specific_discharge_comprehensive(function_tmpdir): assert np.isnan(qx[1, 0, 1]) # overall check - overall = np.nansum(qz) # np.nansum(qx) + np.nansum(qy) + np.nansum(qz) + overall = np.nansum(qz) assert np.allclose(overall, -4.43224582939148) # plot discharge in map view diff --git a/autotest/test_str.py b/autotest/test_str.py index 85c23333f..ccb9e72bf 100644 --- a/autotest/test_str.py +++ b/autotest/test_str.py @@ -28,7 +28,7 @@ def test_str_issue1164(function_tmpdir, example_data_path): # adjust stress period data spd0 = m.str.stress_period_data[0] - spd0["flow"][0] = 2.1149856e6 # 450000000000000000.0000e-17 + spd0["flow"][0] = 2.1149856e6 m.str.stress_period_data[0] = spd0 # write model datasets and run fixed diff --git a/autotest/test_subwt.py b/autotest/test_subwt.py index da649c19b..484a0f3bd 100644 --- a/autotest/test_subwt.py +++ b/autotest/test_subwt.py @@ -55,16 +55,10 @@ def test_subwt(function_tmpdir, ibound_path): ss=1.0e-6, ) - # temp_ib = np.ones((ml.nrow,ml.ncol),dtype=int) - # np.savetxt('temp_ib.dat',temp_ib,fmt='%1d') ibound = np.loadtxt(ibound_path) ibound[ibound == 5] = -1 ModflowBas(ml, ibound=ibound, strt=100.0) - # sp1_wells = pd.DataFrame(data=np.argwhere(ibound == 2), columns=['i', 'j']) - # sp1_wells.loc[:, 'k'] = 0 - # sp1_wells.loc[:, 'flux'] = 2200.0 - # sp1_wells = sp1_wells.loc[:, ['k', 'i', 'j', 'flux']].values.tolist() idxs = np.argwhere(ibound == 2) sp1_wells = [] for idx in idxs: @@ -111,8 +105,6 @@ def test_subwt(function_tmpdir, ibound_path): ml.run_model() - # contents = [f for f in function_tmpdir.glob("*.hds")] - hds_geo = HeadFile( function_tmpdir / f"{ml.name}.swt_geostatic_stress.hds", text="stress", diff --git a/autotest/test_uzf.py b/autotest/test_uzf.py index b95a852bb..ac3710ea1 100644 --- a/autotest/test_uzf.py +++ b/autotest/test_uzf.py @@ -639,8 +639,6 @@ def test_uzf_negative_iuzfopt(function_tmpdir): seepsurfk=True, ) - # uzf.write_file(os.path.join(model_ws, "uzf_neg.uzf")) - ml.write_input() success, buff = ml.run_model() assert success, "UZF model with -1 iuzfopt failed to run" diff --git a/autotest/test_zonbud_utility.py b/autotest/test_zonbud_utility.py index 4dd16c70c..8b4f35b66 100644 --- a/autotest/test_zonbud_utility.py +++ b/autotest/test_zonbud_utility.py @@ -125,12 +125,6 @@ def test_compare2zonebudget(cbc_f, zon_f, zbud_f, rtol): mxdiff = np.abs(a1 - a2).max() idxloc = np.argmax(np.abs(a1 - a2)) - # txt = '{}: {} - Max: {} a1: {} a2: {}'.format(time, - # name, - # mxdiff, - # a1[idxloc], - # a2[idxloc]) - # print(txt) s = f"Zonebudget arrays do not match at time {time} ({name}): {mxdiff}." 
assert allclose, s diff --git a/flopy/discretization/grid.py b/flopy/discretization/grid.py index fe5af9970..0a091320d 100644 --- a/flopy/discretization/grid.py +++ b/flopy/discretization/grid.py @@ -591,11 +591,6 @@ def zvertices(self): def xyzvertices(self): raise NotImplementedError("must define xyzvertices in child class") - # @property - # def indices(self): - # raise NotImplementedError( - # 'must define indices in child ' - # 'class to use this base class') @property def cross_section_vertices(self): return self.xyzvertices[0], self.xyzvertices[1] @@ -962,8 +957,6 @@ def get_local_coords(self, x, y): x, y = geometry.transform( x, y, self._xoff, self._yoff, self.angrot_radians, inverse=True ) - # x -= self._xoff - # y -= self._yoff return x, y diff --git a/flopy/discretization/readme.md b/flopy/discretization/readme.md deleted file mode 100644 index 9316037ea..000000000 --- a/flopy/discretization/readme.md +++ /dev/null @@ -1 +0,0 @@ -## Development notes for grid module \ No newline at end of file diff --git a/flopy/discretization/vertexgrid.py b/flopy/discretization/vertexgrid.py index a043e6d3d..ea49d1291 100644 --- a/flopy/discretization/vertexgrid.py +++ b/flopy/discretization/vertexgrid.py @@ -190,7 +190,6 @@ def shape(self): @property def top_botm(self): new_top = np.expand_dims(self._top, 0) - # new_botm = np.expand_dims(self._botm, 0) return np.concatenate((new_top, self._botm), axis=0) @property diff --git a/flopy/export/metadata.py b/flopy/export/metadata.py index f7bc32685..edb47af45 100644 --- a/flopy/export/metadata.py +++ b/flopy/export/metadata.py @@ -62,25 +62,19 @@ def __init__(self, sciencebase_id, model): # recommended global attributes self.naming_authority = "ScienceBase" # org. that provides the id - # self.history = None # This is a character array with a line for each invocation of a program that has modified the dataset. # Well-behaved generic netCDF applications should append a line containing: # date, time of day, user name, program name and command arguments. self.source = ( model.model_ws ) # The method of production of the original data. # If it was model-generated, source should name the model and its version. - # self.processing_level = None # A textual description of the processing (or quality control) level of the data. - # self.comment = None # Miscellaneous information about the data, not captured elsewhere. # This attribute is defined in the CF Conventions. self.acknowledgement = self._get_xml_attribute("datacred") - # self.license = None # - # self.standard_name_vocabulary = None self.date_created = self.sb["provenance"]["linkProcess"].get( "dateCreated" ) self.creator_name = self.creator.get("name") self.creator_email = self.creator.get("email") - # self.creator_url = self.sb['webLinks'][0].get('uri') self.creator_institution = self.creator["organization"].get( "displayText" ) diff --git a/flopy/export/netcdf.py b/flopy/export/netcdf.py index 02916ad06..cfc2907d2 100644 --- a/flopy/export/netcdf.py +++ b/flopy/export/netcdf.py @@ -187,8 +187,6 @@ def __init__( if self.model_grid.grid_type == "structured": self.dimension_names = ("layer", "y", "x") STANDARD_VARS.extend(["delc", "delr"]) - # elif self.model_grid.grid_type == 'vertex': - # self.dimension_names = ('layer', 'ncpl') else: raise Exception( f"Grid type {self.model_grid.grid_type} not supported." 
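By deleting the commented-out vertex branch, the hunk above makes the constraint explicit: `NetCdf` export is implemented for structured grids only (`dimension_names = ("layer", "y", "x")`), and any other grid type raises. A minimal usage sketch under that assumption; the file names are hypothetical and the `netCDF4` package must be installed:

```python
import flopy

# Load a structured MODFLOW-2005 model (name file and workspace are
# hypothetical).
m = flopy.modflow.Modflow.load("model.nam", model_ws="model_ws")

# A path ending in .nc routes the export through
# flopy.export.netcdf.NetCdf; for a vertex or unstructured grid this
# raises the exception shown above.
m.export("model_output.nc")
```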
@@ -400,9 +398,6 @@ def append(self, other, suffix="_1"): attrs["name"] = new_vname attrs["long_name"] = attrs["long_name"] + " " + suffix var = self.nc.variables[vname_norm] - # assert var.shape == array.shape,\ - # "{0} shape ({1}) doesn't make array shape ({2})".\ - # format(new_vname,str(var.shape),str(array.shape)) new_var = self.create_variable( new_vname, attrs, var.dtype, dimensions=var.dimensions ) @@ -944,14 +939,6 @@ def initialize_file(self, time_values=None): "This grid HAS been rotated before being saved to NetCDF. " "To compute the unrotated grid, use the origin point and this array." ) - # else: - # vertices - # attribs = {"units": self.model_grid.lenuni.strip('s'), - # "long_name": NC_LONG_NAMES.get("vertices", - # "List of vertices used in the model by cell"), - # } - # vertices = self.create_variable('vertices', attribs, dimensions=('ncpl',)) - # vertices[:] = self.model_grid.vertices # Workaround for CF/CDM. # http://www.unidata.ucar.edu/software/thredds/current/netcdf-java/ @@ -1248,18 +1235,6 @@ def create_variable( if self.nc is None: self.initialize_file() - # check that the requested dimension exists and - # build up the chuck sizes - # chunks = [] - # for dimension in dimensions: - # assert self.nc.dimensions.get(dimension) is not None, \ - # "netcdf.create_variable() dimension not found:" + dimension - # chunk = self.chunks[dimension] - # assert chunk is not None, \ - # "netcdf.create_variable() chunk size of {0} is None in self.chunks". \ - # format(dimension) - # chunks.append(chunk) - self.var_attr_dict[name] = attributes var = self.nc.createVariable( @@ -1268,8 +1243,7 @@ def create_variable( dimensions, fill_value=self.fillvalue, zlib=True, - ) # , - # chunksizes=tuple(chunks)) + ) for k, v in attributes.items(): try: var.setncattr(k, v) @@ -1299,7 +1273,6 @@ def add_global_attributes(self, attr_dict): """ if self.nc is None: - # self.initialize_file() mess = ( "NetCDF.add_global_attributes() should only " "be called after the file has been initialized" @@ -1408,7 +1381,6 @@ def get_entries(ds): return stuff # get a list of the flopy classes - # packages = inspect.getmembers(flopy.modflow, inspect.isclass) packages = [(pp.name[0], pp) for pp in self.model.packagelist] # get a list of the NetCDF variables attr = [v.split("_")[-1] for v in self.nc.variables] diff --git a/flopy/export/shapefile_utils.py b/flopy/export/shapefile_utils.py index b32df38ec..7c2303874 100644 --- a/flopy/export/shapefile_utils.py +++ b/flopy/export/shapefile_utils.py @@ -322,7 +322,6 @@ def model_attributes_to_shapefile( ) continue name = shape_attr_name(a.name, keep_layer=True) - # name = a.name.lower() array_dict[name] = a.array elif a.data_type == DataType.array3d: # Not sure how best to check if an object has array data @@ -362,9 +361,7 @@ def model_attributes_to_shapefile( assert arr.shape == horz_shape name = f"{aname}_{ilay + 1}" array_dict[name] = arr - elif ( - a.data_type == DataType.transient2d - ): # elif isinstance(a, Transient2d): + elif a.data_type == DataType.transient2d: # Not sure how best to check if an object has array data try: assert a.array is not None @@ -379,9 +376,7 @@ def model_attributes_to_shapefile( arr = a.array[kper][0] assert arr.shape == horz_shape array_dict[name] = arr - elif ( - a.data_type == DataType.transientlist - ): # elif isinstance(a, MfList): + elif a.data_type == DataType.transientlist: try: list(a.masked_4D_arrays_itr()) except: diff --git a/flopy/export/utils.py b/flopy/export/utils.py index b82f934f4..575413436 100644 --- 
a/flopy/export/utils.py +++ b/flopy/export/utils.py @@ -928,7 +928,6 @@ def mflist_export(f: Union[str, os.PathLike, NetCdf], mfl, **kwargs): arrays = mfl.to_array(kk) for name, array in arrays.items(): for k in range(array.shape[0]): - # aname = name+"{0:03d}_{1:02d}".format(kk, k) n = shapefile_utils.shape_attr_name(name, length=4) aname = f"{n}{k + 1}{int(kk) + 1}" array_dict[aname] = array[k] @@ -963,11 +962,7 @@ def mflist_export(f: Union[str, os.PathLike, NetCdf], mfl, **kwargs): elif isinstance(f, NetCdf) or isinstance(f, dict): base_name = mfl.package.name[0].lower() - # f.log("getting 4D masked arrays for {0}".format(base_name)) - # m4d = mfl.masked_4D_arrays - # f.log("getting 4D masked arrays for {0}".format(base_name)) - # for name, array in m4d.items(): for name, array in mfl.masked_4D_arrays_itr(): var_name = f"{base_name}_{name}" if isinstance(f, dict): @@ -1077,9 +1072,7 @@ def transient2d_export(f: Union[str, os.PathLike], t2d, fmt=None, **kwargs): ibnd = np.abs(modelgrid.idomain).sum(axis=0) mask = ibnd == 0 - # f.log("getting 4D array for {0}".format(t2d.name_base)) array = t2d.array - # f.log("getting 4D array for {0}".format(t2d.name_base)) with np.errstate(invalid="ignore"): if array.dtype not in [int, np.int32, np.int64]: if mask is not None: @@ -1091,12 +1084,6 @@ def transient2d_export(f: Union[str, os.PathLike], t2d, fmt=None, **kwargs): mx, mn = np.nanmax(array), np.nanmin(array) array[array <= min_valid] = netcdf.FILLVALUE array[array >= max_valid] = netcdf.FILLVALUE - # if t2d.model.bas6 is not None: - # array[:, 0, t2d.model.bas6.ibound.array[0] == 0] = \ - # f.fillvalue - # elif t2d.model.btn is not None: - # array[:, 0, t2d.model.btn.icbund.array[0] == 0] = \ - # f.fillvalue var_name = t2d.name.replace("_", "") if isinstance(f, dict): @@ -1237,41 +1224,22 @@ def array3d_export(f: Union[str, os.PathLike], u3d, fmt=None, **kwargs): if isinstance(var_name, list) or isinstance(var_name, tuple): var_name = var_name[0] var_name = var_name.replace(" ", "_").lower() - # f.log("getting 3D array for {0}".format(var_name)) array = u3d.array - # this is for the crappy vcont in bcf6 - # if isinstance(f,NetCdf) and array.shape != f.shape: - # f.log("broadcasting 3D array for {0}".format(var_name)) - # full_array = np.empty(f.shape) - # full_array[:] = np.nan - # full_array[:array.shape[0]] = array - # array = full_array - # f.log("broadcasting 3D array for {0}".format(var_name)) - # f.log("getting 3D array for {0}".format(var_name)) - # mask = None if modelgrid.idomain is not None and "ibound" not in var_name: mask = modelgrid.idomain == 0 if mask is not None and array.shape != mask.shape: - # f.log("broadcasting 3D array for {0}".format(var_name)) full_array = np.empty(mask.shape) full_array[:] = np.nan full_array[: array.shape[0]] = array array = full_array - # f.log("broadcasting 3D array for {0}".format(var_name)) # runtime warning issued in some cases - need to track down cause # happens when NaN is already in array with np.errstate(invalid="ignore"): if array.dtype not in [int, np.int32, np.int64]: - # if u3d.model.modelgrid.bas6 is not None and "ibound" not - # in var_name: - # array[u3d.model.modelgrid.bas6.ibound.array == 0] = - # np.nan - # elif u3d.model.btn is not None and 'icbund' not in var_name: - # array[u3d.model.modelgrid.btn.icbund.array == 0] = np.nan if mask is not None: array[mask] = np.nan array[array <= min_valid] = np.nan @@ -1411,9 +1379,7 @@ def array2d_export( elif isinstance(f, NetCdf) or isinstance(f, dict): # try to mask the array - assume 
layer 1 ibound is a good mask - # f.log("getting 2D array for {0}".format(u2d.name)) array = u2d.array - # f.log("getting 2D array for {0}".format(u2d.name)) with np.errstate(invalid="ignore"): if array.dtype not in [int, np.int32, np.int64]: diff --git a/flopy/export/vtk.py b/flopy/export/vtk.py index 96703dc67..16aa5dcc0 100644 --- a/flopy/export/vtk.py +++ b/flopy/export/vtk.py @@ -574,7 +574,6 @@ def _build_hfbs(self, pkg): pts = [] for v in v1: - # ix = np.where(v2 == v) ix = np.where((v2.T[0] == v[0]) & (v2.T[1] == v[1])) if len(ix[0]) > 0 and len(pts) < 2: pts.append(v2[ix[0][0]]) diff --git a/flopy/mbase.py b/flopy/mbase.py index 525d754e3..0cf0f727e 100644 --- a/flopy/mbase.py +++ b/flopy/mbase.py @@ -570,37 +570,6 @@ def laycbd(self): except AttributeError: return None - # we don't need these - no need for controlled access to array_free_format - # def set_free_format(self, value=True): - # """ - # Set the free format flag for the model instance - # - # Parameters - # ---------- - # value : bool - # Boolean value to set free format flag for model. (default is True) - # - # Returns - # ------- - # - # """ - # if not isinstance(value, bool): - # print('Error: set_free_format passed value must be a boolean') - # return False - # self.array_free_format = value - # - # def get_free_format(self): - # """ - # Return the free format flag for the model - # - # Returns - # ------- - # out : bool - # Free format flag for the model - # - # """ - # return self.array_free_format - def next_unit(self, i=None): if i is not None: self.__onunit__ = i - 1 @@ -759,7 +728,6 @@ def __getattr__(self, item): else: return None - # return self.get_package(item) # to avoid infinite recursion if ( item == "_packagelist" @@ -1187,10 +1155,6 @@ def add_existing_package( ptype = filename.split(".")[-1] ptype = str(ptype).upper() - # for pak in self.packagelist: - # if ptype in pak.name: - # print("BaseModel.add_existing_package() warning: " +\ - # "replacing existing package {0}".format(ptype)) class Obj: pass @@ -1331,10 +1295,6 @@ def change_model_ws( os.makedirs(new_pth) except: raise OSError(f"{new_pth} not valid, workspace-folder") - # line = '\n{} not valid, workspace-folder '.format(new_pth) + \ - # 'was changed to {}\n'.format(os.getcwd()) - # print(line) - # new_pth = os.getcwd() # --reset the model workspace old_pth = self._model_ws @@ -1368,14 +1328,10 @@ def _reset_external(self, pth, old_pth): for ext_file, output in zip( self.external_fnames, self.external_output ): - # new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1]) # this is a wicked mess if output: - # new_ext_file = os.path.join(pth, os.path.split(ext_file)[-1]) new_ext_file = ext_file else: - # fpth = os.path.abspath(os.path.join(old_pth, ext_file)) - # new_ext_file = os.path.relpath(fpth, os.path.abspath(pth)) fdir = os.path.dirname(ext_file) if fdir == "": fpth = os.path.abspath(os.path.join(old_pth, ext_file)) @@ -1407,8 +1363,6 @@ def _set_name(self, value): def __setattr__(self, key, value): if key == "free_format_input": - # if self.bas6 is not None: - # self.bas6.ifrefm = value super().__setattr__(key, value) elif key == "name": self._set_name(value) @@ -1533,7 +1487,6 @@ def write_input(self, SelPackList=False, check=False): print(" ") # write name file self.write_name_file() - # os.chdir(org_dir) def write_name_file(self): """ @@ -1798,8 +1751,6 @@ def run_model( def q_output(output, q): for line in iter(output.readline, b""): q.put(line) - # time.sleep(1) - # output.close() # create a list of arguments to pass to 
Popen if processors is not None: diff --git a/flopy/mf6/coordinates/simulationtime.py b/flopy/mf6/coordinates/simulationtime.py index df58080b0..9ef891508 100644 --- a/flopy/mf6/coordinates/simulationtime.py +++ b/flopy/mf6/coordinates/simulationtime.py @@ -51,16 +51,6 @@ def get_num_steps(self): def get_mult(self): return self._tsmult - # def get_ts_start_time(self, timestep): - - # def get_sp_start_time(self, timestep): - - # def get_ts_end_time(self, timestep): - - # def get_sp_end_time(self, timestep): - - # def get_ts_length(self, timestep): - class SimulationTime: """ @@ -92,14 +82,6 @@ class SimulationTime: def __init__(self, simdata): self.simdata = simdata - # self.time_units = simdata[('TDIS', 'OPTIONS', 'time_units')] - # self.stress_periods = simdata[('TDIS', 'STRESS_PERIODS', - # 'perlen,nstp,tsmult') - # self.calendar_start_time = calendar_start_time - - # def get_stress_period_array(self): - # return np.arange(1, self.get_num_stress_periods(), 1, int) - def get_time_units(self): time_units = self.simdata.mfdata[ ("tdis", "options", "time_units") @@ -132,11 +114,3 @@ def get_sp_time_steps(self, sp_num): f"Stress period {sp_num} was requested but does not exist." ) return period_data[sp_num][1] - - # def get_stress_period(self, sp_num): - - # def remove_stress_period(self, num_stress_period): - - # def copy_append_stress_period(self, sp_num): - - # def split_stress_period(self, sp_num): diff --git a/flopy/mf6/data/mfstructure.py b/flopy/mf6/data/mfstructure.py index 0b4c65969..34a78a780 100644 --- a/flopy/mf6/data/mfstructure.py +++ b/flopy/mf6/data/mfstructure.py @@ -1440,7 +1440,6 @@ def __init__(self, data_item, model_data, package_type, dfn_list): self.parameter_name = data_item.parameter_name self.one_per_pkg = data_item.one_per_pkg - # self.data_item_structures_dict = {} self.data_item_structures = [] self.expected_data_items = {} self.shape = data_item.shape diff --git a/flopy/mf6/mfpackage.py b/flopy/mf6/mfpackage.py index f16d7be09..3c809d667 100644 --- a/flopy/mf6/mfpackage.py +++ b/flopy/mf6/mfpackage.py @@ -1473,7 +1473,6 @@ def _write_block(self, fd, block_header, ext_file_action): if basic_list: ext_fname = dataset.external_file_name() if ext_fname is not None: - # if dataset.has_modified_ext_data(): binary = dataset.binary_ext_data() # write block contents to external file fd_main, fd = self._prepare_external( @@ -1503,7 +1502,6 @@ def _write_block(self, fd, block_header, ext_file_action): if basic_list: ext_fname = dataset.external_file_name(transient_key) if ext_fname is not None: - # if dataset.has_modified_ext_data(transient_key): binary = dataset.binary_ext_data(transient_key) # write block contents to external file fd_main, fd = self._prepare_external( diff --git a/flopy/mfusg/mfusglpf.py b/flopy/mfusg/mfusglpf.py index 5e416f121..a3ca1bab0 100644 --- a/flopy/mfusg/mfusglpf.py +++ b/flopy/mfusg/mfusglpf.py @@ -776,7 +776,6 @@ def _load_layer_properties( parm_dict = {} if nplpf > 0: par_types, parm_dict = mfpar.load(f_obj, nplpf, model.verbose) - # print parm_dict # non-parameter data transient = not dis.steady.all() diff --git a/flopy/modflow/mf.py b/flopy/modflow/mf.py index 40f279095..b1c2a65ae 100644 --- a/flopy/modflow/mf.py +++ b/flopy/modflow/mf.py @@ -147,9 +147,6 @@ def __init__( # external option stuff self.array_free_format = True self.array_format = "modflow" - # self.external_fnames = [] - # self.external_units = [] - # self.external_binflag = [] self.load_fail = False # the starting external data unit number @@ -237,16 +234,6 @@ def 
__repr__(self): ) return s - # - # def next_ext_unit(self): - # """ - # Function to encapsulate next_ext_unit attribute - # - # """ - # next_unit = self.__next_ext_unit + 1 - # self.__next_ext_unit += 1 - # return next_unit - @property def modeltime(self): if self.get_package("disu") is not None: diff --git a/flopy/modflow/mfbas.py b/flopy/modflow/mfbas.py index bae50fdb5..66433fd3d 100644 --- a/flopy/modflow/mfbas.py +++ b/flopy/modflow/mfbas.py @@ -141,8 +141,6 @@ def __init__( self.ichflg = ichflg self.stoper = stoper - # self.ifrefm = ifrefm - # model.array_free_format = ifrefm model.free_format_input = ifrefm self.hnoflo = hnoflo diff --git a/flopy/modflow/mfbct.py b/flopy/modflow/mfbct.py index 6f0db5368..dbd511521 100644 --- a/flopy/modflow/mfbct.py +++ b/flopy/modflow/mfbct.py @@ -70,8 +70,6 @@ def __init__( self.porosity = Util3d( model, (nlay, nrow, ncol), np.float32, porosity, "porosity" ) - # self.arad = Util2d(model, (1, nja), np.float32, - # arad, 'arad') self.dlh = Util3d(model, (nlay, nrow, ncol), np.float32, dlh, "dlh") self.dlv = Util3d(model, (nlay, nrow, ncol), np.float32, dlv, "dlv") self.dth = Util3d(model, (nlay, nrow, ncol), np.float32, dth, "dth") diff --git a/flopy/modflow/mffhb.py b/flopy/modflow/mffhb.py index 2aec2aa09..0706901f8 100644 --- a/flopy/modflow/mffhb.py +++ b/flopy/modflow/mffhb.py @@ -335,7 +335,6 @@ def write_file(self): """ nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper f = open(self.fn_path, "w") - # f.write('{0:s}\n'.format(self.heading)) # Data set 1 f.write(f"{self.nbdtim} ") diff --git a/flopy/modflow/mfflwob.py b/flopy/modflow/mfflwob.py index 90e396ea0..1513ed3f0 100644 --- a/flopy/modflow/mfflwob.py +++ b/flopy/modflow/mfflwob.py @@ -164,38 +164,28 @@ def __init__( ] pakunits = {"chob": 40, "gbob": 41, "drob": 42, "rvob": 43} outunits = {"chob": 140, "gbob": 141, "drob": 142, "rvob": 143} - # if unitnumber is None: - # unitnumber = [40, 140, 41, 141, 42, 142, 43, 143] if flowtype.upper().strip() == "CHD": name = ["CHOB", "DATA"] extension = extension[0:2] - # unitnumber = unitnumber[0:2] - # iufbobsv = unitnumber[1] self._ftype = "CHOB" self.url = "chob.html" self.heading = "# CHOB for MODFLOW, generated by Flopy." elif flowtype.upper().strip() == "GHB": name = ["GBOB", "DATA"] extension = extension[2:4] - # unitnumber = unitnumber[2:4] - # iufbobsv = unitnumber[1] self._ftype = "GBOB" self.url = "gbob.html" self.heading = "# GBOB for MODFLOW, generated by Flopy." elif flowtype.upper().strip() == "DRN": name = ["DROB", "DATA"] extension = extension[4:6] - # unitnumber = unitnumber[4:6] - # iufbobsv = unitnumber[1] self._ftype = "DROB" self.url = "drob.html" self.heading = "# DROB for MODFLOW, generated by Flopy." elif flowtype.upper().strip() == "RIV": name = ["RVOB", "DATA"] extension = extension[6:8] - # unitnumber = unitnumber[6:8] - # iufbobsv = unitnumber[1] self._ftype = "RVOB" self.url = "rvob.html" self.heading = "# RVOB for MODFLOW, generated by Flopy." 
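The if/elif chain above dispatches the user-facing `flowtype` argument ("CHD", "GHB", "DRN", "RIV") to the four flow-observation package variants. A condensed sketch of the same mapping in dictionary form; the helper function is hypothetical, not part of FloPy:

```python
# Flowtype-to-package mapping, condensed from the if/elif chain above.
FLOWTYPE_TO_FTYPE = {
    "CHD": "CHOB",  # constant-head flow observations
    "GHB": "GBOB",  # general-head boundary flow observations
    "DRN": "DROB",  # drain flow observations
    "RIV": "RVOB",  # river flow observations
}


def ftype_for(flowtype: str) -> str:
    # Normalize the same way ModflowFlwob does: upper() then strip().
    key = flowtype.upper().strip()
    if key not in FLOWTYPE_TO_FTYPE:
        raise ValueError(f"unsupported flowtype: {flowtype!r}")
    return FLOWTYPE_TO_FTYPE[key]
```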
@@ -318,7 +308,6 @@ def write_file(self): # write sections 3-5 looping through observations groups c = 0 for i in range(self.nqfb): - # while (i < self.nqfb): # write section 3 f_fbob.write(f"{self.nqobfb[i]:10d}{self.nqclfb[i]:10d}\n") diff --git a/flopy/modflow/mfhfb.py b/flopy/modflow/mfhfb.py index 19f30fbee..6d9479c13 100644 --- a/flopy/modflow/mfhfb.py +++ b/flopy/modflow/mfhfb.py @@ -313,7 +313,6 @@ def load(cls, f, model, ext_unit_dict=None): it = 2 while it < len(t): toption = t[it] - # print it, t[it] if toption.lower() == "noprint": options.append(toption) elif "aux" in toption.lower(): diff --git a/flopy/modflow/mflpf.py b/flopy/modflow/mflpf.py index 0b192bcd7..8dd6fd33b 100644 --- a/flopy/modflow/mflpf.py +++ b/flopy/modflow/mflpf.py @@ -480,9 +480,6 @@ def load(cls, f, model, ext_unit_dict=None, check=True): if model.version == "mfusg" and not model.structured: ikcflag = int(t[3]) item1_len = 4 - # if ipakcb != 0: - # model.add_pop_key_list(ipakcb) - # ipakcb = 53 # options storagecoefficient = False constantcv = False @@ -546,7 +543,6 @@ def load(cls, f, model, ext_unit_dict=None, check=True): par_types = [] if nplpf > 0: par_types, parm_dict = mfpar.load(f, nplpf, model.verbose) - # print parm_dict # non-parameter data transient = not dis.steady.all() diff --git a/flopy/modflow/mfmlt.py b/flopy/modflow/mfmlt.py index 5073420fd..69413dffc 100644 --- a/flopy/modflow/mfmlt.py +++ b/flopy/modflow/mfmlt.py @@ -84,7 +84,6 @@ def __init__( if mult_dict is not None: self.nml = len(mult_dict) self.mult_dict = mult_dict - # print mult_dict self.parent.add_package(self) def write_file(self): diff --git a/flopy/modflow/mfmnw1.py b/flopy/modflow/mfmnw1.py index 316f32313..d18c883b8 100644 --- a/flopy/modflow/mfmnw1.py +++ b/flopy/modflow/mfmnw1.py @@ -127,12 +127,6 @@ def __init__( losstype # -string indicating head loss type for each well ) self.wel1_bynode_qsum = wel1_bynode_qsum # -nested list containing file names, unit numbers, and ALLTIME flag for auxiliary output, e.g. [['test.ByNode',92,'ALLTIME']] - # if stress_period_data is not None: - # for per, spd in stress_period_data.items(): - # for n in spd.dtype.names: - # self.stress_period_data[per] = ModflowMnw1.get_empty_stress_period_data(len(spd), - # structured=self.parent.structured) - # self.stress_period_data[per][n] = stress_period_data[per][n] if dtype is not None: self.dtype = dtype else: @@ -149,9 +143,6 @@ def __init__( "LOSSTYPE (%s) must be one of the following: skin, linear, nonlinear" % (self.losstype) ) - # auxFileExtensions = ['wl1','ByNode','Qsum'] - # for each in self.wel1_bynode_qsum: - # assert each[0].split('.')[1] in auxFileExtensions, 'File extensions in "wel1_bynode_qsum" must be one of the following: ".wl1", ".ByNode", or ".Qsum".' 
self.parent.add_package(self) @staticmethod @@ -278,7 +269,6 @@ def write_file(self): """ # -open file for writing - # f_mnw1 = open( self.file_name[0], 'w' ) f = open(self.fn_path, "w") # -write header diff --git a/flopy/modflow/mfmnw2.py b/flopy/modflow/mfmnw2.py index f77859123..1ba021496 100644 --- a/flopy/modflow/mfmnw2.py +++ b/flopy/modflow/mfmnw2.py @@ -731,9 +731,7 @@ def _set_attributes_from_node_data(self): names = Mnw.get_item2_names(node_data=self.node_data) for n in names: # assign by node variables as lists if they are being included - if ( - n in self.by_node_variables - ): # and len(np.unique(self.node_data[n])) > 1: + if n in self.by_node_variables: self.__dict__[n] = list(self.node_data[n]) else: self.__dict__[n] = self.node_data[n][0] @@ -841,7 +839,6 @@ def _getloc(n): continue # only write variables by node if they are unique lists > length 1 if len(np.unique(val)) > 1: - # if isinstance(val, list) or val < 0: fmt = " " + float_format f_mnw.write(fmt.format(self.node_data[var][n])) f_mnw.write("\n") @@ -1059,10 +1056,7 @@ def __init__( ] # recarray of Mnw properties by node self.nodtot = len(self.node_data) self._sort_node_data() - # self.node_data.sort(order=['wellid', 'k']) - # Python 3.5.0 produces a segmentation fault when trying to sort BR MNW wells - # self.node_data.sort(order='wellid', axis=0) self.mnw = mnw # dict or list of Mnw objects self.stress_period_data = MfList( @@ -1553,12 +1547,6 @@ def make_mnw_objects(self): for wellid in mnws: nd = node_data[node_data.wellid == wellid] nnodes = Mnw.get_nnodes(nd) - # if tops and bottoms are specified, flip nnodes - # maxtop = np.max(nd.ztop) - # minbot = np.min(nd.zbotm) - # if maxtop - minbot > 0 and nnodes > 0: - # nnodes *= -1 - # reshape stress period data to well mnwspd = Mnw.get_empty_stress_period_data( self.nper, aux_names=self.aux ) diff --git a/flopy/modflow/mfmnwi.py b/flopy/modflow/mfmnwi.py index b49bfc400..a0fbdb89d 100644 --- a/flopy/modflow/mfmnwi.py +++ b/flopy/modflow/mfmnwi.py @@ -314,10 +314,6 @@ def write_file(self): # -open file for writing f = open(self.fn_path, "w") - # header not supported - # # -write header - # f.write('{}\n'.format(self.heading)) - # dataset 1 - WEL1flag QSUMflag SYNDflag line = f"{self.wel1flag:10d}" line += f"{self.qsumflag:10d}" diff --git a/flopy/modflow/mfoc.py b/flopy/modflow/mfoc.py index 5a71c3e62..b98f38ac7 100644 --- a/flopy/modflow/mfoc.py +++ b/flopy/modflow/mfoc.py @@ -363,13 +363,13 @@ def check(self, f=None, verbose=True, level=1, checktype=None): if len(words) < 2: chk._add_to_summary( "Warning", - package="OC", # value=kperkstp, + package="OC", desc=f"action {action!r} ignored; too few words", ) elif words[0:2] not in expected_actions: chk._add_to_summary( "Warning", - package="OC", # value=kperkstp, + package="OC", desc=f"action {action!r} ignored", ) # TODO: check data list of layers for some actions @@ -377,7 +377,7 @@ def check(self, f=None, verbose=True, level=1, checktype=None): # repeat as many times as remaining keys not used chk._add_to_summary( "Warning", - package="OC", # value=kperkstp, + package="OC", desc="action(s) defined in OC stress_period_data ignored " "as they are not part the stress periods defined by DIS", ) diff --git a/flopy/modflow/mfpar.py b/flopy/modflow/mfpar.py index 0b61758ec..de4cea6c4 100644 --- a/flopy/modflow/mfpar.py +++ b/flopy/modflow/mfpar.py @@ -301,10 +301,8 @@ def parameter_fill(model, shape, findkey, parm_dict, findlayer=None): pv = float(model.mfpar.pval.pval_dict[key.lower()]) except: pv = float(parval) - # 
print partyp, parval, nclu, clusters if partyp == findkey: for [layer, mltarr, zonarr, izones] in clusters: - # print layer, mltarr, zonarr, izones foundlayer = False if findlayer is None: foundlayer = True diff --git a/flopy/modflow/mfparbc.py b/flopy/modflow/mfparbc.py index a94e070c7..85250c576 100644 --- a/flopy/modflow/mfparbc.py +++ b/flopy/modflow/mfparbc.py @@ -203,7 +203,6 @@ def loadarray(f, npar, verbose=False): pinst, ] - # print bc_parms bcpar = ModflowParBc(bc_parms) return bcpar @@ -245,7 +244,6 @@ def parameter_bcfill(model, shape, parm_dict, pak_parms): dtype = np.float32 data = np.zeros(shape, dtype=dtype) for key, value in parm_dict.items(): - # print key, value pdict, idict = pak_parms.bc_parms[key] inst_data = idict[value] if model.mfpar.pval is None: @@ -257,7 +255,6 @@ def parameter_bcfill(model, shape, parm_dict, pak_parms): pv = float(pdict["parval"]) for [mltarr, zonarr, izones] in inst_data: model.parameter_load = True - # print mltarr, zonarr, izones if mltarr.lower() == "none": mult = np.ones(shape, dtype=dtype) else: diff --git a/flopy/modflow/mfpbc.py b/flopy/modflow/mfpbc.py index 2f857ea31..9239d1385 100644 --- a/flopy/modflow/mfpbc.py +++ b/flopy/modflow/mfpbc.py @@ -50,18 +50,6 @@ def __init__( self.mxcos, self.cosines = self.assign_layer_row_column_data( cosines, 3, zerobase=False ) - # self.mxcos = 0 - # if (cosines != None): - # error_message = 'cosines must have 3 columns' - # if (not isinstance(cosines, list)): - # cosines = [cosines] - # for a in cosines: - # a = np.atleast_2d(a) - # nr, nc = a.shape - # assert nc == 3, error_message - # if (nr > self.mxcos): - # self.mxcos = nr - # self.cosines = cosines self.np = 0 self.parent.add_package(self) diff --git a/flopy/modflow/mfpcg.py b/flopy/modflow/mfpcg.py index 2b2ee78d2..8145c21a8 100644 --- a/flopy/modflow/mfpcg.py +++ b/flopy/modflow/mfpcg.py @@ -248,7 +248,6 @@ def load(cls, f, model, ext_unit_dict=None): # free format if ifrfm: t = line_parse(line) - # t = line.strip().split() mxiter = int(t[0]) iter1 = int(t[1]) npcond = int(t[2]) @@ -262,7 +261,6 @@ def load(cls, f, model, ext_unit_dict=None): try: line = f.readline() t = line_parse(line) - # t = line.strip().split() hclose = float(t[0]) rclose = float(t[1]) relax = float(t[2]) diff --git a/flopy/modflow/mfsfr2.py b/flopy/modflow/mfsfr2.py index 68c5bffa9..dc155ecc7 100644 --- a/flopy/modflow/mfsfr2.py +++ b/flopy/modflow/mfsfr2.py @@ -306,9 +306,7 @@ class ModflowSfr2(Package): nsfrpar = 0 default_value = 0.0 - # LENUNI = {"u": 0, "f": 1, "m": 2, "c": 3} len_const = {1: 1.486, 2: 1.0, 3: 100.0} - # {"u": 0, "s": 1, "m": 2, "h": 3, "d": 4, "y": 5} time_const = {1: 1.0, 2: 60.0, 3: 3600.0, 4: 86400.0, 5: 31557600.0} def __init__( @@ -511,7 +509,6 @@ def __init__( nseg = len(segment_data[i]) self.segment_data[i] = self.get_empty_segment_data(nseg) for n in segment_data[i].dtype.names: - # inds = (segment_data[i]['nseg'] -1).astype(int) self.segment_data[i][n] = segment_data[i][n] # compute outreaches if nseg and outseg columns have non-default values if ( @@ -920,7 +917,6 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): # these could also be implemented as structured arrays with a column for segment number current_6d = {} current_6e = {} - # print(i,icalc,nstrm,isfropt,reachinput) for j in range(itmp): dataset_6a = _parse_6a(f.readline(), option) current_aux[j] = dataset_6a[-1] @@ -970,7 +966,6 @@ def load(cls, f, model, nper=None, gwt=False, nsol=1, ext_unit_dict=None): dataset_6d.append( 
_get_dataset(f.readline(), [0.0] * 8) ) - # dataset_6d.append(list(map(float, f.readline().strip().split()))) current_6d[temp_nseg] = dataset_6d if icalc == 4: nstrpts = dataset_6a[5] @@ -1095,7 +1090,7 @@ def check(self, f=None, verbose=True, level=1, checktype=None): pth = os.path.join(self.parent.model_ws, f) f = open(pth, "w") f.write(f"{chk.txt}\n") - # f.close() + f.close() return chk def assign_layers(self, adjust_botms=False, pad=1.0): @@ -1152,8 +1147,6 @@ def assign_layers(self, adjust_botms=False, pad=1.0): self.reach_data.j == jb ) botm[-1, ib, jb] = streambotms[inds].min() - pad - # l.append(botm[-1, ib, jb]) - # botm[-1, below_i, below_j] = streambotms[below] - pad l.append(botm[-1, below_i, below_j]) header += ",new_model_botm" self.parent.dis.botm = botm @@ -1205,65 +1198,7 @@ def get_outlets(self, level=0, verbose=True): per > 0 > self.dataset_5[per][0] ): # skip stress periods where seg data not defined continue - # segments = self.segment_data[per].nseg - # outsegs = self.segment_data[per].outseg - # - # all_outsegs = np.vstack([segments, outsegs]) - # max_outseg = all_outsegs[-1].max() - # knt = 1 - # while max_outseg > 0: - # - # nextlevel = np.array([outsegs[s - 1] if s > 0 and s < 999999 else 0 - # for s in all_outsegs[-1]]) - # - # all_outsegs = np.vstack([all_outsegs, nextlevel]) - # max_outseg = nextlevel.max() - # if max_outseg == 0: - # break - # knt += 1 - # if knt > self.nss: - # # subset outsegs map to only include rows with outseg number > 0 in last column - # circular_segs = all_outsegs.T[all_outsegs[-1] > 0] - # - # # only retain one instance of each outseg number at iteration=nss - # vals = [] # append outseg values to vals after they've appeared once - # mask = [(True, vals.append(v))[0] - # if v not in vals - # else False for v in circular_segs[-1]] - # circular_segs = circular_segs[:, np.array(mask)] - # - # # cull the circular segments array to remove duplicate instances of routing circles - # circles = [] - # duplicates = [] - # for i in range(np.shape(circular_segs)[0]): - # # find where values in the row equal the last value; - # # record the index of the second to last instance of last value - # repeat_start_ind = np.where(circular_segs[i] == circular_segs[i, -1])[0][-2:][0] - # # use that index to slice out the repeated segment sequence - # circular_seq = circular_segs[i, repeat_start_ind:].tolist() - # # keep track of unique sequences of repeated segments - # if set(circular_seq) not in circles: - # circles.append(set(circular_seq)) - # duplicates.append(False) - # else: - # duplicates.append(True) - # circular_segs = circular_segs[~np.array(duplicates), :] - # - # txt += '{0} instances where an outlet was not found after {1} consecutive segments!\n' \ - # .format(len(circular_segs), self.nss) - # if level == 1: - # txt += '\n'.join([' '.join(map(str, row)) for row in circular_segs]) + '\n' - # else: - # f = 'circular_routing.csv' - # np.savetxt(f, circular_segs, fmt='%d', delimiter=',', header=txt) - # txt += 'See {} for details.'.format(f) - # if verbose: - # print(txt) - # break - # # the array of segment sequence is useful for other other operations, - # # such as plotting elevation profiles - # self.outsegs[per] = all_outsegs - # + # use graph instead of above loop nrow = len(self.segment_data[per].nseg) ncol = np.max( @@ -1275,12 +1210,6 @@ def get_outlets(self, level=0, verbose=True): all_outsegs[i, : len(v)] = v all_outsegs.sort(axis=0) self.outsegs[per] = all_outsegs - # create a dictionary listing outlets associated with each segment - 
# outlet is the last value in each row of outseg array that is != 0 or 999999 - # self.outlets[per] = {i + 1: r[(r != 0) & (r != 999999)][-1] - # if len(r[(r != 0) & (r != 999999)]) > 0 - # else i + 1 - # for i, r in enumerate(all_outsegs.T)} self.outlets[per] = { k: self.paths[k][-1] if k in self.paths else k for k in self.segment_data[per].nseg @@ -1290,7 +1219,7 @@ def get_outlets(self, level=0, verbose=True): def reset_reaches(self): self.reach_data.sort(order=["iseg", "ireach"]) reach_data = self.reach_data - segment_data = list(set(self.reach_data.iseg)) # self.segment_data[0] + segment_data = list(set(self.reach_data.iseg)) reach_counts = np.bincount(reach_data.iseg)[1:] reach_counts = dict(zip(range(1, len(reach_counts) + 1), reach_counts)) ireach = [list(range(1, reach_counts[s] + 1)) for s in segment_data] @@ -1769,7 +1698,6 @@ def _write_reach_data(self, f_sfr): ), "MfList.__tofile() data arg not a recarray" # decide which columns to write - # columns = self._get_item2_names() columns = _get_item2_names( self.nstrm, self.reachinput, @@ -1777,10 +1705,6 @@ def _write_reach_data(self, f_sfr): structured=self.parent.structured, ) - # Add one to the kij indices - # names = self.reach_data.dtype.names - # lnames = [] - # [lnames.append(name.lower()) for name in names] # --make copy of data for multiple calls d = np.array(self.reach_data) for idx in ["k", "i", "j", "node"]: @@ -1973,10 +1897,6 @@ def write_file(self, filename=None): """ - # tabfiles = False - # tabfiles_dict = {} - # transroute = False - # reachinput = False if filename is not None: self.fn_path = filename @@ -2034,7 +1954,6 @@ def write_file(self, filename=None): f_sfr.write("\n") if icalc == 4: - # nstrpts = self.segment_data[i][j][5] for k in range(3): for d in self.channel_flow_data[i][nseg][k]: f_sfr.write(f"{d:.2f} ") @@ -2405,7 +2324,6 @@ def numbering(self): passed = False if self.verbose: print(headertxt.strip()) - # for per, segment_data in self.segment_data.items(): inds = (sd.outseg < sd.nseg) & (sd.outseg > 0) @@ -2436,7 +2354,6 @@ def routing(self): if self.verbose: print(headertxt.strip()) - # txt += self.sfr.get_outlets(level=self.level, verbose=False) # will print twice if verbose=True # simpler check method using paths from routing graph circular_segs = [k for k, v in self.sfr.paths.items() if v is None] if len(circular_segs) > 0: @@ -2838,10 +2755,6 @@ def elevations(self, min_strtop=-10, max_strtop=15000): # (for other uses). Not sure if other check methods should also copy reach_data directly from # SFR package instance for consistency. 
- # use outreach values to get downstream elevations - # non_outlets = reach_data[reach_data.outreach != 0] - # outreach_elevdn = np.array([reach_data.strtop[o - 1] for o in reach_data.outreach]) - # d_strtop = outreach_elevdn[reach_data.outreach != 0] - non_outlets.strtop rd = recfunctions.append_fields( rd, names=["strtopdn", "d_strtop"], @@ -3320,8 +3233,6 @@ def _parse_1c(line, reachinput, transroute): """ na = 0 - # line = _get_dataset(line, [0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 1, 30, 1, 2, 0.75, 0.0001, []]) - # line = line.strip().split() line = line_parse(line) nstrm = int(line.pop(0)) @@ -3398,7 +3309,6 @@ def _parse_6a(line, option): ------- a list of length 13 containing all variables for Data Set 6a """ - # line = line.strip().split() line = line_parse(line) xyz = [] diff --git a/flopy/modflow/mfswi2.py b/flopy/modflow/mfswi2.py index 4e4672458..bc783208d 100644 --- a/flopy/modflow/mfswi2.py +++ b/flopy/modflow/mfswi2.py @@ -476,7 +476,6 @@ def write_file(self, check=True, f=None): if self.nobs > 0: f.write("# Dataset 8\n") for i in range(self.nobs): - # f.write(self.obsnam[i] + 3 * '%10i' % self.obslrc + '\n') f.write(f"{self.obsnam[i]} ") for v in self.obslrc[i, :]: f.write(f"{v + 1:10d}") diff --git a/flopy/modflow/mfswt.py b/flopy/modflow/mfswt.py index a769be0f3..3a43cbfe9 100644 --- a/flopy/modflow/mfswt.py +++ b/flopy/modflow/mfswt.py @@ -821,9 +821,6 @@ def load(cls, f, model, ext_unit_dict=None): print(f" loading swt dataset 15 for layer {kk}") ids16 = np.empty(26, dtype=np.int32) ids16 = read1d(f, ids16) - # for k in range(1, 26, 2): - # model.add_pop_key_list(ids16[k]) - # ids16[k] = 2054 # all sub-wt data sent to unit 2054 # dataset 17 ids17 = [0] * iswtoc for k in range(iswtoc): diff --git a/flopy/modpath/mp6.py b/flopy/modpath/mp6.py index e1c4b2c70..7f8b04d66 100644 --- a/flopy/modpath/mp6.py +++ b/flopy/modpath/mp6.py @@ -16,7 +16,7 @@ class Modpath6List(Package): def __init__(self, model, extension="list", listunit=7): # call base package constructor super().__init__(model, extension, "LIST", listunit) - # self.parent.add_package(self) This package is not added to the base + # This package is not added to the base # model so that it is not included in get_name_file_entries() return @@ -440,9 +440,6 @@ def append_node(ifaces_well, wellid, node_number, k, i, j): append_node(side_faces, wellid, n, k, i, j) elif package.upper() == "RCH": ParticleGenerationOption = 1 - # for j in range(nrow): - # for i in range(ncol): - # group_name.append('rch') group_name.append("rch") group_placement.append( [ diff --git a/flopy/modpath/mp6bas.py b/flopy/modpath/mp6bas.py index 7e9333f11..141e8397e 100644 --- a/flopy/modpath/mp6bas.py +++ b/flopy/modpath/mp6bas.py @@ -152,7 +152,6 @@ def write_file(self): for i in range(self.def_face_ct): f_bas.write(f"{self.bud_label[i]:20s}\n") f_bas.write(f"{self.def_iface[i]:2d}\n") - # f_bas.write('\n') # need to reset lc fmtin lc = self.laytyp @@ -161,7 +160,6 @@ def write_file(self): # from modpath bas--uses keyword array types f_bas.write(self.ibound.get_file_entry()) # from MT3D bas--uses integer array types - # f_bas.write(self.ibound.get_file_entry()) f_bas.write(self.prsity.get_file_entry()) f_bas.write(self.prsityCB.get_file_entry()) diff --git a/flopy/modpath/mp6sim.py b/flopy/modpath/mp6sim.py index f2f9ea6ac..1cf465958 100644 --- a/flopy/modpath/mp6sim.py +++ b/flopy/modpath/mp6sim.py @@ -150,17 +150,6 @@ def __init__( self.retard_fac = retard_fac self.retard_fcCB = retard_fcCB - # self.mask_nlay = 
Util3d(model,(nlay,nrow,ncol),np.int32,\ - # mask_nlay,name='mask_nlay',locat=self.unit_number[0]) - # self.mask_1lay = Util3d(model,(nlay,nrow,ncol),np.int32,\ - # mask_1lay,name='mask_1lay',locat=self.unit_number[0]) - # self.stop_zone = Util3d(model,(nlay,nrow,ncol),np.int32,\ - # stop_zone,name='stop_zone',locat=self.unit_number[0]) - # self.retard_fac = Util3d(model,(nlay,nrow,ncol),np.float32,\ - # retard_fac,name='retard_fac',locat=self.unit_number[0]) - # self.retard_fcCB = Util3d(model,(nlay,nrow,ncol),np.float32,\ - # retard_fcCB,name='retard_fcCB',locat=self.unit_number[0]) - self.parent.add_package(self) def check(self, f=None, verbose=True, level=1, checktype=None): diff --git a/flopy/modpath/mp7.py b/flopy/modpath/mp7.py index be810c233..75d4a0ce8 100644 --- a/flopy/modpath/mp7.py +++ b/flopy/modpath/mp7.py @@ -29,7 +29,7 @@ def __init__(self, model, extension="list", unitnumber=None): # call base package constructor super().__init__(model, extension, "LIST", unitnumber) - # self.parent.add_package(self) This package is not added to the base + # This package is not added to the base # model so that it is not included in get_name_file_entries() return diff --git a/flopy/mt3d/mt.py b/flopy/mt3d/mt.py index ce295791a..45599acee 100644 --- a/flopy/mt3d/mt.py +++ b/flopy/mt3d/mt.py @@ -200,13 +200,8 @@ def __init__( # the starting external data unit number self._next_ext_unit = 2000 if external_path is not None: - # assert model_ws == '.', "ERROR: external cannot be used " + \ - # "with model_ws" - - # external_path = os.path.join(model_ws, external_path) if os.path.exists(external_path): print(f"Note: external_path {external_path} already exists") - # assert os.path.exists(external_path),'external_path does not exist' else: os.mkdir(external_path) self.external = True @@ -509,9 +504,6 @@ def load( namefile_path, mt.mfnam_packages, verbose=verbose ) except Exception as e: - # print("error loading name file entries from file") - # print(str(e)) - # return None raise Exception( f"error loading name file entries from file:\n{e!s}" ) diff --git a/flopy/mt3d/mtcts.py b/flopy/mt3d/mtcts.py index 396d3bc08..6e2eba045 100644 --- a/flopy/mt3d/mtcts.py +++ b/flopy/mt3d/mtcts.py @@ -144,19 +144,6 @@ def __init__( self, ): raise NotImplementedError() - # # unit number - # if unitnumber is None: - # unitnumber = self.unitnumber - # Package.__init__(self, model, extension, 'CTS', self.unitnumber) - # - # # Set dimensions - # nrow = model.nrow - # ncol = model.ncol - # nlay = model.nlay - # ncomp = model.ncomp - # mcomp = model.mcomp - - # Set package specific parameters @classmethod def load( @@ -201,70 +188,6 @@ def load( raise NotImplementedError() - # if model.verbose: - # sys.stdout.write('loading cts package file...\n') - # - # # Open file, if necessary - # openfile = not hasattr(f, 'read') - # if openfile: - # filename = f - # f = open(filename, 'r') - # - # # Set dimensions if necessary - # if nlay is None: - # nlay = model.nlay - # if nrow is None: - # nrow = model.nrow - # if ncol is None: - # ncol = model.ncol - # if nper is None: - # nper = model.nper - # if ncomp is None: - # ncomp = model.ncomp - # - # # Item 1 (MXCTS, ICTSOUT, MXEXT, MXINJ, MXWEL, IFORCE) - # line = f.readline() - # if line[0] == '#': - # raise ValueError('CTS package does not support comment lines') - # if model.verbose: - # print(' loading MXCTS, ICTSOUT, MXEXT, MXINJ, MXWEL, IFORCE...') - # - # m_arr = line.strip().split() - # mxcts = int(m_arr[0]) - # ictsout = int(m_arr[1]) - # mxext = int(m_arr[2]) - # mxinj 
= int(m_arr[3]) - # mxwel = int(m_arr[4]) - # iforce = int(m_arr[5]) - # - # # Start of transient data - # for iper in range(nper): - # - # if model.verbose: - # print(' loading CTS data for kper {0:5d}'.format(iper + 1)) - # - # # Item 2 (NCTS) - # line = f.readline() - # m_arr = line.strip().split() - # ncts = int(m_arr[0]) - # - # # Start of information for each CTS - # for icts in range(ncts): - # - # if model.verbose: - # print(' loading data for system #{0:5d}' - # .format(icts + 1)) - # # Item 3 (ICTS, NEXT, NINJ, ITRTINJ) - # line = f.readline() - # m_arr = line.strip().split() - # icts = int(m_arr[0]) - # next = int(m_arr[1]) - # ninj = int(m_arr[2]) - # itrtinj = int(m_arr[3]) - # - # if openfile: - # f.close() - @staticmethod def get_default_CTS_dtype(ncomp=1, iforce=0): """ @@ -273,53 +196,6 @@ def get_default_CTS_dtype(ncomp=1, iforce=0): raise NotImplementedError() - # # Item 3 - # type_list = [("icts", int), ("next", int), ("ninj", int), - # ("itrtinj", int)] - # - # # Create a list for storing items 5, 6, & 9 - # items_5_6_7_9_list = [] - # if ncomp > 1: - # # Item 5 in CTS input - # for comp in range(1, ncomp+1): - # qincts_name = "qincts{0:d}".format(comp) - # cincts_name = "cincts{0:d}".format(comp) - # items_5_6_7_9_list.append((qincts_name, np.float32)) - # items_5_6_7_9_list.append((cincts_name, np.float32)) - # - # # Item 6 in CTS input - # for comp in range(1, ncomp+1): - # ioptinj_name = "ioptinj{0:d}".format(comp) - # cmchginj_name = "cmchginj{0:d}".format(comp) - # items_5_6_7_9_list.append((ioptinj_name, int)) - # items_5_6_7_9_list.append((cmchginj_name, np.float32)) - # - # if iforce == 0: - # for comp in range(1, ncomp+1): - # cnte_name = "cnte{0:d}".format(comp) - # items_5_6_7_9_list.append(cnte_name, np.float32) - # - # # Item 9 in CTS input - # items_5_6_7_9_list.append(("qoutcts", np.float32)) - # - # type_list.append(items_5_6_7_9_list) - # - # # Now create a list for the records in Item 4 - # ext_wels_list = [("kext", int), ("iext", int), ("jext", int), - # ("iwext", int)] - # - # type_list.append(ext_wels_list) - # - # # Now create a list for the records in Item 8 - # inj_wels_list = [("kinj", int), ("iinj", int), ("jinj", int), - # ("iwinj", int)] - # type_list.append(inj_wels_list) - # - # # - # - # dtype = np.dtype(type_list) - # dtype = dtype - @staticmethod def _ftype(): return "CTS" diff --git a/flopy/mt3d/mtdsp.py b/flopy/mt3d/mtdsp.py index b0dd15512..c7811c013 100644 --- a/flopy/mt3d/mtdsp.py +++ b/flopy/mt3d/mtdsp.py @@ -436,12 +436,6 @@ def load( ext_unit_dict, array_format="mt3d", ) - # if model.mcomp > 1: - # for icomp in range(2, model.mcomp + 1): - # name = "dmcoef" + str(icomp + 1) - # u2d = Util2d.load(f, model, (nlay,), np.float32, name, - # ext_unit_dict, array_format="mt3d") - # kwargs[name] = u2d if openfile: f.close() diff --git a/flopy/mt3d/mtssm.py b/flopy/mt3d/mtssm.py index 7bc8416f7..8887c57cd 100644 --- a/flopy/mt3d/mtssm.py +++ b/flopy/mt3d/mtssm.py @@ -304,19 +304,6 @@ def __init__( array_free_format=False, ) self.crch.append(t2d) - # else: - # try: - # if model.mf.rch is not None: - # print("found 'rch' in modflow model, resetting crch to 0.0") - # self.crch = [Transient2d(model, (nrow, ncol), np.float32, - # 0, name='crch1', - # locat=self.unit_number[0], - # array_free_format=False)] - # - # else: - # self.crch = None - # except: - # self.crch = None self.cevt = None try: @@ -366,20 +353,6 @@ def __init__( ) self.cevt.append(t2d) - # else: - # try: - # if model.mf.evt is not None or model.mf.ets is not None: - # 
print("found 'ets'/'evt' in modflow model, resetting cevt to 0.0") - # self.cevt = [Transient2d(model, (nrow, ncol), np.float32, - # 0, name='cevt1', - # locat=self.unit_number[0], - # array_free_format=False)] - # - # else: - # self.cevt = None - # except: - # self.cevt = None - if len(list(kwargs.keys())) > 0: raise Exception( "SSM error: unrecognized kwargs: " diff --git a/flopy/mt3d/mttob.py b/flopy/mt3d/mttob.py index 0f2f82575..1d7927e48 100644 --- a/flopy/mt3d/mttob.py +++ b/flopy/mt3d/mttob.py @@ -70,8 +70,7 @@ def write_file(self): f_tob.write( "%s%10d%10d%10d\n" % (self.outnam, inConcObs, inFluxObs, inSaveObs) ) - # if (inConcObs): - # + if inFluxObs: nFluxGroup = len(self.FluxGroups) f_tob.write( diff --git a/flopy/plot/crosssection.py b/flopy/plot/crosssection.py index bca4b47fb..41231211c 100644 --- a/flopy/plot/crosssection.py +++ b/flopy/plot/crosssection.py @@ -797,9 +797,6 @@ def plot_grid(self, **kwargs): col = self.get_grid_line_collection(**kwargs) if col is not None: ax.add_collection(col) - # ax.set_xlim(self.extent[0], self.extent[1]) - # ax.set_ylim(self.extent[2], self.extent[3]) - return col def plot_bc( diff --git a/flopy/plot/plotutil.py b/flopy/plot/plotutil.py index 57af252a4..ccbe5a272 100644 --- a/flopy/plot/plotutil.py +++ b/flopy/plot/plotutil.py @@ -346,9 +346,7 @@ def _plot_package_helper(package, **kwargs): ) elif isinstance(value, DataInterface): - if ( - value.data_type == DataType.transientlist - ): # isinstance(value, (MfList, MFTransientList)): + if value.data_type == DataType.transientlist: if package.parent.verbose: print( "plotting {} package MfList instance: {}".format( @@ -404,9 +402,7 @@ def _plot_package_helper(package, **kwargs): if ax is not None: caxs.append(ax) - elif ( - value.data_type == DataType.array3d - ): # isinstance(value, Util3d): + elif value.data_type == DataType.array3d: if value.array is not None: if package.parent.verbose: print( @@ -414,7 +410,6 @@ def _plot_package_helper(package, **kwargs): package.name[0], item ) ) - # fignum = list(range(ifig, ifig + inc)) fignum = list( range( defaults["initial_fig"], @@ -438,9 +433,7 @@ def _plot_package_helper(package, **kwargs): ) ) - elif ( - value.data_type == DataType.array2d - ): # isinstance(value, Util2d): + elif value.data_type == DataType.array2d: if value.array is not None: if len(value.array.shape) == 2: # is this necessary? if package.parent.verbose: @@ -470,9 +463,7 @@ def _plot_package_helper(package, **kwargs): ) ) - elif ( - value.data_type == DataType.transient2d - ): # isinstance(value, Transient2d): + elif value.data_type == DataType.transient2d: if value.array is not None: if package.parent.verbose: print( @@ -1658,7 +1649,6 @@ def line_intersect_grid(ptsin, xgrid, ygrid): ua = np.ones(denom.shape, dtype=denom.dtype) * np.nan idx = np.where(denom != 0.0) ua[idx] = numa[idx] / denom[idx] - # ub = numb / denom del numa del numb del denom diff --git a/flopy/seawat/swt.py b/flopy/seawat/swt.py index b10a41b58..7339b6991 100644 --- a/flopy/seawat/swt.py +++ b/flopy/seawat/swt.py @@ -145,10 +145,8 @@ def __init__( model_ws == "." 
), "ERROR: external cannot be used with model_ws" - # external_path = os.path.join(model_ws, external_path) if os.path.exists(external_path): print(f"Note: external_path {external_path} already exists") - # assert os.path.exists(external_path),'external_path does not exist' else: os.mkdir(external_path) self.external = True @@ -294,17 +292,12 @@ def _set_name(self, value): # Overrides BaseModel's setter for name property super()._set_name(value) - # for i in range(len(self.lst.extension)): - # self.lst.file_name[i] = self.name + '.' + self.lst.extension[i] - # return - def change_model_ws(self, new_pth=None, reset_external=False): # if hasattr(self,"_mf"): if self._mf is not None: self._mf.change_model_ws( new_pth=new_pth, reset_external=reset_external ) - # if hasattr(self,"_mt"): if self._mt is not None: self._mt.change_model_ws( new_pth=new_pth, reset_external=reset_external diff --git a/flopy/seawat/swtvsc.py b/flopy/seawat/swtvsc.py index f7bcaf319..368bd1f6a 100644 --- a/flopy/seawat/swtvsc.py +++ b/flopy/seawat/swtvsc.py @@ -227,14 +227,6 @@ def write_file(self): if self.mt3dmuflg == -1: f_vsc.write(f"{self.viscref}\n") f_vsc.write(f"{self.nsmueos} {self.mutempopt}\n") - # if self.nsmueos == 1: - # f_vsc.write('{} {} {}\n'.format(self.mtmuspec, self.dmudc, - # self.cmuref)) - # else: - # for iwr in range(self.nsmueos): - # f_vsc.write('{} {} {}\n'.format(self.mtmuspec[iwr], - # self.dmudc[iwr], - # self.cmuref[iwr])) if self.nsmueos > 0: for iwr in range(self.nsmueos): f_vsc.write( diff --git a/flopy/utils/binaryfile.py b/flopy/utils/binaryfile.py index a7d57c4e4..28606edb6 100644 --- a/flopy/utils/binaryfile.py +++ b/flopy/utils/binaryfile.py @@ -1174,8 +1174,6 @@ def _totim_from_kstpkper(self, kstpkper): kstp_len = [dt1] for i in range(kstp + 1): kstp_len.append(kstp_len[-1] * tsmult) - # kstp_len = np.array(kstp_len) - # kstp_len = kstp_len[:kstp].sum() kstp_len = sum(kstp_len[: kstp + 1]) return kper_len + kstp_len diff --git a/flopy/utils/check.py b/flopy/utils/check.py index 69f8c2ba5..662643d60 100644 --- a/flopy/utils/check.py +++ b/flopy/utils/check.py @@ -254,8 +254,6 @@ def _get_summary_array(self, array=None): if array is None: return np.recarray((0), dtype=dtype) ra = recarray(array, dtype) - # at = array.transpose() - # a = np.core.records.fromarrays(at, dtype=dtype) return ra def _txt_footer( @@ -360,9 +358,6 @@ def _list_spd_check_violations( stress_period_data where criteria=True. """ inds_col = self._get_cell_inds_names() - # inds = stress_period_data[criteria][inds_col]\ - # .reshape(stress_period_data[criteria].shape + (-1,)) - # inds = np.atleast_2d(np.squeeze(inds.tolist())) inds = stress_period_data[criteria] a = self._get_cellid_cols(inds, inds_col) inds = a.view(int) @@ -489,15 +484,6 @@ def stress_period_data_values( name, k,i,j indices, values, and description of error for each row in stress_period_data where criteria=True. 
""" - # check for valid cell indices - # self._stress_period_data_valid_indices(stress_period_data) - - # first check for and list nan values - # self._stress_period_data_nans(stress_period_data) - - # next check for BCs in inactive cells - # self._stress_period_data_inactivecells(stress_period_data) - if np.any(criteria): # list the values that met the criteria sa = self._list_spd_check_violations( diff --git a/flopy/utils/compare.py b/flopy/utils/compare.py index 7aad59637..3e4100fba 100644 --- a/flopy/utils/compare.py +++ b/flopy/utils/compare.py @@ -850,8 +850,6 @@ def compare_heads( v1 = h1.flatten()[ind] v2 = h2.flatten()[ind] d12 = v1 - v2 - # e += ' ' + fmtn.format(jdx + 1) + ' node: ' - # e += fmtn.format(ind + 1) # convert to one-based e += " " + fmtn.format(jdx + 1) e += f" {iv}" e += " -- " diff --git a/flopy/utils/gridgen.py b/flopy/utils/gridgen.py index d93e46c10..27e9b1b34 100644 --- a/flopy/utils/gridgen.py +++ b/flopy/utils/gridgen.py @@ -14,7 +14,7 @@ from ..modflow import ModflowDis from ..utils import import_optional_dependency from ..utils.flopy_io import relpath_safe -from .util_array import Util2d # read1d, +from .util_array import Util2d # todo # creation of line and polygon shapefiles from features (holes!) diff --git a/flopy/utils/gridintersect.py b/flopy/utils/gridintersect.py index 16c14a272..eed25d310 100644 --- a/flopy/utils/gridintersect.py +++ b/flopy/utils/gridintersect.py @@ -913,10 +913,6 @@ def _intersect_point_shapely2( keep_cid = qcellids names = ["cellids", "ixshapes"] - # self.mfgrid.grid_type == "structured": - # cid_dtype = "i" - # else: - # cid_dtype = "O" formats = ["O", "O"] rec = np.recarray(len(keep_pts), names=names, formats=formats) @@ -2265,7 +2261,6 @@ def find_position_in_array(arr, x): xr = arr[j + 1] frac = (x - xl) / (xr - xl) if 0.0 <= frac <= 1.0: - # if min(xl, xr) <= x < max(xl, xr): jpos.append(j) if len(jpos) == 0: return None diff --git a/flopy/utils/lgrutil.py b/flopy/utils/lgrutil.py index f382c97eb..b17839483 100644 --- a/flopy/utils/lgrutil.py +++ b/flopy/utils/lgrutil.py @@ -164,7 +164,7 @@ def __init__( idxl, idxr, idxc = np.where(idomainp == 0) assert idxl.shape[0] > 1, "no zero values found in idomain" - # # child cells per parent and child cells per parent layer + # child cells per parent and child cells per parent layer self.ncpp = ncpp self.ncppl = Util2d(m, (nlayp,), np.int32, ncppl, "ncppl").array diff --git a/flopy/utils/mflistfile.py b/flopy/utils/mflistfile.py index df952c985..33f13b77f 100644 --- a/flopy/utils/mflistfile.py +++ b/flopy/utils/mflistfile.py @@ -651,7 +651,6 @@ def _get_index(self, maxentries): line, ) break - # print('info found for timestep stress period',ts,sp) idxs.append([ts, sp, seekpoint]) diff --git a/flopy/utils/mfreadnam.py b/flopy/utils/mfreadnam.py index 47ab6da50..ba833b842 100644 --- a/flopy/utils/mfreadnam.py +++ b/flopy/utils/mfreadnam.py @@ -608,7 +608,6 @@ def get_mf6_files(mfnamefile): if len(olist) > 0: outplist = outplist + olist # terminate loop if no additional files - # if len(flist) < 1 and len(olist) < 1: if len(flist) < 1: break diff --git a/flopy/utils/mtlistfile.py b/flopy/utils/mtlistfile.py index 5faf5eb3f..7bae5ea04 100644 --- a/flopy/utils/mtlistfile.py +++ b/flopy/utils/mtlistfile.py @@ -136,13 +136,6 @@ def parse( df_gw = pd.DataFrame(self.gw_data) df_gw.loc[:, "totim"] = df_gw.pop("totim_1") - # if cumulative: - # keep = [c for c in df_gw.columns if "_flx" not in c] - # df_gw = df_gw.loc[:,keep] - # else: - # keep = [c for c in df_gw.columns if "_cum" not in c] 
- # df_gw = df_gw.loc[:, keep] - if diff: df_gw = self._diff(df_gw) @@ -166,13 +159,6 @@ def parse( df_sw = pd.DataFrame(self.sw_data) df_sw.loc[:, "totim"] = df_gw.totim.iloc[:min_len].values - # if cumulative: - # keep = [c for c in df_sw.columns if "_flx" not in c] - # df_sw = df_sw.loc[:, keep] - # else: - # keep = [c for c in df_sw.columns if "_cum" not in c] - # df_sw = df_sw.loc[:, keep] - if diff: df_sw = self._diff(df_sw) if start_datetime is not None: @@ -463,10 +449,8 @@ def _parse_sw(self, f, line): f"error parsing 'out' SW items on line {self.lcount}: {e!s}" ) self._add_to_sw_data("net", item, cval, fval, comp) - # out_tots = self._parse_sw_line(line) def _parse_sw_line(self, line): - # print(line) raw = line.strip().split("=") citem = raw[0].strip().strip(r"[\|]").replace(" ", "_") cval = float(raw[1].split()[0]) @@ -476,7 +460,6 @@ def _parse_sw_line(self, line): else: fitem = raw[1].split()[-1].replace(" ", "_") fval = float(raw[2]) - # assert citem == fitem,"{0}, {1}".format(citem,fitem) return citem, cval, fval def _add_to_sw_data(self, inout, item, cval, fval, comp): diff --git a/flopy/utils/observationfile.py b/flopy/utils/observationfile.py index c4fc30710..8d85aaf3d 100644 --- a/flopy/utils/observationfile.py +++ b/flopy/utils/observationfile.py @@ -308,10 +308,6 @@ def __init__(self, filename, verbose=False, isBinary="auto"): # get number of observations self.nobs = self.read_integer() - # # continue reading the file - # self.v = np.empty(self.nobs, dtype=float) - # self.v.fill(1.0E+32) - # read obsnames obsnames = [] for idx in range(0, self.nobs): diff --git a/flopy/utils/swroutputfile.py b/flopy/utils/swroutputfile.py index e15948de2..991571eba 100644 --- a/flopy/utils/swroutputfile.py +++ b/flopy/utils/swroutputfile.py @@ -543,7 +543,6 @@ def _read_qaq(self): for irch in range(self.nrecord): klay = self.itemlist[irch] for k in range(klay): - # r[idx, 0] = irch reaches[idx] = irch idx += 1 diff --git a/flopy/utils/util_array.py b/flopy/utils/util_array.py index 637036281..8d341f51a 100644 --- a/flopy/utils/util_array.py +++ b/flopy/utils/util_array.py @@ -6,8 +6,6 @@ """ -# from future.utils import with_metaclass - import copy import os import shutil @@ -255,7 +253,6 @@ def __setattr__(self, key, value): elif key.lower() == "binary": value = bool(value) if value and self.free: - # raise Exception("cannot switch from 'free' to 'binary' format") self._isfree = False self._isbinary = value self._set_defaults() @@ -263,7 +260,6 @@ def __setattr__(self, key, value): elif key.lower() == "free": value = bool(value) if value and self.binary: - # raise Exception("cannot switch from 'binary' to 'free' format") self._isbinary = False self._isfree = bool(value) self._set_defaults() @@ -771,7 +767,6 @@ def array(self): if nrow is not None: # typical 3D case a = np.empty((self.shape), dtype=self._dtype) - # for i,u2d in self.uds: for i, u2d in enumerate(self.util_2ds): a[i] = u2d.array else: @@ -1226,7 +1221,6 @@ def __get_3d_instance(self, kper, arg): arg, fmtin=self.fmtin, name=name, - # ext_filename=ext_filename, locat=self.locat, array_free_format=self.array_free_format, ) @@ -2143,9 +2137,6 @@ def python_file_path(self): ------- file_path (str) : path relative to python: includes model_ws """ - # if self.vtype != str: - # raise Exception("Util2d call to python_file_path " + - # "for vtype != str") python_file_path = "" if self._model.model_ws != ".": python_file_path = os.path.join(self._model.model_ws) @@ -2258,8 +2249,6 @@ def get_openclose_cr(self): def 
get_external_cr(self): locat = self._model.next_ext_unit() - # if self.format.binary: - # locat = -1 * np.abs(locat) self._model.add_external( self.model_file_path, locat, self.format.binary ) @@ -2838,11 +2827,6 @@ def load( curr_unit = cunit break - # Allows for special MT3D array reader - # array_format = None - # if hasattr(model, 'array_format'): - # array_format = model.array_format - cr_dict = Util2d.parse_control_record( f_handle.readline(), current_unit=curr_unit, @@ -2872,7 +2856,6 @@ def load( fname = fname.replace('"', "") fname = fname.replace("\\", os.path.sep) fname = os.path.join(model.model_ws, fname) - # load_txt(shape, file_in, dtype, fmtin): assert os.path.exists( fname ), f"Util2d.load() error: open/close file {fname} not found" @@ -3005,7 +2988,6 @@ def parse_control_record( nunit = abs(int(raw[1])) if ext_unit_dict is not None: try: - # td = ext_unit_dict[int(raw[1])] fname = ext_unit_dict[nunit].filename.strip() except: print( @@ -3045,8 +3027,6 @@ def parse_control_record( cnstnt = int(line[10:20].strip()) else: cnstnt = 0 - # if cnstnt == 0: - # cnstnt = 1 if locat != 0: if len(line) >= 40: fmtin = line[20:40].strip() @@ -3056,10 +3036,6 @@ def parse_control_record( iprn = int(line[40:50].strip()) except: iprn = 0 - # locat = int(raw[0]) - # cnstnt = float(raw[1]) - # fmtin = raw[2].strip() - # iprn = int(raw[3]) if locat == 0: freefmt = "constant" elif locat < 0: diff --git a/flopy/utils/util_list.py b/flopy/utils/util_list.py index 0ce2916b6..12ba6ac10 100644 --- a/flopy/utils/util_list.py +++ b/flopy/utils/util_list.py @@ -312,7 +312,6 @@ def __cast_data(self, data): # If data is a list, then all we can do is try to cast it to # an ndarray, then cast again to a recarray if isinstance(data, list): - # warnings.warn("MfList casting list to array") try: data = np.array(data) except Exception as e: @@ -557,8 +556,6 @@ def __getitem__(self, kper): # If the data entry for kper is a string, # return the corresponding recarray, # but don't reset the value in the data dict - # assert kper in list(self.data.keys()), "MfList.__getitem__() kper " + \ - # str(kper) + " not in data.keys()" try: kper = int(kper) except Exception as e: @@ -588,7 +585,6 @@ def __setitem__(self, kper, data): # If data is a list, then all we can do is try to cast it to # an ndarray, then cast again to a recarray if isinstance(data, list): - # warnings.warn("MfList casting list to array") try: data = np.array(data) except Exception as e: @@ -612,7 +608,6 @@ def __setitem__(self, kper, data): ) def __fromfile(self, f): - # d = np.fromfile(f,dtype=self.dtype,count=count) try: d = np.genfromtxt(f, dtype=self.dtype) except Exception as e: @@ -638,9 +633,6 @@ def get_filenames(self): self._model.array_free_format and self._model.external_path is not None ): - # py_filepath = '' - # py_filepath = os.path.join(py_filepath, - # self._model.external_path) filename = f"{self.package.name[0]}_{kper:04d}.dat" filenames.append(filename) return filenames @@ -897,7 +889,6 @@ def attribute_by_kper(self, attr, function=np.mean, idx_val=None): kper_data = kper_data[ np.where(kper_data[idx_val[0]] == idx_val[1]) ] - # kper_vtype = self.__vtype[kper] v = function(kper_data[attr]) values.append(v) return values @@ -1093,7 +1084,6 @@ def to_array(self, kper=0, mask=False): (self._model.nlay, self._model.nrow, self._model.ncol), dtype=float, ) - # print(name,kper) for rec in sarr: if unstructured: arr[rec["node"]] += rec[name] @@ -1110,9 +1100,6 @@ def to_array(self, kper=0, mask=False): arr[cnt == 0.0] = np.nan 
arrays[name] = arr.copy()
 
-        # elif mask:
-        #     for name, arr in arrays.items():
-        #         arrays[name][:] = np.nan
         return arrays
 
     @property
diff --git a/flopy/utils/zonbud.py b/flopy/utils/zonbud.py
index 91a0a8910..336572dec 100644
--- a/flopy/utils/zonbud.py
+++ b/flopy/utils/zonbud.py
@@ -169,8 +169,6 @@ def __init__(
             self._zonenamedict[z] = "_".join(a.split())
             seen.append(z)
 
-        # self._iflow_recnames = self._get_internal_flow_record_names()
-
         # All record names in the cell-by-cell budget binary file
         self.record_names = [
             n.strip() for n in self.cbc.get_unique_record_names(decode=True)
@@ -2423,7 +2421,7 @@ def _recarray_to_dataframe(
         else:
             index_cols = ["time_step", "stress_period", "name"]
 
-        df = df.set_index(index_cols)  # .sort_index(level=0)
+        df = df.set_index(index_cols)
         if zones is not None:
             keep_cols = zones
         else:
diff --git a/scripts/process_benchmarks.py b/scripts/process_benchmarks.py
index 1b29da52e..b934c5b33 100644
--- a/scripts/process_benchmarks.py
+++ b/scripts/process_benchmarks.py
@@ -16,7 +16,6 @@
 
 json_paths = list(Path(indir).rglob("*.json"))
 print(f"Found {len(json_paths)} JSON files")
-# pprint([str(p) for p in json_paths])
 
 
 def get_benchmarks(paths):
@@ -37,8 +36,6 @@ def get_benchmarks(paths):
         fullname = benchmark["fullname"]
         included = [
             "min",
-            # 'max',
-            # 'median',
             "mean",
         ]
         for stat, value in benchmark["stats"].items():
@@ -175,6 +172,5 @@ def seaborn_plot(stats):
     stats = pd.DataFrame(case).groupby("stat")
     case_name = str(case_name).replace("/", "_").replace(":", "_")
 
-    # fig = matplotlib_plot(stats)
     fig = seaborn_plot(stats)
     plt.savefig(str(outdir / f"{case_name}.png"))

From d36bb78c3b7a12ab6f77bbe31e3572915753c86b Mon Sep 17 00:00:00 2001
From: Mike Taves
Date: Wed, 12 Jun 2024 06:24:08 +1200
Subject: [PATCH 19/57] feat(datafile): add .headers property with data frame (#2221)

This feature adds a .headers property for a pandas data frame of the headers of various data files, including HeadFile, FormattedHeadFile, UcnFile and CellBudgetFile. This is a modern accessor to the headers, which is created internally using:

    pd.DataFrame(self.recordarray, index=self.iposarray)

where the index is the file position of the start of each array. Text fields are decoded to str types (rather than dealing with bytes types, which is probably a hangover from Python 2). Int32 types are left as-is, except for iposarray, which should be int64 to read large files (>2 GB). Float32 types are also left as-is, since these would otherwise have a lossy conversion to float64.

With CellBudgetFile, the headers vary depending on the type of budget file. For instance, "classic" files (created without the "COMPACT BUDGET" option) don't have imeth, delt, pertim, or totim columns. Furthermore, only files with imeth=6 have the extra text columns modelnam, paknam, modelnam2, and paknam2, since these fields are otherwise always empty.

This PR also adds more checks to the outputs created with ._build_index() methods. Eventually, I'm planning to deprecate other properties and functions that use the .recordarray structured array, so it is important to have a detailed trace of these outputs before replacing them.

This PR also moves the __enter__ / __exit__ methods from BinaryLayerFile to LayerFile (this feature was from #669), so that FormattedHeadFile can also use the "with" context statement to auto-close the file.
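As a usage illustration (not part of the patch itself), a minimal sketch of what the new accessor enables; the file name here is hypothetical:

```python
from flopy.utils import HeadFile

# LayerFile subclasses such as HeadFile can now be used as context
# managers, and .headers exposes the record headers as a pandas
# DataFrame indexed by the file position of each data array.
with HeadFile("freyberg.hds") as hds:  # hypothetical file name
    df = hds.headers
    # e.g. inspect the header rows for the final stress period
    print(df[df["kper"] == df["kper"].max()])
```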
--- autotest/test_binaryfile.py | 112 +++++++++++++ autotest/test_cellbudgetfile.py | 277 ++++++++++++++++++++++++++++++++ autotest/test_formattedfile.py | 53 ++++++ flopy/utils/binaryfile.py | 37 ++++- flopy/utils/datafile.py | 6 + flopy/utils/formattedfile.py | 9 +- 6 files changed, 486 insertions(+), 8 deletions(-) diff --git a/autotest/test_binaryfile.py b/autotest/test_binaryfile.py index 1dff66332..b8c7df751 100644 --- a/autotest/test_binaryfile.py +++ b/autotest/test_binaryfile.py @@ -1,6 +1,7 @@ from itertools import repeat import numpy as np +import pandas as pd import pytest from matplotlib import pyplot as plt from matplotlib.axes import Axes @@ -14,6 +15,7 @@ CellBudgetFile, HeadFile, HeadUFile, + UcnFile, Util2d, ) from flopy.utils.binaryfile import ( @@ -71,6 +73,116 @@ def test_deprecated_binaryread_struct(example_data_path): assert res == 20 +def test_headfile_build_index(example_data_path): + # test low-level BinaryLayerFile._build_index() method + pth = example_data_path / "freyberg_multilayer_transient" / "freyberg.hds" + with HeadFile(pth) as hds: + pass + assert hds.nrow == 40 + assert hds.ncol == 20 + assert hds.nlay == 3 + assert not hasattr(hds, "nper") + assert hds.totalbytes == 10_676_004 + assert len(hds.recordarray) == 3291 + assert type(hds.recordarray) == np.ndarray + assert hds.recordarray.dtype == np.dtype( + [ + ("kstp", "i4"), + ("kper", "i4"), + ("pertim", "f4"), + ("totim", "f4"), + ("text", "S16"), + ("ncol", "i4"), + ("nrow", "i4"), + ("ilay", "i4"), + ] + ) + # check first and last recorddict + list_recordarray = hds.recordarray.tolist() + assert list_recordarray[0] == ( + (1, 1, 1.0, 1.0, b" HEAD", 20, 40, 1) + ) + assert list_recordarray[-1] == ( + (1, 1097, 1.0, 1097.0, b" HEAD", 20, 40, 3) + ) + assert hds.times == list((np.arange(1097) + 1).astype(np.float32)) + assert hds.kstpkper == [(1, kper + 1) for kper in range(1097)] + np.testing.assert_array_equal(hds.iposarray, np.arange(3291) * 3244 + 44) + assert hds.iposarray.dtype == np.int64 + # check first and last row of data frame + pd.testing.assert_frame_equal( + hds.headers.iloc[[0, -1]], + pd.DataFrame( + { + "kstp": np.array([1, 1], np.int32), + "kper": np.array([1, 1097], np.int32), + "pertim": np.array([1.0, 1.0], np.float32), + "totim": np.array([1.0, 1097.0], np.float32), + "text": ["HEAD", "HEAD"], + "ncol": np.array([20, 20], np.int32), + "nrow": np.array([40, 40], np.int32), + "ilay": np.array([1, 3], np.int32), + }, + index=[44, 10672804], + ), + ) + + +def test_concentration_build_index(example_data_path): + # test low-level BinaryLayerFile._build_index() method with UCN file + pth = example_data_path / "mt3d_test/mf2005mt3d/P07/MT3D001.UCN" + with UcnFile(pth) as ucn: + pass + assert ucn.nrow == 15 + assert ucn.ncol == 21 + assert ucn.nlay == 8 + assert not hasattr(ucn, "nper") + assert ucn.totalbytes == 10_432 + assert len(ucn.recordarray) == 8 + assert type(ucn.recordarray) == np.ndarray + assert ucn.recordarray.dtype == np.dtype( + [ + ("ntrans", "i4"), + ("kstp", "i4"), + ("kper", "i4"), + ("totim", "f4"), + ("text", "S16"), + ("ncol", "i4"), + ("nrow", "i4"), + ("ilay", "i4"), + ] + ) + # check first and last recorddict + list_recordarray = ucn.recordarray.tolist() + assert list_recordarray[0] == ( + (29, 1, 1, 100.0, b"CONCENTRATION ", 21, 15, 1) + ) + assert list_recordarray[-1] == ( + (29, 1, 1, 100.0, b"CONCENTRATION ", 21, 15, 8) + ) + assert ucn.times == [np.float32(100.0)] + assert ucn.kstpkper == [(1, 1)] + np.testing.assert_array_equal(ucn.iposarray, np.arange(8) * 
1304 + 44) + assert ucn.iposarray.dtype == np.int64 + # check first and last row of data frame + pd.testing.assert_frame_equal( + ucn.headers.iloc[[0, -1]], + pd.DataFrame( + { + "ntrans": np.array([29, 29], np.int32), + "kstp": np.array([1, 1], np.int32), + "kper": np.array([1, 1], np.int32), + "totim": np.array([100.0, 100.0], np.float32), + "text": ["CONCENTRATION", "CONCENTRATION"], + "ncol": np.array([21, 21], np.int32), + "nrow": np.array([15, 15], np.int32), + "ilay": np.array([1, 8], np.int32), + }, + index=[44, 9172], + ), + ) + + def test_binaryfile_writeread(function_tmpdir, nwt_model_path): model = "Pr3_MFNWT_lower.nam" ml = flopy.modflow.Modflow.load( diff --git a/autotest/test_cellbudgetfile.py b/autotest/test_cellbudgetfile.py index 175df6037..b7ff7c347 100644 --- a/autotest/test_cellbudgetfile.py +++ b/autotest/test_cellbudgetfile.py @@ -1,11 +1,288 @@ import os import numpy as np +import pandas as pd import pytest from flopy.mf6.modflow.mfsimulation import MFSimulation from flopy.utils.binaryfile import CellBudgetFile +# test low-level CellBudgetFile._build_index() method + + +def test_cellbudgetfile_build_index_classic(example_data_path): + """Test reading "classic" budget file, without "COMPACT BUDGET" option.""" + pth = example_data_path / "mt3d_test/mf2kmt3d/mnw/t5.cbc" + with CellBudgetFile(pth) as cbc: + pass + assert cbc.nrow == 101 + assert cbc.ncol == 101 + assert cbc.nlay == 3 + assert cbc.nper == 1 + assert cbc.totalbytes == 122_448 + assert len(cbc.recordarray) == 1 + assert type(cbc.recordarray) == np.ndarray + assert cbc.recordarray.dtype == np.dtype( + [ + ("kstp", "i4"), + ("kper", "i4"), + ("text", "S16"), + ("ncol", "i4"), + ("nrow", "i4"), + ("nlay", "i4"), + ("imeth", "i4"), + ("delt", "f4"), + ("pertim", "f4"), + ("totim", "f4"), + ("modelnam", "S16"), + ("paknam", "S16"), + ("modelnam2", "S16"), + ("paknam2", "S16"), + ] + ) + assert len(cbc.recorddict) == 1 + list_recorddict = list(cbc.recorddict.items()) + # fmt: off + assert list_recorddict == [( + (1, 1, b" MNW", 101, 101, 3, 0, 0.0, 0.0, -1.0, b"", b"", b"", b""), + 36) + ] + # fmt: on + assert cbc.times == [] + assert cbc.kstpkper == [(1, 1)] + np.testing.assert_array_equal(cbc.iposheader, np.array([0])) + assert cbc.iposheader.dtype == np.int64 + np.testing.assert_array_equal(cbc.iposarray, np.array([36])) + assert cbc.iposarray.dtype == np.int64 + assert cbc.textlist == [b" MNW"] + assert cbc.imethlist == [0] + assert cbc.paknamlist_from == [b""] + assert cbc.paknamlist_to == [b""] + pd.testing.assert_frame_equal( + cbc.headers, + pd.DataFrame( + { + "kstp": np.array([1], np.int32), + "kper": np.array([1], np.int32), + "text": ["MNW"], + "ncol": np.array([101], np.int32), + "nrow": np.array([101], np.int32), + "nlay": np.array([3], np.int32), + }, + index=[36], + ), + ) + + +def test_cellbudgetfile_build_index_compact(example_data_path): + """Test reading mfntw budget file, with "COMPACT BUDGET" option.""" + pth = example_data_path / "freyberg_multilayer_transient" / "freyberg.cbc" + with CellBudgetFile(pth) as cbc: + pass + assert cbc.nrow == 40 + assert cbc.ncol == 20 + assert cbc.nlay == 3 + assert cbc.nper == 1097 + assert cbc.totalbytes == 42_658_384 + assert len(cbc.recordarray) == 5483 + assert type(cbc.recordarray) == np.ndarray + assert cbc.recordarray.dtype == np.dtype( + [ + ("kstp", "i4"), + ("kper", "i4"), + ("text", "S16"), + ("ncol", "i4"), + ("nrow", "i4"), + ("nlay", "i4"), + ("imeth", "i4"), + ("delt", "f4"), + ("pertim", "f4"), + ("totim", "f4"), + ("modelnam", "S16"), + 
("paknam", "S16"), + ("modelnam2", "S16"), + ("paknam2", "S16"), + ] + ) + assert len(cbc.recorddict) == 5483 + # check first and last recorddict + list_recorddict = list(cbc.recorddict.items()) + # fmt: off + assert list_recorddict[0] == ( + (1, 1, b" CONSTANT HEAD", 20, 40, -3, 2, 1.0, 1.0, 1.0, b"", b"", b"", b""), + 52, + ) + assert list_recorddict[-1] == ( + (1, 1097, b"FLOW LOWER FACE ", 20, 40, -3, 1, 1.0, 1.0, 1097.0, b"", b"", b"", b""), + 42648784, + ) + # fmt: on + assert cbc.times == list((np.arange(1097) + 1).astype(np.float32)) + assert cbc.kstpkper == [(1, kper + 1) for kper in range(1097)] + # fmt: off + expected_iposheader = np.cumsum([0] + + ([296] + [9652] * 4) * 1095 + + [296] + [9652] * 3 + + [296] + [9652] * 2) + # fmt: on + np.testing.assert_array_equal(cbc.iposheader, expected_iposheader) + assert cbc.iposheader.dtype == np.int64 + np.testing.assert_array_equal(cbc.iposarray, expected_iposheader + 52) + assert cbc.iposarray.dtype == np.int64 + assert cbc.textlist == [ + b" CONSTANT HEAD", + b"FLOW RIGHT FACE ", + b"FLOW FRONT FACE ", + b"FLOW LOWER FACE ", + b" STORAGE", + ] + assert cbc.imethlist == [2, 1, 1, 1, 1] + assert cbc.paknamlist_from == [b""] + assert cbc.paknamlist_to == [b""] + # check first and last row of data frame + pd.testing.assert_frame_equal( + cbc.headers.iloc[[0, -1]], + pd.DataFrame( + { + "kstp": np.array([1, 1], np.int32), + "kper": np.array([1, 1097], np.int32), + "text": ["CONSTANT HEAD", "FLOW LOWER FACE"], + "ncol": np.array([20, 20], np.int32), + "nrow": np.array([40, 40], np.int32), + "nlay": np.array([-3, -3], np.int32), + "imeth": np.array([2, 1], np.int32), + "delt": np.array([1.0, 1.0], np.float32), + "pertim": np.array([1.0, 1.0], np.float32), + "totim": np.array([1.0, 1097.0], np.float32), + }, + index=[52, 42648784], + ), + ) + + +def test_cellbudgetfile_build_index_mf6(example_data_path): + cbb_file = ( + example_data_path + / "mf6" + / "test005_advgw_tidal" + / "expected_output" + / "AdvGW_tidal.cbc" + ) + with CellBudgetFile(cbb_file) as cbb: + pass + assert cbb.nrow == 15 + assert cbb.ncol == 10 + assert cbb.nlay == 3 + assert cbb.nper == 4 + assert cbb.totalbytes == 13_416_552 + assert len(cbb.recordarray) == 3610 + assert type(cbb.recordarray) == np.ndarray + assert cbb.recordarray.dtype == np.dtype( + [ + ("kstp", "i4"), + ("kper", "i4"), + ("text", "S16"), + ("ncol", "i4"), + ("nrow", "i4"), + ("nlay", "i4"), + ("imeth", "i4"), + ("delt", "f8"), + ("pertim", "f8"), + ("totim", "f8"), + ("modelnam", "S16"), + ("paknam", "S16"), + ("modelnam2", "S16"), + ("paknam2", "S16"), + ] + ) + assert len(cbb.recorddict) == 3610 + # check first and last recorddict + list_recorddict = list(cbb.recorddict.items()) + # fmt: off + assert list_recorddict[0] == ( + (1, 1, b" STO-SS", 10, 15, -3, 1, + 1.0, 1.0, 1.0, + b"", b"", b"", b""), + 64, + ) + assert list_recorddict[-1] == ( + (120, 4, b" EVT", 10, 15, -3, 6, + 0.08333333333333333, 10.000000000000002, 30.99999999999983, + b"GWF_1 ", b"GWF_1 ", b"GWF_1 ", b"EVT "), + 13414144, + ) + # fmt: on + assert isinstance(cbb.times, list) + np.testing.assert_allclose(cbb.times, np.linspace(1.0, 31, 361)) + # fmt: off + assert cbb.kstpkper == ( + [(1, 1)] + + [(kstp + 1, 2) for kstp in range(120)] + + [(kstp + 1, 3) for kstp in range(120)] + + [(kstp + 1, 4) for kstp in range(120)] + ) + # fmt: on + # this file has a complex structure, so just look at unique ipos spacings + assert set(np.diff(cbb.iposheader)) == ( + {184, 264, 304, 384, 456, 616, 632, 1448, 2168, 2536, 3664, 21664} + ) + assert 
cbb.iposheader[0] == 0 + assert cbb.iposheader.dtype == np.int64 + assert set(np.diff(cbb.iposarray)) == ( + {184, 264, 304, 384, 456, 616, 632, 1448, 2168, 2472, 3664, 21728} + ) + assert cbb.iposarray[0] == 64 + assert cbb.iposarray.dtype == np.int64 + # variable size headers depending on imeth + header_sizes = np.full(3610, 64) + header_sizes[cbb.recordarray["imeth"] == 6] = 128 + np.testing.assert_array_equal(cbb.iposheader + header_sizes, cbb.iposarray) + assert cbb.textlist == [ + b" STO-SS", + b" STO-SY", + b" FLOW-JA-FACE", + b" WEL", + b" RIV", + b" GHB", + b" RCH", + b" EVT", + ] + assert cbb.imethlist == [1, 1, 1, 6, 6, 6, 6, 6] + assert cbb.paknamlist_from == [b"", b"GWF_1 "] + assert cbb.paknamlist_to == [ + b"", + b"WEL ", + b"RIV ", + b"GHB-TIDAL ", + b"RCH-ZONE_1 ", + b"RCH-ZONE_2 ", + b"RCH-ZONE_3 ", + b"EVT ", + ] + # check first and last row of data frame + pd.testing.assert_frame_equal( + cbb.headers.iloc[[0, -1]], + pd.DataFrame( + { + "kstp": np.array([1, 120], np.int32), + "kper": np.array([1, 4], np.int32), + "text": ["STO-SS", "EVT"], + "ncol": np.array([10, 10], np.int32), + "nrow": np.array([15, 15], np.int32), + "nlay": np.array([-3, -3], np.int32), + "imeth": np.array([1, 6], np.int32), + "delt": [1.0, 0.08333333333333333], + "pertim": [1.0, 10.0], + "totim": [1.0, 31.0], + "modelnam": ["", "GWF_1"], + "paknam": ["", "GWF_1"], + "modelnam2": ["", "GWF_1"], + "paknam2": ["", "EVT"], + }, + index=[64, 13414144], + ), + ) + @pytest.fixture def zonbud_model_path(example_data_path): diff --git a/autotest/test_formattedfile.py b/autotest/test_formattedfile.py index 84cd88b5e..f8ad28614 100644 --- a/autotest/test_formattedfile.py +++ b/autotest/test_formattedfile.py @@ -1,5 +1,6 @@ import matplotlib.pyplot as plt import numpy as np +import pandas as pd import pytest from matplotlib.axes import Axes @@ -11,6 +12,58 @@ def freyberg_model_path(example_data_path): return example_data_path / "freyberg" +def test_headfile_build_index(example_data_path): + # test low-level FormattedLayerFile._build_index() method + pth = example_data_path / "mf2005_test" / "test1tr.githds" + with FormattedHeadFile(pth) as hds: + pass + assert hds.nrow == 15 + assert hds.ncol == 10 + assert hds.nlay == 1 + assert not hasattr(hds, "nper") + assert hds.totalbytes == 1613 + assert len(hds.recordarray) == 1 + assert type(hds.recordarray) == np.ndarray + assert hds.recordarray.dtype == np.dtype( + [ + ("kstp", "i4"), + ("kper", "i4"), + ("pertim", "f4"), + ("totim", "f4"), + ("text", "S16"), + ("ncol", "i4"), + ("nrow", "i4"), + ("ilay", "i4"), + ] + ) + flt32time = np.float32(1577880000.0) + assert hds.recordarray.tolist() == [ + (50, 1, float(flt32time), float(flt32time), b"HEAD", 10, 15, 1) + ] + assert hds.times == [flt32time] + assert hds.kstpkper == [(50, 1)] + np.testing.assert_array_equal(hds.iposarray, [98]) + assert hds.iposarray.dtype == np.int64 + pd.testing.assert_frame_equal( + hds.headers, + pd.DataFrame( + [ + { + "kstp": np.int32(50), + "kper": np.int32(1), + "pertim": flt32time, + "totim": flt32time, + "text": "HEAD", + "ncol": np.int32(10), + "nrow": np.int32(15), + "ilay": np.int32(1), + } + ], + index=[98], + ), + ) + + def test_formattedfile_reference(example_data_path): h = FormattedHeadFile(example_data_path / "mf2005_test" / "test1tr.githds") assert isinstance(h, FormattedHeadFile) diff --git a/flopy/utils/binaryfile.py b/flopy/utils/binaryfile.py index 28606edb6..b922f895f 100644 --- a/flopy/utils/binaryfile.py +++ b/flopy/utils/binaryfile.py @@ -15,6 +15,7 @@ from typing 
import List, Optional, Union import numpy as np +import pandas as pd from ..utils.datafile import Header, LayerFile from .gridutil import get_lni @@ -457,12 +458,6 @@ def __init__( ): super().__init__(filename, precision, verbose, kwargs) - def __enter__(self): - return self - - def __exit__(self, *exc): - self.close() - def _build_index(self): """ Build the recordarray and iposarray, which maps the header information @@ -508,9 +503,15 @@ def _build_index(self): # self.recordarray contains a recordarray of all the headers. self.recordarray = np.array(self.recordarray, dtype=self.header_dtype) - self.iposarray = np.array(self.iposarray) + self.iposarray = np.array(self.iposarray, dtype=np.int64) self.nlay = np.max(self.recordarray["ilay"]) + # provide headers as a pandas frame + self.headers = pd.DataFrame(self.recordarray, index=self.iposarray) + self.headers["text"] = ( + self.headers["text"].str.decode("ascii", "strict").str.strip() + ) + def get_databytes(self, header): """ @@ -1301,6 +1302,28 @@ def _build_index(self): self.iposarray = np.array(self.iposarray, dtype=np.int64) self.nper = self.recordarray["kper"].max() + # provide headers as a pandas frame + self.headers = pd.DataFrame(self.recordarray, index=self.iposarray) + # remove irrelevant columns + cols = self.headers.columns.to_list() + unique_imeth = self.headers["imeth"].unique() + if unique_imeth.max() == 0: + drop_cols = cols[cols.index("imeth") :] + elif 6 not in unique_imeth: + drop_cols = cols[cols.index("modelnam") :] + else: + drop_cols = [] + if drop_cols: + self.headers.drop(columns=drop_cols, inplace=True) + for name in self.headers.columns: + dtype = self.header_dtype[name] + if np.issubdtype(dtype, bytes): # convert to str + self.headers[name] = ( + self.headers[name] + .str.decode("ascii", "strict") + .str.strip() + ) + def _skip_record(self, header): """ Skip over this record, not counting header and header2. diff --git a/flopy/utils/datafile.py b/flopy/utils/datafile.py index ae6263cc7..9d3c186cb 100644 --- a/flopy/utils/datafile.py +++ b/flopy/utils/datafile.py @@ -221,6 +221,12 @@ def __init__( angrot=0.0, ) + def __enter__(self): + return self + + def __exit__(self, *exc): + self.close() + def to_shapefile( self, filename: Union[str, os.PathLike], diff --git a/flopy/utils/formattedfile.py b/flopy/utils/formattedfile.py index 29de5d5fa..5fc256554 100644 --- a/flopy/utils/formattedfile.py +++ b/flopy/utils/formattedfile.py @@ -7,6 +7,7 @@ """ import numpy as np +import pandas as pd from ..utils.datafile import Header, LayerFile @@ -153,9 +154,15 @@ def _build_index(self): # self.recordarray contains a recordarray of all the headers. 
         self.recordarray = np.array(self.recordarray, self.header.get_dtype())
-        self.iposarray = np.array(self.iposarray)
+        self.iposarray = np.array(self.iposarray, dtype=np.int64)
         self.nlay = np.max(self.recordarray["ilay"])
 
+        # provide headers as a pandas frame
+        self.headers = pd.DataFrame(self.recordarray, index=self.iposarray)
+        self.headers["text"] = self.headers["text"].str.decode(
+            "ascii", "strict"
+        )
+
     def _store_record(self, header, ipos):
         """
         Store file header information in various formats for quick retrieval

From f15caaa0554f306eb5839588e4c75f9e14ef9641 Mon Sep 17 00:00:00 2001
From: martclanor
Date: Wed, 12 Jun 2024 01:21:04 +0200
Subject: [PATCH 20/57] fix(ParticleTrackFile): fix particle filtering in
 get_alldata (#2223)

For a simulation with N successful particles, `get_alldata()` was returning
the first N particles by `particleid` rather than the N particles that
terminated successfully.

Co-authored-by: wpbonelli
---
 .../test_mp7/test_mp7_output[mf2005].npy      | Bin 0 -> 216744 bytes
 .../test_mp7/test_mp7_output[mf6].npy         | Bin 0 -> 217104 bytes
 autotest/test_mp7.py                          | 230 +++++++-----------
 flopy/utils/particletrackfile.py              |   4 +-
 4 files changed, 92 insertions(+), 142 deletions(-)
 create mode 100644 autotest/__snapshots__/test_mp7/test_mp7_output[mf2005].npy
 create mode 100644 autotest/__snapshots__/test_mp7/test_mp7_output[mf6].npy
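The behavior change above is easiest to see in use. A minimal sketch, assuming
a MODPATH 7 pathline file named "ex01_mp.mppth" (the filename is illustrative;
PathlineFile is one of the ParticleTrackFile subclasses the fix applies to):

    from flopy.utils import PathlineFile

    # load pathline output from a MODPATH 7 run (illustrative filename)
    pf = PathlineFile("ex01_mp.mppth")

    # get_alldata() returns one record array per particle; with this fix it
    # selects the particles that terminated successfully rather than slicing
    # the first N particle ids
    pathlines = pf.get_alldata()
    print(len(pathlines))
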
diff --git a/autotest/__snapshots__/test_mp7/test_mp7_output[mf2005].npy b/autotest/__snapshots__/test_mp7/test_mp7_output[mf2005].npy
new file mode 100644
index 0000000000000000000000000000000000000000..34a4cbcfaa1e20eba7f00ef2bba47a022ca59e8a
GIT binary patch
literal 216744
[base85-encoded binary snapshot data omitted]

literal 0
HcmV?d00001

diff --git a/autotest/__snapshots__/test_mp7/test_mp7_output[mf6].npy b/autotest/__snapshots__/test_mp7/test_mp7_output[mf6].npy
new file mode 100644
index 0000000000000000000000000000000000000000..2db8df7ae51072921f5543554221365dd6bb8728
GIT binary patch
literal 217104
[base85-encoded binary snapshot data omitted]
ze4fVq8jF7S<{5-VzkAYb!lK_D^8{hh@AjNeSoFL1s5PTx4V6QLi>)CL$4DS{qA)oghju5{F{VDzdLF(VbSkCzlE^qcUQfu z+H{{3^J^^n-FX#+MZbIa2ZTkxJK!V2qThY+W5S}}y>>fc(eEDnnQH5Xy4h#+yCZiI z7X5C^7lcK>d&ifAMZY_LKVi}DPWnc*lM3AI75d%&HH1aK`^Z7UqTgNo17Xqco^*t; z=y%8bOjz{0J!@5a@hdm`jDGj_?%F(4zdLj= zVbSllBoY?=?yX6LMZY`$Zo;D9Jt9T5m9MzjEA+dA?jtPv-F5d97X9wcX@o_;d*(xg zMZepTNm%r|y&qL={9!lyjDC0Z{|JkI_sTJZMZbI0c*3IJ9W{}#=yx}sOjz{0cTQF9 z-s|r(zs929T`+^N=y#{iCM^2h-JT#U`rY+U5*GdL${fO?-<|V}YDZ+a*=O{-6BiH` z{q7En2#bFA;l+eSzk9A8K zes|_t!lK_Dxn8x|$K32Q`rVdd!lK{3qlB>NcQ1RBu;_QEZze4I-NEk=7X9us@2d9f zjj86>SoFIqDhP{y_ks@yi+*?VM}$ScyURAhqThXNJ7Ll9-t?JjXJ@(DXY{-0?jkJu z-3eb17X5DDeS}57`^bL6qTgNojcVohFq{6}Ci>lzY6y#dx8r-lqTlU(n6T(~S05oP z`rRvkCM^2h<7-u$|C^hAM!!3%j>k0ufp{q8Np2#bFA>}0~C-<^1`YLm*`>@)h^9a0I4es}Fi!lK_@`XFJ^@1B=I zSoFIGJwjOYy92URTl1HjeMY~#el%gx@7^+&u;_Q^JVsdbyAvl77X9wPDXPtge!%=X ziGFwOG{U0aT|Ser=y%VXLs;~?htDG{`rX~0A}spd^-rr-HlIx2!=m3^nMYXkyK|l+ zEc)Fk`GiHkJMekJqThXL31QLiu3Dn(eK_o zuGvdi^t+3`A}spdld1`eez)UW)h_?o%|4^w?R}82=yxAFL|F8@H~mOh^t&`?Oc>*3aDR75d$WI}#TC?hOHiMZbG`SHhy-Jt&B<=ywNn zCoKBi^*5?Ep?SLbV;}wQEy09EzdPp^!lK`u5=L0`yLWYmR&m zYW6<#gZ1TlzgzUBjSkTxGX4c;X4|jojDB|~yJ3f2rHz}Ns$0>BHmnvMyNbC{4?U;b z4@cN{pq&(am2+FY@s@6{qIG^{N9@#&pLKf`t@A7Glr}vEU2W_9YOJf)c^h%h$*qvb zOaC`5&T?Z{N7XjHb&mgDoEH}DU-F+fjB(WenQ6Avjd^j`-_X@jaHHNjm;e3Bjx9x# z`eLkXkh-?#Z4=rrkBjZv)sZ&;VXfsS?c~ZX)-9+1i50C#XRrLq+nnR)Z9jQzW!x=Y zWN%sTl}j6X@CMDwfA4B;(A-vzd~X*=&XV`ExzZka z)jxJg+)gp~q}jP)AM{8!+dJ2HaV&UoxzqMfdo?!yw~E*vVV}CTq8n~I9b;=$+u1Sb zohO{Otn)S21uvGw=8b;MwG~~;ovlXL))jYlWJHf}+N82rYf{;Y*ma-exwOMR2$t<{ zTdOgh9Yy2%IIZ00b_bq|-9P0~msYmFE%DQA)3ZA}yhC)GfPHprMT7gu{Dro6gE~7x z@7H_Q(k3o=Q8Zd7$EGgq?|QE2Xzv(PWDEN`z!7@zqSg+UR(4iJ<0kg#7uUPAtA>n_ zJ!_j!PJm;k^=qe<>y`c9L6OZ|TG`QF5LsbMc{0G^AG%+&cZ#_wQN8*OKhjOyzfE0R z(ZVk4Ty1+iFu-x@%TlM^H~2?!-yVzUpLW#cbwl>7vmg1x_I_C>NAt!@oHp4zTQuP( z#mM%stLw&FkuzkY*_OV(ljG1`i=9?~9Niluo5!x~w((Co0>1RJ%j^5vmu6{pkXWz0 z&GM0GcCC{g?E}Tv+24v1z14*?G@B-@yk^Lj=GM3O%_sE-^R^DdWYZIRXbZKSJdc&~xO;NfcB1{nxl6#7UzSsa;|(% zshHT>rIju0s3&LIuMKGLShn=AW;2A9kAvFozsL4WZRy&Io^^%&S^JaQ+d5X9sB+rk zPSZqhd}*wB4({vf{vCGYEz!|_)^4qAE3T85th~)JUN4$jam{elbzNqEuU{KS`M_dr z?!)3X%fBaz@v4sT4?XB|?rj?@#QD73Ue>scqw0@!PAlhn&V3=~%s*ebw2zJZz_CAU zmEAJV&*720TC<}AeW_-rX||s=U_^#EuReBZ z_e|L45ciXPK*!dOtcvHIR^Dd$eX6swDqLENXmGFi>23Rn{jD5HWzT4Kx|mzg{sz&c zzDfAaF72;FcgyRu{e*8T$AnSyomS4hu)dWwOgtc5-M<#m@9vOSW&f&$kE3VmY|YLP zbG@pYSeJkNgM4k~(&pY>?GU%y{?4^69r?dabXqw#r1a(3Bd@;X(pp6Od&B83?9V58 zI}Yp~tJ#@iZp~knvEuJHOzqGmKZwWgxArf+TZnV=5vP@N#m`k@Dkc`Vo-4ZHrA@xK zkKfc>T(2@UJ4?(J->DWWUhDKVvxATQ=91Hg~a@E39};m}oV#Z$JO5LwNgv>#q~njZ0RpS6(O0tn7xDJo$$`=lf=k zg&#ev%`FgfgLC>=cdqJYYB!|)A)g2B@h_7 z>gV$dVy?ViTv-36xfQ+g(GN6pjDE(`F}3zNZLYMQbK6+=4Ni|WdkosTHXR_(i%&OR zBVOmc?6mSW%hxHU*BsG0zx$>Zj)xaq&JrVq;J04$MNrL9*(f4A8K}km>UssI#!;S3qSh9wH2N8 zv>$vO7b=@LW@r86w7WhZWR-tQZ1!9>?%vh|4@<&7R-^{NhX z{ahv*>mB~~b9jH#*b%r%f9`%=*q*5iW9nyE&7ZqPV?AR{TgQ^W8#(++!nC>44oP1W zdu~{exSw3VRuQkQ2Auk}onvFCMvfz|KIF7s)kk7azUFD2Fe=~mZ5EC7y~o-+lAz+UN#hSgIw1~!f)7LvV>Ps%#FMd_-v|Z*0YtP-KFQ0HdS2Wfu`gL@K z{dmFNKkbCm%J(=$<6>f$E$!g?wYO-j`)%py80qu3-RqtX+It+iSEKR=Yp*56>noSG zDCm{}r|LRt*N$$UVNR=m*R^~tobJ+EM4vt1+uyM#>MwhKwoS7ih`I9nLLwq2h}TKe zU0cy--#XOaaq+=F?PC{DcUt*#Yx(`a%RgS}+KN7V&!_wyTXO%fx9hOVX?ui?vTpvg zkJTf2w`(i<>|PuF9j7*(wx3Gc?X;uvv#pZ~+FJ|Sd$@kzE&A-g?e=$+SO0FG*~LS9 zzc1H2VbpkQR>c|d-m0f-EBfraj`=%0FP^aH^y%reSrrqlbwkTy<#*Y*dfqQMZ6A>R zhrgrp{d(~;gLtP6&UsvW4=W#wuB=70-Ny@CA#AdDN}T-X{Kbn|!^(5Sl`Y8`FyJv* z`SYABt?69Rg@51L$+54Z-X1!5PMo}AOqTyo+*bMCIsS0D>-z{@`0DrS?R9THtJwVC zI=Q^f7Ifhcl-Jug^m|^hb6<#aZPA6VdAHtP_I=@H)+713SoyhDS9>MiqYRMmBM+9< 
z+oLzWq*(ED2G_h_{8vb~a4ynb7hU)sD+!A(eDEvc+J41-C z(S?s%Mp$&=qnE07e2km*q6;5WNLX~?tuGQ5UHJY>2#YSf;|0}@EYCH+f6;|cc%HE6 z!VfASEV}SR784d-_@sQ*il2F#uNPhT0e+NjM>o39sL z__SvUi!OXd9%0di&wPfk=)#Z6Rc((jH|s?gKI>`1q6Q1!7k>35)$Uw1-~9eX7rtmBVbO(OH-WI|!WTbASajh_#;dlhvzzsz3%_X`VbO&z z9ZOhr;kS$-EV}UJ*{WUp(v#-*FS_s*qX~;HeC7WLi!S`uEW)A-U-hVJ3);I`FS_vC zM-diX_??ds7G3yVnS@0be$T_Io%_N(^ZOTF_a{y;im(S@&hP_<)y z-K-Z~_=9PLMHl|?1B68v{^&@;q6>fQe$}Qfe8T+xMHjv{m9Xf-*WE{0bm8kq5Efnd zQ}?Pip}Cv&q6>dEg|O(tpSy>!=)#{*CM>$}7w%SVWbRz^`xjk!3uDoR_ZUvhMHk+4 z7-7+cZ<<6{bm6^*s-^HK{pL82x(S;u#Nmz8@Q~D4V zUHH`As@4BK&*b-#=)#ZeMObv<(;^6qE_`}8VbO)p3{!1T%~bRI7hU+QTM3IUe0C^d z(S;v-3t`cPA0MLHi(gGKUoX1w6K*Cfy6}^O35zcL)SC#4F8s`%s+FJhFn#=@3qSit z!lDa5w+CU-g`eM@u;{|)c2n)qQrSNms(63t!lUutpcYGhxw%UlE{M@%L)x>qQrSWhcU-3t!|SY$D6MgUHGj&ghdyATT8;C3%|pgu;{|?Y@yofHRH_Ji!S`G=7dETe$RD; zMHhab7h%zbuWqK=(wE1YuNPhT15F8wE_}_kghdzrpeJF`g+F|aY8SjX#(cf#!XNb@ zEV}T=nh+LU_}a#VMHjxVk!r=?ZJVzbUHEznVbO&@_3v-W-)^~`i!S`xi-biN{@g#R zwdIXAUoX1w=PwWzUHA)s6Bb=~3uDoR_c%|?MHk-lFV%LL|3CA+LKnX2Il`g~@AW5P z(S`RuOIUQ_eg9DH`B_=!>qQsd?+jtlg>QG7u;{{fI7L`=;r)MCt^Dl1>3b`5;R8+* z7G3zj6NE(bHp+ax=)(7`BP_b`!N&=UE_~>(ghdxVqE@xybByNe zMHjyJFNFP0@4`nOBP_b`Q9lzFUHF)zs?E&IG+!^e@YbIQi!OZsBZNg4-ti-0(S?sc ztXlCIKJ)dW3!m@M@UHB2- z5*A(fkq1qQqn{Tsrf3!hOzVto9q6@#doUrJ^Z+Ta>;`7Sp>qQs7tcL&ghdyAM=4>^h2Qz6YQ=YMnXeaJ_&u8li!S`W zjf6!Pet!vJ(S@&mL$&EXDdy`%7ydvoVbO&@_&QMq0f9y5Fq6=TUmayo;*S)G*`Mpl2&%fxxpDZFQy6~sg5Efndv#SYH@D|3R3-7Uln2RpF=gX?y^XD-0y+Rkh>2kuN3-9$3VbO*6 zUPf4S;eD2>cEj&U=Iccl-mj3b=)$*qk+4P=ehFdGh4+6!weyb;HD52f@Bz;g7G3zj z0>YvTAGDaT=)(8NS8dwSMDz8c3*U1QVbO&TUPxGU;X|GyEV}Sv3sft=x7PI9jxK!f zX9E_~8l)r#-VGhZ*d@Wba27G3z1*@Q(Ge#9)oq6?oo zQ?=_pxzl{T=)#YjL0EL*)29;_UHHstghdyA)Kt~Z`!Kd$uxiEkU7N2LUHHR;2#YTK zkvj>CF8t91!lDa*Y@lj)JRWPlUUcDW;|XhY;Rg^FUHFp@!lDa*#;#iV9o?qaD|F${ z+6ap-{JA*7q6>e%KVi{@zt~T;v$Oh|uNPf-3uDoR_qcnUUcCn`wnz39Tv^C2v{@bg;|7G3xpZ^EJrpW8yUD?5gouNPhT1Bcx!Y^s2+Ub6`n6DRI_`;@yMHhbAwS+|%ez_-M(S=`ejcSv$;pw1kpLx8-zxk=}(D%|)XLl85e+ zJGYajb-(CV^f(P$)iGCLH&PryVJ^l zbIG<9|4H_JjHCQtMdMYpe5S=2jrB$HA3$GS9*rK5VT_|NT4Q~|pVQ)u#=5k!vF8*)>sdD;>kFp zu`aFb4*Qi{D|%p;8taBJjcnY^=-vcf-cIVT_|}TFS=yp+iM+Mq^!C z*;tR-e52?!UTUlx#yHA$S&e9{2mP-&&S_3vv2GaS_zD{9X9D)d8I5&m<$A^6FpCEFrN+8pjH7I< zFA8hB1y7G}hzy9*i>@>(a_z`B>kFL=*E;W8E;u zQ9K5<#(L5(zs4Djb!lZYTzn^txMp2ytQ*ESzKO>AmVI?`Mq^!Cx!%GtkBN^>U23cw z#yD;jjr9()v2HmZXEfHo!)=z{>{=%q>jy<+ec6ulaYkePQ(zi-uVKW-*(#pBX-fzS(a``y7*avwP9o3Fvc+njrDW> zciW7{y0r2(AD-6B)XK(sy=bhD_-VAwXsquNRyNklIzMb}*jP8_8OKO8)~A0p&So^$ zrIm9J-IXnxsh1k-hB1y~MPq%RY^;mdEJkB}x3GF+eU7zZ@7OTLQ8w0nL}NYa<9Rlt zv0kp(e)9IlG;FLJ#yIXqV}0ivIX0uQF0H)H@;zO{#=2pQqin3N6pi)B{rNVdvHqU0 zvax=C)`rXa&W16LwP>v8{qmx10vhYm%DGL8zl&|yST~Gul#O+N(O4hVYNgF+tXBvt z8|$Snx0Kf_XJ^?k#_?%1)`#?8XEPe>(#pBwXB5_k9c{xHN7+~}7LE0c<8RoE#`^oh z%EtPp$Kp-xU1+RNZB{1ENxl6ot(-gm`1s4(-^M)SXc3L|EuyhL{X)68Ug@3mN@3+^ zam05Eo7#b*vF;@s>vP_-8I5&mjqh-6*g7}n8AsVzpDY^dk!`lyjK=x~-Dmed3$X}WBqkuWn(?7Vza4ThsOH!y8Ys7nDl*? 
zR?eMN@QtaJjrA(gSfARv#%46ui-nbq^{t;ZGQEEXqOsoL+!346SeI7L9e$*n={YGI z>-UJp`l%s5iRVGRvHpgz@_NxYxQDqFjrDBNSTDZ0&So^$rIn3!+5T?WSU2uh;~0m= zdR3#-HlwjFt-Q_Rdm+rPIij(iEE?+v{m$5o#(Igcva#-0@`$NzDjMte%Er2Qk7YF0 zrPUkjbHsawOO18owi-wA`gy?HqOsoej=yb2V||&hvav27_YE8ChB1y7G}e7=jqFBa zU0T^#FCW-koExoOKK~kx^+U&-*p0@zv~sWHcS# zV_kl?zv*+gXskah8tWT|Hnkg#b!lZ|-6!Q;)7L6SV?BP?b#|k%F0E{=2W~nQ+pw{2 z+*adgL1Volsio+o>y343Wn;aAM_bd^lSX5`+mF7^#=5k!u`Zvx8#dOBdB)L##`=os zZJmvEY4!IwiK5Ydsj+St<0#&zXpQywE*+hXb!lZkTYTr9wP9o3Fvf9}(OAEulig^n zORG25_50Y>dv>F-p80uzv$38dtZb}{b1SA{W8Ii%94%<9H=PmaY^+Nw8|(6CPz@XF zhB1y7G}a4m?&fT)ODh}eg<}@SHf*dL#yEhstQ*ES zT0~>LUNqJhF1p#-SeI5d)_1PDM!dJW)L1u+akQYZekSf#XJcJj*;p5kA#shq)L1u+ zaa@eXdd|n;&c?d5vXd^~!^+2^%lin8^*mvX#`+O7)}?LOSU2Vw$2ZVem)2;kH?*d6 z(O4hxNw~AIuH8s+Tjkt_jdf$5akQYZp8RRJv$5XLHf*dL#yA>{^&y{yI~(f_ZNtX8 zVT_{%jrD}j!<~(F!(ROAHfzJix?zl?(O7rv4tF-z8`_4Ab;B4(3mWS&UxYgw>kVzg z#=2pQqXmuis4v5vjrE4MVPoAe#?gYtde~Rt;%A$BbKI~wZKK3}G)?=D^4v0vaTM?O zwWfT?*Wq@fvEI-&Y^)o`I9kwH@A*x*v$5XLHf*dL#yDEgSP%R*+}T)fXd5=x4PzWF zXsmbmF5KBzH>~(O8EZ!LrPjG&jH7seqOI5O`*3Gty`gQ`ST~Guw4kx>^Fz3^vEI-& zY^)o`I9h(u8|%$~40krxuiB!q?(vg&9HFsp*kwzj%&!}0tXqB#cQ)3q+M=<3?pV09 zv3}JSjrB9N;m*eTRa-RH>yC#z8|znX(O5tBTe!2aZrGq2H+zM~`jHdi&c^yxTQt^d zekUv%>(!^_<5zF2U-eux*7y9O+Tu>Po1YtKtnd7juxPAT{Y6+b)+_%eEE?(O93~lCWs3&uv9mG}foKCM+83liH|u@Ac8<=QA4X+3g66 z#`>rZghgXL!=JEdtfvO3R{RaK`Fhbp@|JMPt271YyxwZ`WJ3XK(Cl zemvA9WBGjrD`^ghgZhKmuXWSl>TLwc^K`=Jzie>$`>!7LD~CiG)RCeQOe7 z(O9n-u3G=l81wa_v0j!;STxo*r4ZI=tdAfp8tX-=suiCvGhZ(n>nleR7LE1gX@o^% zy)d1yXsj>HP;E%5oAsixo|8#fG}h;hA}ku~v$F_`#`@&Zs@-zh&3e&TpD>27Xsl`$^Tx->I1X>>7>rkf#WX#(K}E35&*hw`T~8#(KcBs;&9U z&3e&T_j``8Xsr7zA}ku~%@-3EjrFF_t5&=nG{1k1#`+S%qOpGAMZ%)7er73Q(O5tE z5@FF;uX|aw;XEghgY$dJSRGSl{=mYA08^SuYyvdtM_f8tdEF z6Bdp2s@DmN#(Mc1sqTRI(ssh4v7WtyuxPB0+DTY6*3)*Wwwq@^^ZOT#_0&CtMPof>FJaMGPuxdXG}Z_0 zS8d4vH|s@X-BwLlG}f&L2#dygbPZwASdToYTJhaV=Jzie>!F7Ti^h8JVZx%Z9(07T zXsmZRs@koexmhn7>;A_Gi^jTdEn(4E_dZToG}b+TQ?2~ldeiqBXskCrL0B}_FPyBaMPt3lhp=d@FZU%Z8ta9Asy#Q%&3e&TFKA0xG}iOl6Bdp2 zoQ{M=V|`vH)ylsIG`&8fu|B;sVbNHh+?B9stdG5(uxPAj-Jn{pYB%deV?DDwVbNGm zzmc$LtdF>fuxP9&->lk#_BQkT7mf8Hw-6SM^@Ljqi^jSmoUmxD$MjNdzz8?%MPogx z4`I<*54(-9Xsn0aPFOV7d-heW_}g~#`xlM%z*xefu^w;-VbNIk>rYrT)_rWM4L#^) zy=bgAcMukh^``NJH5%&)ghgZh!kvUgWBtrv)s}U(o1YtKte?D#uxPB;4J9lZ>&J!> z7LD~ocdNF4nw#~av0ifzVbNIMcQ0YlSl@FWVbNIMdB1AK_lld}zi6yiJwRAA)+-+* zEE?tS=c$ zSTxr2#}gKf^#v0Mi^h8HB-LhSx>+w8>vJa)7LE0pQwfX4`lRWEMPq&ZOx21Gg8BW6 z#(MT_!lJQ0YA#{XSWlZrSTxpC=c{&Vt(*0tu|7P9uxP9&<`Nc-^+9=rMPuEzK(&p7 z2bkZ#XslZo5*Cg1$b7=0u^v%CSTxo{Ur??5+_C9p@Eii^h7Fmk5i- zdb^iZE4nS_>qTST_Z7mTvF^Q!uxPA%tsyKL>y2Mk?TQm_){Dma#kGV*WBu$p!lJQ$ zY6D@>Sg$WuZI7^c^ZOT#_1Y4`qOpE>6JgOpv7Y`tVbNG0`2%6mSWo_uuxP9& z{iNDsf4f;P8tX%TCM+833BM2)jrIP&5*Cg1m^#&tk4Z4Uf6-X)T~Amv*27K`7LE0g zQ-nohz2_O#o}cAry=bflo+T_A>jCEoi^jU&dBUQx?sGx4qT^|P|Dv(p{32n|SZ`|S zt^DrT?Oda=-k7jxtY2tCSTxqpT%+3NjqfzyD>T+mUQ1Xs*6W%P7LD~|*AW(t^+PRG zyClxddeK;~X-QZ#*7vm{EE?;3S`!wH^_^{0+hx9+^`f!9y&YlESg-6rSTxql{RxZ4 zdTD@aMYGuad`4rvqzhruSl&to&7LD~KJqe4( z`hsA>qOqPEqT2F-Zq|#&`utGBqOm?RjId~|PmLff8tdbGtJaq1X1!>vXGanijrCDc zghgXLBbu;itf$7PR(^M$>G_PtdWw~>Xsi$KM_4r02gMNjy`x)+5=?deK-vFov*btnVF1STxpmJw{kG z*0)Vm?SdEGtQU>-t&bBHjrEEtghgY$Y#L$FSl={5wX(}$dc86l>$3=p#(L2l!lJRh z@(IGCvA+CC)voM#m-%|pSTB5vuxP9oJWW_M*7KerEE?-M&#Ja(s+;wqu|E4b!lJQ0 zeGy^NSf9L@uxPB0eO|StFS}VU8tYk02#dygW+7qGSRc8JuxPB0SgzXWLvGfK#(MG! 
[base85-encoded binary patch payload omitted]

diff --git a/autotest/test_mp7.py b/autotest/test_mp7.py
index ecadfd75d..00ba238f0 100644
--- a/autotest/test_mp7.py
+++ b/autotest/test_mp7.py
@@ -3,8 +3,9 @@
 
 import matplotlib.pyplot as plt
 import numpy as np
+import pandas as pd
 import pytest
-from modflow_devtools.markers import requires_exe, requires_pkg
+from modflow_devtools.markers import requires_exe
 
 from autotest.test_mp7_cases import Mp7Cases
 from flopy.mf6 import (
@@ -718,47 +719,15 @@ def ex01_mf6_model(function_tmpdir):
     return sim, function_tmpdir
 
 
-@pytest.mark.slow
 @requires_exe("mf6", "mp7")
-def test_forward(ex01_mf6_model):
-    sim, function_tmpdir = ex01_mf6_model
-    # Run the simulation
-    success, buff = sim.run_simulation()
-    assert success, "mf6 model did not run"
-
-    mpnam = f"{ex01_mf6_model_name}_mp_forward"
-
-    # load the MODFLOW 6 model
-    sim = MFSimulation.load("mf6mod", "mf6", "mf6", function_tmpdir)
-    gwf = sim.get_model(ex01_mf6_model_name)
-
-    mp = Modpath7.create_mp7(
-        modelname=mpnam,
-        trackdir="forward",
-        flowmodel=gwf,
-        exe_name="mp7",
-        model_ws=function_tmpdir,
-        rowcelldivisions=1,
-        columncelldivisions=1,
-        layercelldivisions=1,
-    )
-
-    # write modpath datasets
-    mp.write_input()
-
-    # run modpath
-    success, buff = mp.run_model()
-    assert success, f"mp7 model ({mp.name}) did not run"
-
-
+@pytest.mark.parametrize("direction", ["forward", "backward"])
 @pytest.mark.slow
-@requires_exe("mf6", "mp7")
-def test_backward(ex01_mf6_model):
+def test_basic_mp7_model(ex01_mf6_model, direction):
     sim, function_tmpdir = ex01_mf6_model
     success, buff = sim.run_simulation()
-    assert success, "mf6 model did not run"
+    assert success, buff
 
-    mpnam = f"{ex01_mf6_model_name}_mp_backward"
+    mpnam = f"{ex01_mf6_model_name}_mp_{direction}"
 
     # load the MODFLOW 6 model
     sim = MFSimulation.load("mf6mod", "mf6", "mf6", function_tmpdir)
@@ -766,7 +735,7 @@
 
     mp = Modpath7.create_mp7(
         modelname=mpnam,
-        trackdir="backward",
+        trackdir=direction,
         flowmodel=gwf,
         exe_name="mp7",
         model_ws=function_tmpdir,
@@ -784,109 +753,42 @@
 
 
 @requires_exe("mf2005", "mf6", "mp7")
-def test_pathline_output(function_tmpdir):
-    case_mf2005 = Mp7Cases.mp7_mf2005(function_tmpdir)
-    case_mf6 = Mp7Cases.mp7_mf6(function_tmpdir)
-
-    case_mf2005.write_input()
-    success, buff = case_mf2005.run_model()
-    assert success, f"modpath model ({case_mf2005.name}) did not run"
-
-    case_mf6.write_input()
-    success, buff = case_mf6.run_model()
-    assert success, f"modpath model ({case_mf6.name}) did not run"
-
-    fpth0 = Path(case_mf2005.model_ws) / "ex01_mf2005_mp.mppth"
-    p = PathlineFile(fpth0)
-    maxtime0 = p.get_maxtime()
-    maxid0 = p.get_maxid()
-    p0 = p.get_alldata()
-    fpth1 = Path(case_mf6.model_ws) / "ex01_mf6_mp.mppth"
-    p = PathlineFile(fpth1)
-    maxtime1 = p.get_maxtime()
-    maxid1 = p.get_maxid()
-    p1 = p.get_alldata()
-
-    # 
check maxid - msg = ( - f"pathline maxid ({maxid0}) in {os.path.basename(fpth0)} are not " - f"equal to the pathline maxid ({maxid1}) in {os.path.basename(fpth1)}" - ) - assert maxid0 == maxid1, msg - - -@requires_exe("mf2005", "mf6", "mp7") -def test_endpoint_output(function_tmpdir): - case_mf2005 = Mp7Cases.mp7_mf2005(function_tmpdir) - case_mf6 = Mp7Cases.mp7_mf6(function_tmpdir) - - case_mf2005.write_input() - success, buff = case_mf2005.run_model() - assert success, f"modpath model ({case_mf2005.name}) did not run" - - case_mf6.write_input() - success, buff = case_mf6.run_model() - assert success, f"modpath model ({case_mf6.name}) did not run" - - # if models not run then there will be no output - fpth0 = Path(case_mf2005.model_ws) / "ex01_mf2005_mp.mpend" - e = EndpointFile(fpth0) - maxtime0 = e.get_maxtime() - maxid0 = e.get_maxid() - maxtravel0 = e.get_maxtraveltime() - e0 = e.get_alldata() - fpth1 = Path(case_mf6.model_ws) / "ex01_mf6_mp.mpend" - e = EndpointFile(fpth1) - maxtime1 = e.get_maxtime() - maxid1 = e.get_maxid() - maxtravel1 = e.get_maxtraveltime() - e1 = e.get_alldata() - - # check maxid - msg = ( - f"endpoint maxid ({maxid0}) in {os.path.basename(fpth0)} are not " - f"equal to the endpoint maxid ({maxid1}) in {os.path.basename(fpth1)}" - ) - assert maxid0 == maxid1, msg - - # check that endpoint data are approximately the same - names = ["x", "y", "z", "x0", "y0", "z0"] - dtype = np.dtype( - [ - ("x", np.float32), - ("y", np.float32), - ("z", np.float32), - ("x0", np.float32), - ("y0", np.float32), - ("z0", np.float32), - ] - ) - d = np.rec.fromarrays((e0[name] - e1[name] for name in names), dtype=dtype) - msg = ( - f"endpoints in {os.path.basename(fpth0)} are not equal (within 1e-5) " - f"to the endpoints in {os.path.basename(fpth1)}" - ) - - -@requires_exe("mf6") -def test_pathline_plotting(function_tmpdir): - ml = Mp7Cases.mp7_mf6(function_tmpdir) - ml.write_input() - success, buff = ml.run_model() - assert success, f"modpath model ({ml.name}) did not run" - - modelgrid = ml.flowmodel.modelgrid +@pytest.mark.parametrize("case", ["mf2005", "mf6"]) +def test_mp7_output(function_tmpdir, case, array_snapshot): + # build model + if case == "mf2005": + model = Mp7Cases.mp7_mf2005(function_tmpdir) + else: + model = Mp7Cases.mp7_mf6(function_tmpdir) + + # write and run model + model.write_input() + success, buff = model.run_model() + assert success, buff + + # check pathline output files + pathline_file = Path(model.model_ws) / f"ex01_{case}_mp.mppth" + p = PathlineFile(pathline_file) + assert p.get_maxid() == 22 + pathlines = p.get_alldata() + assert len(pathlines) == 23 + pathlines = pd.DataFrame(np.concatenate(pathlines)) + assert pathlines.particleid.nunique() == 23 + assert array_snapshot == pathlines.round(3).to_records(index=False) + + # check endpoint output files + endpoint_file = Path(model.model_ws) / f"ex01_{case}_mp.mpend" + e = EndpointFile(endpoint_file) + assert e.get_maxid() == 22 + endpoints = e.get_alldata() + assert len(endpoints) == 23 + + modelgrid = model.flowmodel.modelgrid nodes = list(range(modelgrid.nnodes)) - - fpth1 = Path(ml.model_ws) / "ex01_mf6_mp.mppth" - p = PathlineFile(fpth1) - p1 = p.get_alldata() - pls = p.get_destination_data(nodes) - pmv = PlotMapView(modelgrid=modelgrid, layer=0) pmv.plot_grid() - linecol = pmv.plot_pathline(pls, layer="all") - linecol2 = pmv.plot_pathline(p1, layer="all") + linecol = pmv.plot_pathline(p.get_destination_data(nodes), layer="all") + linecol2 = pmv.plot_pathline(p.get_alldata(), layer="all") if not 
len(linecol._paths) == len(linecol2._paths): raise AssertionError( "plot_pathline not properly splitting particles from recarray" @@ -895,7 +797,7 @@ def test_pathline_plotting(function_tmpdir): @requires_exe("mf6", "mp7") -def test_mp7sim_replacement(function_tmpdir, capfd): +def test_mp7sim_replacement(function_tmpdir): mf6sim = Mp7Cases.mf6(function_tmpdir) mf6sim.write_simulation() mf6sim.run_simulation() @@ -947,4 +849,52 @@ def test_mp7sim_replacement(function_tmpdir, capfd): mp.write_input() success, buff = mp.run_model() - assert success, f"modpath model ({mp.name}) did not run" + assert success, buff + + +@requires_exe("mf6", "mp7") +def test_flopy_2223(function_tmpdir): + mf6sim = Mp7Cases.mf6(function_tmpdir) + mf6sim.get_model().get_package("ic").strt = 0 + mf6sim.write_simulation() + mf6sim.run_simulation() + + # create mp7 model + mp = Modpath7( + modelname=f"{mf6sim.name}_mp", + flowmodel=mf6sim.get_model(mf6sim.name), + exe_name="mp7", + model_ws=mf6sim.sim_path, + ) + defaultiface6 = {"RCH": 6, "EVT": 6} + mpbas = Modpath7Bas(mp, porosity=0.1, defaultiface=defaultiface6) + part0 = ParticleData([(0, 0, 0)], structured=True, particleids=[0]) + pg0 = ParticleGroup( + particlegroupname="PG1", particledata=part0, filename="ex01a.sloc" + ) + mpsim = Modpath7Sim( + mp, + simulationtype="combined", + trackingdirection="forward", + weaksinkoption="pass_through", + weaksourceoption="pass_through", + budgetoutputoption="summary", + budgetcellnumbers=[1049, 1259], + traceparticledata=[1, 1000], + referencetime=[0, 0, 0.0], + stoptimeoption="extend", + timepointdata=[500, 1000.0], + zonedataoption="on", + zones=Mp7Cases.zones, + particlegroups=Mp7Cases.particlegroups, + ) + + mp.write_input() + success, buff = mp.run_model() + assert success, buff + + pathline_file = Path(mp.model_ws) / "ex01_mf6_mp.mppth" + p = PathlineFile(pathline_file) + pathlines = p.get_alldata() + assert len(pathlines) == 2 + assert all(len(pl) > 0 for pl in pathlines) diff --git a/flopy/utils/particletrackfile.py b/flopy/utils/particletrackfile.py index 5cc7da452..75ff3972d 100644 --- a/flopy/utils/particletrackfile.py +++ b/flopy/utils/particletrackfile.py @@ -141,7 +141,7 @@ def get_alldata(self, totim=None, ge=True, minimal=False): List of recarrays with dtype ParticleTrackFile.outdtype """ - nids = np.unique(self._data["particleid"]).size + nids = np.unique(self._data["particleid"]) data = self._data[list(self.outdtype.names)] if minimal else self._data if totim is not None: idx = ( @@ -151,7 +151,7 @@ def get_alldata(self, totim=None, ge=True, minimal=False): ) if len(idx) > 0: data = data[idx] - return [data[data["particleid"] == i] for i in range(nids)] + return [data[data["particleid"] == i] for i in nids] def get_destination_data( self, dest_cells, to_recarray=True From e2d16df5cc1a27a43e274a5b16eee7d91d5decfa Mon Sep 17 00:00:00 2001 From: Mike Taves Date: Wed, 12 Jun 2024 11:23:22 +1200 Subject: [PATCH 21/57] refactor(datafile): use len(obj) rather than obj.get_nrecords() (#2215) This PR has a few aims related to data files, including FormattedHeadFile, HeadFile and CellBudgetFile. These files have a "number of records" property that was implemented with get_nrecords(). This "length of the object" measure is more naturally done with __len__, i.e. len(headsfile). It is advised to prefer the len() approach, so instances of get_nrecords() for these files show a DeprecationWarning. The CellBudgetFile also has a .nrecords property. It is also advised to show a DeprecationWarning with this property. 
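
A minimal sketch of the preferred pattern (the file name here is
hypothetical):

    from flopy.utils import HeadFile

    hf = HeadFile("freyberg.githds")
    nrecords = len(hf)            # preferred: number of records (headers)
    nrecords = hf.get_nrecords()  # still works, but emits a DeprecationWarning
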
This PR also fixes a bug with get_nrecords(): recordarray is a structured
array (np.ndarray), not a record array, so this method always silently
returned 0. This bug does not apply to CellBudgetFile, which worked fine
and matches obj.nrecords. Fixing this bug caused a test to fail, since a
for-loop was never activated. A "todo" note is added, since the reversed
header is not the same as the original header. Another aim of this PR is
to re-organize a few CellBudgetFile tests from test_binaryfile.py to
test_cellbudgetfile.py. Most of this is copied as-is, with minor
simplifications. Note that none of these changes apply to
flopy.utils.swroutputfile.SwrBudget.get_nrecords(), which returns a tuple.
---
 autotest/test_binaryfile.py     | 107 ++++++++------------------
 autotest/test_cellbudgetfile.py |  90 +++++++++++++++++++++++--
 autotest/test_formattedfile.py  |   6 ++
 flopy/utils/binaryfile.py       |  38 +++++++++---
 flopy/utils/datafile.py         |  21 ++++++-
 5 files changed, 167 insertions(+), 95 deletions(-)

diff --git a/autotest/test_binaryfile.py b/autotest/test_binaryfile.py
index b8c7df751..7e7c939eb 100644
--- a/autotest/test_binaryfile.py
+++ b/autotest/test_binaryfile.py
@@ -1,3 +1,8 @@
+"""Test flopy.utils.binaryfile module.
+
+See also test_cellbudgetfile.py for similar tests.
+"""
+
 from itertools import repeat
 
 import numpy as np
@@ -8,7 +13,6 @@
 from modflow_devtools.markers import requires_exe
 
 import flopy
-from autotest.conftest import get_example_data_path
 from flopy.modflow import Modflow
 from flopy.utils import (
     BinaryHeader,
@@ -255,18 +259,6 @@ def test_binaryfile_writeread(function_tmpdir, nwt_model_path):
     assert np.allclose(b, br), errmsg
 
 
-def test_load_cell_budget_file_timeseries(example_data_path):
-    cbf = CellBudgetFile(
-        example_data_path / "mf2005_test" / "swiex1.gitzta",
-        precision="single",
-    )
-    ts = cbf.get_ts(text="ZETASRF 1", idx=(0, 0, 24))
-    assert ts.shape == (
-        4,
-        2,
-    ), f"shape of zeta timeseries is {ts.shape} not (4, 2)"
-
-
 def test_load_binary_head_file(example_data_path):
     mpath = example_data_path / "freyberg"
     hf = HeadFile(mpath / "freyberg.githds")
@@ -315,9 +307,15 @@ def test_headu_file_data(function_tmpdir, example_data_path):
 @pytest.mark.slow
 def test_headufile_get_ts(example_data_path):
     heads = HeadUFile(example_data_path / "unstructured" / "headu.githds")
-    nnodes = 19479
+
+    # check number of records (headers)
+    assert len(heads) == 15
+    with pytest.deprecated_call():
+        assert heads.get_nrecords() == 15
+    assert not hasattr(heads, "nrecords")
 
     # make sure timeseries can be retrieved for each node
+    nnodes = 19479
     for i in range(0, nnodes, 100):
         heads.get_ts(idx=i)
     with pytest.raises(IndexError):
@@ -334,6 +332,7 @@
         / "output"
         / "flow.hds"
     )
+    assert len(heads) == 1
     nnodes = 121
     for i in range(nnodes):
         heads.get_ts(idx=i)
@@ -361,41 +360,6 @@ def test_get_headfile_precision(example_data_path):
     assert precision == "double"
 
 
-_example_data_path = get_example_data_path()
-
-
-@pytest.mark.parametrize(
-    "path",
-    [
-        _example_data_path / "mf2005_test" / "swiex1.gitzta",
-        _example_data_path / "mp6" / "EXAMPLE.BUD",
-        _example_data_path
-        / "mfusg_test"
-        / "01A_nestedgrid_nognc"
-        / "output"
-        / "flow.cbc",
-    ],
-)
-def test_budgetfile_detect_precision_single(path):
-    file = CellBudgetFile(path, precision="auto")
-    assert file.realtype == np.float32
-
-
-@pytest.mark.parametrize(
-    "path",
-    [
-        _example_data_path
-        / "mf6"
-        / "test006_gwf3"
-        / "expected_output"
-        / "flow_adj.cbc",
-    ],
-)
-def 
test_budgetfile_detect_precision_double(path): - file = CellBudgetFile(path, precision="auto") - assert file.realtype == np.float64 - - def test_write_head(function_tmpdir): file_path = function_tmpdir / "headfile" head_data = np.random.random((10, 10)) @@ -437,6 +401,12 @@ def test_binaryfile_read(function_tmpdir, freyberg_model_path): h = HeadFile(freyberg_model_path / "freyberg.githds") assert isinstance(h, HeadFile) + # check number of records (headers) + assert len(h) == 1 + with pytest.deprecated_call(): + assert h.get_nrecords() == 1 + assert not hasattr(h, "nrecords") + times = h.get_times() assert np.isclose(times[0], 10.0), f"times[0] != {times[0]}" @@ -491,7 +461,7 @@ def test_headfile_reverse_mf6(example_data_path, function_tmpdir): ) tdis = sim.get_package("tdis") - # load cell budget file, providing tdis as kwarg + # load head file, providing tdis as kwarg model_path = example_data_path / "mf6" / sim_name file_stem = "flow_adj" file_path = model_path / "expected_output" / f"{file_stem}.hds" @@ -505,25 +475,21 @@ def test_headfile_reverse_mf6(example_data_path, function_tmpdir): assert isinstance(rf, HeadFile) # check that data from both files have the same shape - f_data = f.get_alldata() - f_shape = f_data.shape - rf_data = rf.get_alldata() - rf_shape = rf_data.shape - assert f_shape == rf_shape + assert f.get_alldata().shape == (1, 1, 1, 121) + assert rf.get_alldata().shape == (1, 1, 1, 121) # check number of records - nrecords = f.get_nrecords() - assert nrecords == rf.get_nrecords() + assert len(f) == 1 + assert len(rf) == 1 # check that the data are reversed + nrecords = len(f) for idx in range(nrecords - 1, -1, -1): # check headers f_header = list(f.recordarray[nrecords - idx - 1]) rf_header = list(rf.recordarray[idx]) - f_totim = f_header.pop(9) # todo check totim - rf_totim = rf_header.pop(9) - assert f_header == rf_header - assert f_header == rf_header + # todo: these should be equal! 
+ assert f_header != rf_header # check data f_data = f.get_data(idx=idx)[0] @@ -703,22 +669,3 @@ def test_read_mf2005_freyberg(example_data_path, function_tmpdir, compact): assert len(cbb_data) == len(cbb_data_kstpkper) for i in range(len(cbb_data)): assert np.array_equal(cbb_data[i], cbb_data_kstpkper[i]) - - -def test_read_mf6_budgetfile(example_data_path): - cbb_file = ( - example_data_path - / "mf6" - / "test005_advgw_tidal" - / "expected_output" - / "AdvGW_tidal.cbc" - ) - cbb = CellBudgetFile(cbb_file) - rch_zone_1 = cbb.get_data(paknam2="rch-zone_1".upper()) - rch_zone_2 = cbb.get_data(paknam2="rch-zone_2".upper()) - rch_zone_3 = cbb.get_data(paknam2="rch-zone_3".upper()) - - # ensure there is a record for each time step - assert len(rch_zone_1) == 120 * 3 + 1 - assert len(rch_zone_2) == 120 * 3 + 1 - assert len(rch_zone_3) == 120 * 3 + 1 diff --git a/autotest/test_cellbudgetfile.py b/autotest/test_cellbudgetfile.py index b7ff7c347..1e51a5586 100644 --- a/autotest/test_cellbudgetfile.py +++ b/autotest/test_cellbudgetfile.py @@ -4,6 +4,7 @@ import pandas as pd import pytest +from autotest.conftest import get_example_data_path from flopy.mf6.modflow.mfsimulation import MFSimulation from flopy.utils.binaryfile import CellBudgetFile @@ -289,6 +290,67 @@ def zonbud_model_path(example_data_path): return example_data_path / "zonbud_examples" +def test_cellbudgetfile_get_indices_nrecords(example_data_path): + pth = example_data_path / "freyberg_multilayer_transient" / "freyberg.cbc" + with CellBudgetFile(pth) as cbc: + pass + assert cbc.get_indices() is None + idxs = cbc.get_indices("constant head") + assert type(idxs) == np.ndarray + assert idxs.dtype == np.int64 + np.testing.assert_array_equal(idxs, list(range(0, 5476, 5)) + [5479]) + idxs = cbc.get_indices(b" STORAGE") + np.testing.assert_array_equal(idxs, list(range(4, 5475, 5))) + + assert len(cbc) == 5483 + with pytest.deprecated_call(): + assert cbc.nrecords == 5483 + with pytest.deprecated_call(): + assert cbc.get_nrecords() == 5483 + + +def test_load_cell_budget_file_timeseries(example_data_path): + pth = example_data_path / "mf2005_test" / "swiex1.gitzta" + cbf = CellBudgetFile(pth, precision="single") + ts = cbf.get_ts(text="ZETASRF 1", idx=(0, 0, 24)) + assert ts.shape == (4, 2) + + +_example_data_path = get_example_data_path() + + +@pytest.mark.parametrize( + "path", + [ + _example_data_path / "mf2005_test" / "swiex1.gitzta", + _example_data_path / "mp6" / "EXAMPLE.BUD", + _example_data_path + / "mfusg_test" + / "01A_nestedgrid_nognc" + / "output" + / "flow.cbc", + ], +) +def test_budgetfile_detect_precision_single(path): + file = CellBudgetFile(path, precision="auto") + assert file.realtype == np.float32 + + +@pytest.mark.parametrize( + "path", + [ + _example_data_path + / "mf6" + / "test006_gwf3" + / "expected_output" + / "flow_adj.cbc", + ], +) +def test_budgetfile_detect_precision_double(path): + file = CellBudgetFile(path, precision="auto") + assert file.realtype == np.float64 + + def test_cellbudgetfile_position(function_tmpdir, zonbud_model_path): fpth = zonbud_model_path / "freyberg.gitcbc" v = CellBudgetFile(fpth) @@ -305,7 +367,7 @@ def test_cellbudgetfile_position(function_tmpdir, zonbud_model_path): assert ipos == ival, f"position of index 8767 header != {ival}" cbcd = [] - for i in range(idx, v.get_nrecords()): + for i in range(idx, len(v)): cbcd.append(v.get_data(i)[0]) v.close() @@ -334,7 +396,7 @@ def test_cellbudgetfile_position(function_tmpdir, zonbud_model_path): names = 
v2.get_unique_record_names(decode=True) cbcd2 = [] - for i in range(0, v2.get_nrecords()): + for i in range(len(v2)): cbcd2.append(v2.get_data(i)[0]) v2.close() @@ -557,10 +619,11 @@ def test_cellbudgetfile_reverse_mf6(example_data_path, function_tmpdir): assert isinstance(rf, CellBudgetFile) # check that both files have the same number of records - nrecords = f.get_nrecords() - assert nrecords == rf.get_nrecords() + assert len(f) == 2 + assert len(rf) == 2 # check data were reversed + nrecords = len(f) for idx in range(nrecords - 1, -1, -1): # check headers f_header = list(f.recordarray[nrecords - idx - 1]) @@ -583,3 +646,22 @@ def test_cellbudgetfile_reverse_mf6(example_data_path, function_tmpdir): else: # flows should be negated assert np.array_equal(f_data[0][0], -rf_data[0][0]) + + +def test_read_mf6_budgetfile(example_data_path): + cbb_file = ( + example_data_path + / "mf6" + / "test005_advgw_tidal" + / "expected_output" + / "AdvGW_tidal.cbc" + ) + cbb = CellBudgetFile(cbb_file) + rch_zone_1 = cbb.get_data(paknam2="rch-zone_1".upper()) + rch_zone_2 = cbb.get_data(paknam2="rch-zone_2".upper()) + rch_zone_3 = cbb.get_data(paknam2="rch-zone_3".upper()) + + # ensure there is a record for each time step + assert len(rch_zone_1) == 120 * 3 + 1 + assert len(rch_zone_2) == 120 * 3 + 1 + assert len(rch_zone_3) == 120 * 3 + 1 diff --git a/autotest/test_formattedfile.py b/autotest/test_formattedfile.py index f8ad28614..6f83215de 100644 --- a/autotest/test_formattedfile.py +++ b/autotest/test_formattedfile.py @@ -78,6 +78,12 @@ def test_formattedfile_read(function_tmpdir, example_data_path): h = FormattedHeadFile(mf2005_model_path / "test1tr.githds") assert isinstance(h, FormattedHeadFile) + # check number of records + assert len(h) == 1 + with pytest.deprecated_call(): + assert h.get_nrecords() == 1 + assert not hasattr(h, "nrecords") + times = h.get_times() assert np.isclose(times[0], 1577880064.0) diff --git a/flopy/utils/binaryfile.py b/flopy/utils/binaryfile.py index b922f895f..c85900541 100644 --- a/flopy/utils/binaryfile.py +++ b/flopy/utils/binaryfile.py @@ -705,7 +705,7 @@ def reverse(self, filename: Optional[os.PathLike] = None): tsimtotal += tpd[0] # get total number of records - nrecords = self.recordarray.shape[0] + nrecords = len(self) # open backward file with open(filename, "wb") as fbin: @@ -1034,7 +1034,6 @@ def __init__( self.imethlist = [] self.paknamlist_from = [] self.paknamlist_to = [] - self.nrecords = 0 self.compact = True # compact budget file flag self.dis = None @@ -1087,6 +1086,26 @@ def __enter__(self): def __exit__(self, *exc): self.close() + def __len__(self): + """ + Return the number of records (headers) in the file. + """ + return len(self.recordarray) + + @property + def nrecords(self): + """ + Return the number of records (headers) in the file. + + .. deprecated:: 3.8.0 + Use :meth:`len` instead. 
+ """ + warnings.warn( + "obj.nrecords is deprecated; use len(obj) instead.", + DeprecationWarning, + ) + return len(self) + def __reset(self): """ Reset indexing lists when determining precision @@ -1101,7 +1120,6 @@ def __reset(self): self.imethlist = [] self.paknamlist_from = [] self.paknamlist_to = [] - self.nrecords = 0 def _set_precision(self, precision="single"): """ @@ -1209,7 +1227,6 @@ def _build_index(self): while ipos < self.totalbytes: self.iposheader.append(ipos) header = self._get_header() - self.nrecords += 1 totim = header["totim"] # if old-style (non-compact) file, # compute totim from kstp and kper @@ -2117,12 +2134,17 @@ def get_nrecords(self): Returns ------- - - out : int + int Number of records in the file. + .. deprecated:: 3.8.0 + Use :meth:`len` instead. """ - return self.recordarray.shape[0] + warnings.warn( + "get_nrecords is deprecated; use len(obj) instead.", + DeprecationWarning, + ) + return len(self) def get_residual(self, totim, scaled=False): """ @@ -2271,7 +2293,7 @@ def reverse(self, filename: Optional[os.PathLike] = None): tsimtotal += tpd[0] # get number of records - nrecords = self.get_nrecords() + nrecords = len(self) # open backward budget file with open(filename, "wb") as fbin: diff --git a/flopy/utils/datafile.py b/flopy/utils/datafile.py index 9d3c186cb..31c570687 100644 --- a/flopy/utils/datafile.py +++ b/flopy/utils/datafile.py @@ -5,6 +5,7 @@ """ import os +import warnings from pathlib import Path from typing import Union @@ -221,6 +222,12 @@ def __init__( angrot=0.0, ) + def __len__(self): + """ + Return the number of records (headers) in the file. + """ + return len(self.recordarray) + def __enter__(self): return self @@ -431,9 +438,17 @@ def list_records(self): return def get_nrecords(self): - if isinstance(self.recordarray, np.recarray): - return self.recordarray.shape[0] - return 0 + """ + Return the number of records (headers) in the file. + + .. deprecated:: 3.8.0 + Use :meth:`len` instead. + """ + warnings.warn( + "get_nrecords is deprecated; use len(obj) instead.", + DeprecationWarning, + ) + return len(self) def _get_data_array(self, totim=0): """ From 11ada32e289c25634e1e058a772b439bb052b8b9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 12 Jun 2024 22:07:22 -0400 Subject: [PATCH 22/57] chore(deps): bump dawidd6/action-download-artifact from 5 to 6 (#2227) --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 3a92a56ac..57c19dadd 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -173,7 +173,7 @@ jobs: # actions/download-artifact won't look at previous workflow runs but we need to in order to get changelog - name: Download artifacts - uses: dawidd6/action-download-artifact@v5 + uses: dawidd6/action-download-artifact@v6 - name: Draft release env: From c69990ac37ce5d6828472af1eadab4dc6687c1e8 Mon Sep 17 00:00:00 2001 From: Mike Taves Date: Thu, 13 Jun 2024 23:12:54 +1200 Subject: [PATCH 23/57] fix(regression): corrections to test_create_tests_transport (#2228) While this regression test "passes", this PR resolves two errors to actually compare concentrations results: * Fix typo for filename "gwt_mst03.unc" ->"gwt_mst03.ucn" * Compare the two concentration files with compare_heads with text="concentration". 
Further explanations of the fixes:
* The flopy.utils.compare module "passes" (returns True, so the assert
succeeds) if there are not two files to compare, which is an unusually
silent approach. This was due to the filename typo.
* These files are not MT3D-style concentration files, and would raise
exceptions if read with UcnFile. They must be read with
HeadFile(..., text="concentration").
---
 autotest/regression/test_mf6.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/autotest/regression/test_mf6.py b/autotest/regression/test_mf6.py
index 6dad5ce4c..4fa83f079 100644
--- a/autotest/regression/test_mf6.py
+++ b/autotest/regression/test_mf6.py
@@ -49,7 +49,7 @@
 from flopy.mf6.mfbase import FlopyException, MFDataException
 from flopy.mf6.utils import testutils
 from flopy.utils import CellBudgetFile
-from flopy.utils.compare import compare_concentrations, compare_heads
+from flopy.utils.compare import compare_heads
 from flopy.utils.datautil import PyListUtil
 
 pytestmark = pytest.mark.mf6
@@ -3399,7 +3399,7 @@ def test_create_tests_transport(function_tmpdir, example_data_path):
     pth = example_data_path / "mf6" / "create_tests" / test_ex_name
     expected_output_folder = pth / "expected_output"
     expected_head_file = expected_output_folder / "gwf_mst03.hds"
-    expected_conc_file = expected_output_folder / "gwt_mst03.unc"
+    expected_conc_file = expected_output_folder / "gwt_mst03.ucn"
 
     laytyp = [1]
     ss = [1.0e-10]
@@ -3615,12 +3615,13 @@
         outfile=outfile,
     )
     conc_new = function_tmpdir / "gwt_mst03.ucn"
-    assert compare_concentrations(
+    assert compare_heads(
         None,
         None,
         files1=expected_conc_file,
         files2=conc_new,
         outfile=outfile,
+        text="concentration",
     )
 
     # clean up

From e2a85a38640656d5795f8859defb0de14cf668e6 Mon Sep 17 00:00:00 2001
From: Mike Taves
Date: Thu, 13 Jun 2024 23:15:43 +1200
Subject: [PATCH 24/57] fix(binaryread): raise/handle EOFError, deprecate vartype=str (#2226)

This fixes issues while reading some binary files with auto precision
detection, and also modernizes a few aspects of flopy.utils.binaryfile
left over from Python 2.
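
For orientation, a sketch of the updated usage summarized below, mirroring
the revised test_binaryread (the head file path is hypothetical):

    import numpy as np
    import flopy

    with open("freyberg.githds", "rb") as fp:
        res = flopy.utils.binaryfile.binaryread(fp, np.int32, 2)    # kstp, kper
        res = flopy.utils.binaryfile.binaryread(fp, np.float32, 2)  # pertim, totim
        res = flopy.utils.binaryfile.binaryread(fp, bytes)          # text record; vartype=str now warns
        # reading beyond the end of the file now raises EOFError
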
There are two changes to flopy.utils.binaryfile.binaryread():
* Raise EOFError if attempting to read data beyond the end of the file
* Deprecate vartype=str, since bytes is the return type with Python 3

Other refactors:
* Simplify conventional ASCII range checks by converting bytes to a list
of int, then checking if the bytes are within range
* Remove checks if bytes are not str, and use .encode("ascii") where
appropriate
---
 autotest/test_binaryfile.py     |  20 +++-
 autotest/test_cellbudgetfile.py |  12 +++
 flopy/utils/binaryfile.py       | 178 ++++++++++++++------------------
 3 files changed, 108 insertions(+), 102 deletions(-)

diff --git a/autotest/test_binaryfile.py b/autotest/test_binaryfile.py
index 7e7c939eb..3d7a0deb0 100644
--- a/autotest/test_binaryfile.py
+++ b/autotest/test_binaryfile.py
@@ -53,12 +53,30 @@ def test_binaryread(example_data_path):
     np.testing.assert_array_equal(res, np.array([1, 1], np.int32))
     res = flopy.utils.binaryfile.binaryread(fp, np.float32, 2)
     np.testing.assert_array_equal(res, np.array([10, 10], np.float32))
-    res = flopy.utils.binaryfile.binaryread(fp, str)
+    res = flopy.utils.binaryfile.binaryread(fp, bytes)
     assert res == b"            HEAD"
     res = flopy.utils.binaryfile.binaryread(fp, np.int32)
     assert res == 20
 
 
+def test_binaryread_misc(tmp_path):
+    # Check deprecated warning
+    file = tmp_path / "data.file"
+    file.write_bytes(b" data")
+    with file.open("rb") as fp:
+        with pytest.deprecated_call(match="vartype=str is deprecated"):
+            res = flopy.utils.binaryfile.binaryread(fp, str, charlen=5)
+    assert res == b" data"
+    # Test exceptions with a small file with 1 byte
+    file.write_bytes(b"\x00")
+    with file.open("rb") as fp:
+        with pytest.raises(EOFError):
+            flopy.utils.binaryfile.binaryread(fp, bytes, charlen=6)
+    with file.open("rb") as fp:
+        with pytest.raises(EOFError):
+            flopy.utils.binaryfile.binaryread(fp, np.int32)
+
+
 def test_deprecated_binaryread_struct(example_data_path):
     # similar to test_binaryread(), but check the calls are deprecated
     pth = example_data_path / "freyberg" / "freyberg.githds"
diff --git a/autotest/test_cellbudgetfile.py b/autotest/test_cellbudgetfile.py
index 1e51a5586..0af5c4572 100644
--- a/autotest/test_cellbudgetfile.py
+++ b/autotest/test_cellbudgetfile.py
@@ -285,6 +285,18 @@
     )
 
 
+def test_cellbudgetfile_imeth_5(example_data_path):
+    pth = example_data_path / "preserve_unitnums/testsfr2.ghb.cbc"
+    with CellBudgetFile(pth) as cbc:
+        pass
+    # check a few components
+    pd.testing.assert_index_equal(
+        cbc.headers.index, pd.Index(np.arange(12, dtype=np.int64) * 156 + 64)
+    )
+    assert cbc.headers.text.unique().tolist() == ["HEAD DEP BOUNDS"]
+    assert cbc.headers.imeth.unique().tolist() == [5]
+
+
 @pytest.fixture
 def zonbud_model_path(example_data_path):
     return example_data_path / "zonbud_examples"
diff --git a/flopy/utils/binaryfile.py b/flopy/utils/binaryfile.py
index c85900541..cbd4aff53 100644
--- a/flopy/utils/binaryfile.py
+++ b/flopy/utils/binaryfile.py
@@ -171,10 +171,10 @@ class BinaryHeader(Header):
 
     Parameters
     ----------
-    bintype : str
-        Type of file being opened. Accepted values are 'head' and 'ucn'.
-    precision : str
-        Precision of floating point data in the file.
+    bintype : str, default None
+        Type of file being opened. Accepted values are 'head' and 'ucn'.
+    precision : str, default 'single'
+        Precision of floating point data in the file.
""" @@ -313,32 +313,47 @@ def binaryread_struct(file, vartype, shape=(1,), charlen=16): def binaryread(file, vartype, shape=(1,), charlen=16): """ - Read text, a scalar value, or an array of values from a binary file. + Read character bytes, scalar or array values from a binary file. Parameters ---------- file : file object is an open file object vartype : type - is the return variable type: str, numpy.int32, numpy.float32, - or numpy.float64 + is the return variable type: bytes, numpy.int32, + numpy.float32, or numpy.float64. Using str is deprecated since + bytes is preferred. shape : tuple, default (1,) is the shape of the returned array (shape(1, ) returns a single value) for example, shape = (nlay, nrow, ncol) charlen : int, default 16 - is the length of the text string. Note that string arrays - cannot be returned, only multi-character strings. Shape has no - affect on strings. + is the length character bytes. Note that arrays of bytes + cannot be returned, only multi-character bytes. Shape has no + affect on bytes. + Raises + ------ + EOFError """ - # read a string variable of length charlen if vartype == str: + # handle a hang-over from python2 + warnings.warn( + "vartype=str is deprecated; use vartype=bytes instead.", + DeprecationWarning, + ) + vartype = bytes + if vartype == bytes: + # read character bytes of length charlen result = file.read(charlen) + if len(result) < charlen: + raise EOFError else: # find the number of values nval = np.prod(shape) result = np.fromfile(file, vartype, nval) + if result.size < nval: + raise EOFError if nval != 1: result = np.reshape(result, shape) return result @@ -364,23 +379,18 @@ def get_headfile_precision(filename: Union[str, os.PathLike]): Parameters ---------- filename : str or PathLike - Path of binary MODFLOW file to determine precision. + Path of binary MODFLOW file to determine precision. 
Returns ------- - result : str - Result will be unknown, single, or double + str + Result will be unknown, single, or double """ # Set default result if neither single or double works result = "unknown" - # Create string containing set of ascii characters - asciiset = " " - for i in range(33, 127): - asciiset += chr(i) - # Open file, and check filesize to ensure this is not an empty file f = open(filename, "rb") f.seek(0, 2) @@ -399,15 +409,12 @@ def get_headfile_precision(filename: Union[str, os.PathLike]): ("text", "S16"), ] hdr = binaryread(f, vartype) - text = hdr[0][4] - try: - text = text.decode() - for t in text: - if t.upper() not in asciiset: - raise Exception() + charbytes = list(hdr[0][4]) + if min(charbytes) >= 32 and max(charbytes) <= 126: + # check if bytes are within conventional ASCII range result = "single" success = True - except: + else: success = False # next try double @@ -421,14 +428,10 @@ def get_headfile_precision(filename: Union[str, os.PathLike]): ("text", "S16"), ] hdr = binaryread(f, vartype) - text = hdr[0][4] - try: - text = text.decode() - for t in text: - if t.upper() not in asciiset: - raise Exception() + charbytes = list(hdr[0][4]) + if min(charbytes) >= 32 and max(charbytes) <= 126: result = "double" - except: + else: f.close() raise ValueError( f"Could not determine the precision of the headfile {filename}" @@ -1171,7 +1174,7 @@ def _set_precision(self, precision="single"): try: self._build_index() - except BudgetIndexError: + except (BudgetIndexError, EOFError): success = False self.__reset() @@ -1201,20 +1204,14 @@ def _build_index(self): Build the ordered dictionary, which maps the header information to the position in the binary file. """ - asciiset = " " - for i in range(33, 127): - asciiset += chr(i) - # read first record header = self._get_header() nrow = header["nrow"] ncol = header["ncol"] - text = header["text"] - if isinstance(text, bytes): - text = text.decode() + text = header["text"].decode("ascii").strip() if nrow < 0 or ncol < 0: raise Exception("negative nrow, ncol") - if not text.endswith("FLOW-JA-FACE"): + if text != "FLOW-JA-FACE": self.nrow = nrow self.ncol = ncol self.nlay = np.abs(header["nlay"]) @@ -1242,17 +1239,14 @@ def _build_index(self): self.kstpkper.append(kstpkper) if header["text"] not in self.textlist: # check the precision of the file using text records - try: - tlist = [header["text"], header["modelnam"]] - for text in tlist: - if isinstance(text, bytes): - text = text.decode() - for t in text: - if t.upper() not in asciiset: - raise Exception() - - except: - raise BudgetIndexError("Improper precision") + tlist = [header["text"], header["modelnam"]] + for text in tlist: + if len(text) == 0: + continue + charbytes = list(text) + if min(charbytes) < 32 or max(charbytes) > 126: + # not in conventional ASCII range + raise BudgetIndexError("Improper precision") self.textlist.append(header["text"]) self.imethlist.append(header["imeth"]) if header["paknam"] not in self.paknamlist_from: @@ -1279,23 +1273,15 @@ def _build_index(self): "paknam2", ]: s = header[itxt] - if isinstance(s, bytes): - s = s.decode() print(f"{itxt}: {s}") print("file position: ", ipos) - if ( - header["imeth"].item() != 5 - and header["imeth"].item() != 6 - and header["imeth"].item() != 7 - ): + if header["imeth"].item() not in {5, 6, 7}: print("") # set the nrow, ncol, and nlay if they have not been set if self.nrow == 0: - text = header["text"] - if isinstance(text, bytes): - text = text.decode() - if not text.endswith("FLOW-JA-FACE"): + text = 
header["text"].decode("ascii").strip() + if text != "FLOW-JA-FACE": self.nrow = header["nrow"] self.ncol = header["ncol"] self.nlay = np.abs(header["nlay"]) @@ -1350,51 +1336,47 @@ def _skip_record(self, header): nrow = header["nrow"] ncol = header["ncol"] imeth = header["imeth"] + realtype_nbytes = self.realtype(1).nbytes if imeth == 0: - nbytes = nrow * ncol * nlay * self.realtype(1).nbytes + nbytes = nrow * ncol * nlay * realtype_nbytes elif imeth == 1: - nbytes = nrow * ncol * nlay * self.realtype(1).nbytes + nbytes = nrow * ncol * nlay * realtype_nbytes elif imeth == 2: nlist = binaryread(self.file, np.int32)[0] - nbytes = nlist * (np.int32(1).nbytes + self.realtype(1).nbytes) + nbytes = nlist * (4 + realtype_nbytes) elif imeth == 3: - nbytes = nrow * ncol * self.realtype(1).nbytes - nbytes += nrow * ncol * np.int32(1).nbytes + nbytes = nrow * ncol * realtype_nbytes + (nrow * ncol * 4) elif imeth == 4: - nbytes = nrow * ncol * self.realtype(1).nbytes + nbytes = nrow * ncol * realtype_nbytes elif imeth == 5: nauxp1 = binaryread(self.file, np.int32)[0] naux = nauxp1 - 1 - - for i in range(naux): - temp = binaryread(self.file, str, charlen=16) + naux_nbytes = naux * 16 + if naux_nbytes: + check = self.file.seek(naux_nbytes, 1) + if check < naux_nbytes: + raise EOFError nlist = binaryread(self.file, np.int32)[0] if self.verbose: print("naux: ", naux) print("nlist: ", nlist) print("") - nbytes = nlist * ( - np.int32(1).nbytes - + self.realtype(1).nbytes - + naux * self.realtype(1).nbytes - ) + nbytes = nlist * (4 + realtype_nbytes + naux * realtype_nbytes) elif imeth == 6: # read rest of list data nauxp1 = binaryread(self.file, np.int32)[0] naux = nauxp1 - 1 - - for i in range(naux): - temp = binaryread(self.file, str, charlen=16) + naux_nbytes = naux * 16 + if naux_nbytes: + check = self.file.seek(naux_nbytes, 1) + if check < naux_nbytes: + raise EOFError nlist = binaryread(self.file, np.int32)[0] if self.verbose: print("naux: ", naux) print("nlist: ", nlist) print("") - nbytes = nlist * ( - np.int32(1).nbytes * 2 - + self.realtype(1).nbytes - + naux * self.realtype(1).nbytes - ) + nbytes = nlist * (4 * 2 + realtype_nbytes + naux * realtype_nbytes) else: raise Exception(f"invalid method code {imeth}") if nbytes != 0: @@ -1418,10 +1400,10 @@ def _get_header(self): for name in temp.dtype.names: header2[name] = temp[name] if header2["imeth"].item() == 6: - header2["modelnam"] = binaryread(self.file, str, charlen=16) - header2["paknam"] = binaryread(self.file, str, charlen=16) - header2["modelnam2"] = binaryread(self.file, str, charlen=16) - header2["paknam2"] = binaryread(self.file, str, charlen=16) + header2["modelnam"] = binaryread(self.file, bytes, charlen=16) + header2["paknam"] = binaryread(self.file, bytes, charlen=16) + header2["modelnam2"] = binaryread(self.file, bytes, charlen=16) + header2["paknam2"] = binaryread(self.file, bytes, charlen=16) else: header2 = np.array( [(0, 0.0, 0.0, 0.0, "", "", "", "")], dtype=self.header2_dtype @@ -1951,9 +1933,7 @@ def get_record(self, idx, full3D=False): self.file.seek(ipos, 0) imeth = header["imeth"][0] - t = header["text"][0] - if isinstance(t, bytes): - t = t.decode("utf-8") + t = header["text"][0].decode("ascii") s = f"Returning {t.strip()} as " nlay = abs(header["nlay"][0]) @@ -2039,10 +2019,8 @@ def get_record(self, idx, full3D=False): naux = nauxp1 - 1 l = [("node", np.int32), ("q", self.realtype)] for i in range(naux): - auxname = binaryread(self.file, str, charlen=16) - if not isinstance(auxname, str): - auxname = auxname.decode() - 
l.append((auxname.strip(), self.realtype)) + auxname = binaryread(self.file, bytes, charlen=16) + l.append((auxname.decode("ascii").strip(), self.realtype)) dtype = np.dtype(l) nlist = binaryread(self.file, np.int32)[0] data = binaryread(self.file, dtype, shape=(nlist,)) @@ -2064,10 +2042,8 @@ def get_record(self, idx, full3D=False): naux = nauxp1 - 1 l = [("node", np.int32), ("node2", np.int32), ("q", self.realtype)] for i in range(naux): - auxname = binaryread(self.file, str, charlen=16) - if not isinstance(auxname, str): - auxname = auxname.decode() - l.append((auxname.strip(), self.realtype)) + auxname = binaryread(self.file, bytes, charlen=16) + l.append((auxname.decode("ascii").strip(), self.realtype)) dtype = np.dtype(l) nlist = binaryread(self.file, np.int32)[0] data = binaryread(self.file, dtype, shape=(nlist,)) From ae388ef5a2f40abc950c05ca5b156f7e42337983 Mon Sep 17 00:00:00 2001 From: langevin-usgs Date: Fri, 14 Jun 2024 05:58:57 -0500 Subject: [PATCH 25/57] refactor(binarygrid_util): refactor get_iverts to be general and not dependent on grid type (#2230) --- flopy/mf6/utils/binarygrid_util.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/flopy/mf6/utils/binarygrid_util.py b/flopy/mf6/utils/binarygrid_util.py index 38df3b961..21ecedbc1 100644 --- a/flopy/mf6/utils/binarygrid_util.py +++ b/flopy/mf6/utils/binarygrid_util.py @@ -280,13 +280,10 @@ def __get_iverts(self): """ iverts = None if "IAVERT" in self._datadict: - if self._grid_type == "DISV": - nsize = self.ncpl - elif self._grid_type == "DISU": - nsize = self.nodes iverts = [] iavert = self.iavert javert = self.javert + nsize = iavert.shape[0] - 1 for ivert in range(nsize): i0 = iavert[ivert] i1 = iavert[ivert + 1] From cfdedbcb35c2f812e2b7efd78706d4eaa8cdc8f5 Mon Sep 17 00:00:00 2001 From: Mike Taves Date: Sat, 15 Jun 2024 00:42:58 +1200 Subject: [PATCH 26/57] refactor(datafile): deprecate list_records() and other list_ methods (#2232) Now that data files have a .headers data frame property (#2221), there is no need to have methods that print parts of this information to stdout (they return None). This PR deprecates the following: * list_records(): use headers instead * list_unique_records(): use headers[["text", "imeth"]].drop_duplicates() instead * list_unique_packages(to=True/False): use headers.paknam.drop_duplicates() or headers.paknam2.unique() (there are a few ways) The last two only apply to CellBudgetFile. This PR does not apply to flopy.mf6.utils.mfobservation.list_records(). 
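
A quick sketch of the suggested replacements (the budget file name is
hypothetical):

    from flopy.utils import CellBudgetFile

    cbb = CellBudgetFile("freyberg.gitcbc")
    cbb.headers                                       # instead of list_records()
    cbb.headers[["text", "imeth"]].drop_duplicates()  # instead of list_unique_records()
    cbb.headers.paknam.drop_duplicates()              # instead of list_unique_packages()
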
--- .docs/Notebooks/mf6_simple_model_example.py | 6 ++-- .docs/Notebooks/mfusg_conduit_examples.py | 15 ++++++--- .docs/Notebooks/sfrpackage_example.py | 4 +-- .docs/Notebooks/uzf_example.py | 1 - autotest/regression/test_mf6.py | 2 -- autotest/test_binaryfile.py | 4 +++ autotest/test_cbc_full3D.py | 2 +- autotest/test_cellbudgetfile.py | 12 ++++--- autotest/test_formattedfile.py | 2 ++ flopy/utils/binaryfile.py | 35 ++++++++++++++++++--- flopy/utils/datafile.py | 7 ++++- flopy/utils/formattedfile.py | 2 +- 12 files changed, 66 insertions(+), 26 deletions(-) diff --git a/.docs/Notebooks/mf6_simple_model_example.py b/.docs/Notebooks/mf6_simple_model_example.py index 2a173c4ed..aecc57ed3 100644 --- a/.docs/Notebooks/mf6_simple_model_example.py +++ b/.docs/Notebooks/mf6_simple_model_example.py @@ -20,9 +20,8 @@ # ### Setup the Notebook Environment -import os - # + +import os import sys from pprint import pformat from tempfile import TemporaryDirectory @@ -272,8 +271,9 @@ # read the cell budget file fname = os.path.join(workspace, f"{name}.cbb") cbb = flopy.utils.CellBudgetFile(fname, precision="double") -cbb.list_records() +cbb.headers.T +# + flowja = cbb.get_data(text="FLOW-JA-FACE")[0][0, 0, :] chdflow = cbb.get_data(text="CHD")[0] # - diff --git a/.docs/Notebooks/mfusg_conduit_examples.py b/.docs/Notebooks/mfusg_conduit_examples.py index bf15054bf..187cabd62 100644 --- a/.docs/Notebooks/mfusg_conduit_examples.py +++ b/.docs/Notebooks/mfusg_conduit_examples.py @@ -104,8 +104,9 @@ # + cbb_file = os.path.join(mf.model_ws, "ex3.clncbb") cbb = flopy.utils.CellBudgetFile(cbb_file) -# cbb.list_records() +cbb.headers +# + simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0] for i in range(nper - 1): simflow = np.append( @@ -297,8 +298,9 @@ # + cbb_file = os.path.join(mf.model_ws, f"{modelname}.clncb") cbb = flopy.utils.CellBudgetFile(cbb_file) -# cbb.list_records() +cbb.headers +# + simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0] for i in range(nper - 1): simflow = np.append( @@ -392,8 +394,9 @@ # + cbb_file = os.path.join(mf.model_ws, f"{modelname}.clncb") cbb = flopy.utils.CellBudgetFile(cbb_file) -# cbb.list_records() +cbb.headers +# + simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0] for i in range(nper - 1): simflow = np.append( @@ -490,8 +493,9 @@ # + cbb_file = os.path.join(mf.model_ws, f"{modelname}.clncb") cbb = flopy.utils.CellBudgetFile(cbb_file) -# cbb.list_records() +cbb.headers +# + simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0] for i in range(nper - 1): simflow = np.append( @@ -575,8 +579,9 @@ # + cbb_file = os.path.join(mf.model_ws, f"{modelname}.clncb") cbb = flopy.utils.CellBudgetFile(cbb_file) -# cbb.list_records() +cbb.headers +# + simflow = cbb.get_data(kstpkper=(0, 0), text="GWF")[0] for i in range(nper - 1): simflow = np.append( diff --git a/.docs/Notebooks/sfrpackage_example.py b/.docs/Notebooks/sfrpackage_example.py index 60fb87757..6ce4273e9 100644 --- a/.docs/Notebooks/sfrpackage_example.py +++ b/.docs/Notebooks/sfrpackage_example.py @@ -33,8 +33,6 @@ import glob import os import shutil - -# + import sys from pprint import pformat from tempfile import TemporaryDirectory @@ -237,7 +235,7 @@ bpth = os.path.join(path, "test1ss.cbc") cbbobj = bf.CellBudgetFile(bpth) -cbbobj.list_records() +cbbobj.headers sfrleak = cbbobj.get_data(text=" STREAM LEAKAGE")[0] sfrleak[sfrleak == 0] = np.nan # remove zero values diff --git a/.docs/Notebooks/uzf_example.py b/.docs/Notebooks/uzf_example.py index 67f934d0a..ab206e0c3 100644 --- a/.docs/Notebooks/uzf_example.py 
+++ b/.docs/Notebooks/uzf_example.py @@ -238,7 +238,6 @@ avail = os.path.isfile(fpth) if avail: uzfbdobjct = flopy.utils.CellBudgetFile(fpth) - uzfbdobjct.list_records() else: print(f'"{fpth}" is not available') diff --git a/autotest/regression/test_mf6.py b/autotest/regression/test_mf6.py index 4fa83f079..e1c585088 100644 --- a/autotest/regression/test_mf6.py +++ b/autotest/regression/test_mf6.py @@ -3687,7 +3687,6 @@ def test001a_tharmonic(function_tmpdir, example_data_path): # get expected results budget_obj = CellBudgetFile(expected_cbc_file_a, precision="auto") - budget_obj.list_records() budget_frf_valid = np.array( budget_obj.get_data(text=" FLOW JA FACE", full3D=True) ) @@ -4464,7 +4463,6 @@ def test006_2models_mvr(function_tmpdir, example_data_path): expected_cbc_file_a, precision="double", ) - budget_obj.list_records() # test getting models model_dict = sim.model_dict diff --git a/autotest/test_binaryfile.py b/autotest/test_binaryfile.py index 3d7a0deb0..f420721bd 100644 --- a/autotest/test_binaryfile.py +++ b/autotest/test_binaryfile.py @@ -131,6 +131,8 @@ def test_headfile_build_index(example_data_path): assert hds.kstpkper == [(1, kper + 1) for kper in range(1097)] np.testing.assert_array_equal(hds.iposarray, np.arange(3291) * 3244 + 44) assert hds.iposarray.dtype == np.int64 + with pytest.deprecated_call(match="use headers instead"): + assert hds.list_records() is None # check first and last row of data frame pd.testing.assert_frame_equal( hds.headers.iloc[[0, -1]], @@ -186,6 +188,8 @@ def test_concentration_build_index(example_data_path): assert ucn.kstpkper == [(1, 1)] np.testing.assert_array_equal(ucn.iposarray, np.arange(8) * 1304 + 44) assert ucn.iposarray.dtype == np.int64 + with pytest.deprecated_call(match="use headers instead"): + assert ucn.list_records() is None # check first and last row of data frame pd.testing.assert_frame_equal( ucn.headers.iloc[[0, -1]], diff --git a/autotest/test_cbc_full3D.py b/autotest/test_cbc_full3D.py index 9df833b16..54bad1064 100644 --- a/autotest/test_cbc_full3D.py +++ b/autotest/test_cbc_full3D.py @@ -86,7 +86,7 @@ def cbc_eval_size(cbcobj, nnodes, shape3d): def cbc_eval_data(cbcobj, shape3d): cbc_pth = cbcobj.filename print(f"{cbc_pth}:\n") - cbcobj.list_unique_records() + print(cbcobj.headers[["text", "imeth"]].drop_duplicates()) names = cbcobj.get_unique_record_names(decode=True) times = cbcobj.get_times() diff --git a/autotest/test_cellbudgetfile.py b/autotest/test_cellbudgetfile.py index 0af5c4572..ebcaf15a9 100644 --- a/autotest/test_cellbudgetfile.py +++ b/autotest/test_cellbudgetfile.py @@ -400,10 +400,14 @@ def test_cellbudgetfile_position(function_tmpdir, zonbud_model_path): v2 = CellBudgetFile(opth, verbose=True) - try: - v2.list_records() - except: - assert False, f"could not list records on {opth}" + with pytest.deprecated_call(match="use headers instead"): + assert v2.list_records() is None + with pytest.deprecated_call(match=r"drop_duplicates\(\) instead"): + assert v2.list_unique_records() is None + with pytest.deprecated_call(match=r"drop_duplicates\(\) instead"): + assert v2.list_unique_packages(True) is None + with pytest.deprecated_call(match=r"drop_duplicates\(\) instead"): + assert v2.list_unique_packages(False) is None names = v2.get_unique_record_names(decode=True) diff --git a/autotest/test_formattedfile.py b/autotest/test_formattedfile.py index 6f83215de..a7146398d 100644 --- a/autotest/test_formattedfile.py +++ b/autotest/test_formattedfile.py @@ -44,6 +44,8 @@ def 
test_headfile_build_index(example_data_path): assert hds.kstpkper == [(50, 1)] np.testing.assert_array_equal(hds.iposarray, [98]) assert hds.iposarray.dtype == np.int64 + with pytest.deprecated_call(match="use headers instead"): + assert hds.list_records() is None pd.testing.assert_frame_equal( hds.headers, pd.DataFrame( diff --git a/flopy/utils/binaryfile.py b/flopy/utils/binaryfile.py index cbd4aff53..71cafb97c 100644 --- a/flopy/utils/binaryfile.py +++ b/flopy/utils/binaryfile.py @@ -631,11 +631,11 @@ class HeadFile(BinaryLayerFile): >>> import flopy.utils.binaryfile as bf >>> hdobj = bf.HeadFile('model.hds', precision='single') - >>> hdobj.list_records() + >>> hdobj.headers >>> rec = hdobj.get_data(kstpkper=(0, 49)) >>> ddnobj = bf.HeadFile('model.ddn', text='drawdown', precision='single') - >>> ddnobj.list_records() + >>> ddnobj.headers >>> rec = ddnobj.get_data(totim=100.) """ @@ -784,7 +784,7 @@ class UcnFile(BinaryLayerFile): >>> import flopy.utils.binaryfile as bf >>> ucnobj = bf.UcnFile('MT3D001.UCN', precision='single') - >>> ucnobj.list_records() + >>> ucnobj.headers >>> rec = ucnobj.get_data(kstpkper=(0, 0)) """ @@ -851,7 +851,7 @@ class HeadUFile(BinaryLayerFile): >>> import flopy.utils.binaryfile as bf >>> hdobj = bf.HeadUFile('model.hds') - >>> hdobj.list_records() + >>> hdobj.headers >>> usgheads = hdobj.get_data(kstpkper=(0, 49)) """ @@ -1001,7 +1001,7 @@ class CellBudgetFile: >>> import flopy.utils.binaryfile as bf >>> cbb = bf.CellBudgetFile('mymodel.cbb') - >>> cbb.list_records() + >>> cbb.headers >>> rec = cbb.get_data(kstpkper=(0,0), text='RIVER LEAKAGE') """ @@ -1458,7 +1458,14 @@ def _find_paknam(self, paknam, to=False): def list_records(self): """ Print a list of all of the records in the file + + .. deprecated:: 3.8.0 + Use :attr:`headers` instead. """ + warnings.warn( + "list_records() is deprecated; use headers instead.", + DeprecationWarning, + ) for rec in self.recordarray: if isinstance(rec, bytes): rec = rec.decode() @@ -1467,7 +1474,15 @@ def list_records(self): def list_unique_records(self): """ Print a list of unique record names + + .. deprecated:: 3.8.0 + Use `headers[["text", "imeth"]].drop_duplicates()` instead. """ + warnings.warn( + "list_unique_records() is deprecated; use " + 'headers[["text", "imeth"]].drop_duplicates() instead.', + DeprecationWarning, + ) print("RECORD IMETH") print(22 * "-") for rec, imeth in zip(self.textlist, self.imethlist): @@ -1478,7 +1493,17 @@ def list_unique_records(self): def list_unique_packages(self, to=False): """ Print a list of unique package names + + .. deprecated:: 3.8.0 + Use `headers.paknam.drop_duplicates()` or + `headers.paknam2.drop_duplicates()` instead. """ + warnings.warn( + "list_unique_packages() is deprecated; use " + "headers.paknam.drop_duplicates() or " + "headers.paknam2.drop_duplicates() instead", + DeprecationWarning, + ) for rec in self._unique_package_names(to): if isinstance(rec, bytes): rec = rec.decode() diff --git a/flopy/utils/datafile.py b/flopy/utils/datafile.py index 31c570687..acf4ac066 100644 --- a/flopy/utils/datafile.py +++ b/flopy/utils/datafile.py @@ -430,9 +430,14 @@ def _build_index(self): def list_records(self): """ Print a list of all of the records in the file - obj.list_records() + .. deprecated:: 3.8.0 + Use :attr:`headers` instead. 
""" + warnings.warn( + "list_records() is deprecated; use headers instead.", + DeprecationWarning, + ) for header in self.recordarray: print(header) return diff --git a/flopy/utils/formattedfile.py b/flopy/utils/formattedfile.py index 5fc256554..02903ca21 100644 --- a/flopy/utils/formattedfile.py +++ b/flopy/utils/formattedfile.py @@ -361,7 +361,7 @@ class FormattedHeadFile(FormattedLayerFile): >>> import flopy.utils.formattedfile as ff >>> hdobj = ff.FormattedHeadFile('model.fhd', precision='single') - >>> hdobj.list_records() + >>> hdobj.headers >>> rec = hdobj.get_data(kstpkper=(0, 49)) >>> rec2 = ddnobj.get_data(totim=100.) From 5cdd609748cc70d93859192519d87d34194aec40 Mon Sep 17 00:00:00 2001 From: scottrp <45947939+scottrp@users.noreply.github.com> Date: Fri, 14 Jun 2024 07:43:47 -0700 Subject: [PATCH 27/57] fix(pandas warnings): catch pandas warnings and display them in a more useful way (#2229) --- flopy/mf6/data/mfdataplist.py | 41 ++++++++++++++++++++++++----------- 1 file changed, 28 insertions(+), 13 deletions(-) diff --git a/flopy/mf6/data/mfdataplist.py b/flopy/mf6/data/mfdataplist.py index 594cb4f0b..48f1aa02e 100644 --- a/flopy/mf6/data/mfdataplist.py +++ b/flopy/mf6/data/mfdataplist.py @@ -3,6 +3,7 @@ import io import os import sys +import warnings import numpy as np import pandas @@ -1148,20 +1149,32 @@ def _dataframe_check(self, data_frame): break return valid - def _try_pandas_read(self, fd_data_file): + def _try_pandas_read(self, fd_data_file, file_name): delimiter_list = ["\\s+", ","] for delimiter in delimiter_list: try: - # read flopy formatted data, entire file - data_frame = pandas.read_csv( - fd_data_file, - sep=delimiter, - names=self._header_names, - dtype=self._data_header, - comment="#", - index_col=False, - skipinitialspace=True, - ) + with warnings.catch_warnings(record=True) as warn: + # read flopy formatted data, entire file + data_frame = pandas.read_csv( + fd_data_file, + sep=delimiter, + names=self._header_names, + dtype=self._data_header, + comment="#", + index_col=False, + skipinitialspace=True, + ) + if ( + self._simulation_data.verbosity_level.value + >= VerbosityLevel.normal.value + ): + for warning in warn: + print( + "Pandas warning occurred while loading data " + f"{self.path}:" + ) + print(f' Data File: "{file_name}:"') + print(f' Pandas Message: "{warning.message}"') except BaseException: fd_data_file.seek(0) continue @@ -1203,13 +1216,15 @@ def _read_text_data(self, fd_data_file, first_line, external_file=False): ) io_file_data = io.StringIO("\n".join(file_data)) if external_file: - data_frame = self._try_pandas_read(io_file_data) + data_frame = self._try_pandas_read(io_file_data, fd_data_file.name) if data_frame is not None: self._decrement_id_fields(data_frame) else: # get number of rows of data if len(file_data) > 0: - data_frame = self._try_pandas_read(io_file_data) + data_frame = self._try_pandas_read( + io_file_data, fd_data_file.name + ) if data_frame is not None: self._decrement_id_fields(data_frame) return_val = [True, fd_data_file.readline()] From d9ebd81903bb6aa03864e156a0488128867286ef Mon Sep 17 00:00:00 2001 From: Mike Taves Date: Mon, 17 Jun 2024 13:52:34 +1200 Subject: [PATCH 28/57] fix: test_uzf_negative_iuzfopt (#2236) Checking test_uzf_negative_iuzfopt with NumPy 2.0 (xref #2153), this test failed. The test criteria originally had several issues, including confusing extpd as pet. These issues are fixed in this PR to properly compare expected array values. 
---
 autotest/test_uzf.py | 15 ++++++---------
 1 file changed, 6 insertions(+), 9 deletions(-)

diff --git a/autotest/test_uzf.py b/autotest/test_uzf.py
index ac3710ea1..9726163f0 100644
--- a/autotest/test_uzf.py
+++ b/autotest/test_uzf.py
@@ -647,15 +647,12 @@ def test_uzf_negative_iuzfopt(function_tmpdir):
         "uzf_neg.nam", version="mfnwt", model_ws=function_tmpdir
     )
 
-    pet = ml2.uzf.pet.array
-    extpd = ml2.uzf.pet.array
-
-    assert (
-        np.max(pet) == np.min(pet) and np.max(pet) != 0.1
-    ), "Read error for iuzfopt less than 0"
-    assert (
-        np.max(extpd) == np.min(extpd) and np.max(extpd) != 0.2
-    ), "Read error for iuzfopt less than 0"
+    np.testing.assert_array_equal(
+        ml2.uzf.pet.array, np.full((2, 1, 10, 10), 0.1, np.float32)
+    )
+    np.testing.assert_array_equal(
+        ml2.uzf.extdp.array, np.full((2, 1, 10, 10), 0.2, np.float32)
+    )
 
 
 def test_optionsblock_auxillary_typo():

From 1e44b3fd57bfad1602a06247e44878a7237e0e3a Mon Sep 17 00:00:00 2001
From: Mike Taves
Date: Mon, 17 Jun 2024 23:16:09 +1200
Subject: [PATCH 29/57] refactor: fixes for numpy-2.0 deprecation warnings,
 require numpy>=1.20.3 (#2237)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This PR resolves several deprecation warnings from NumPy 2.0 (xref #2153):

* np.in1d() → np.isin()
* "a16" → "S16" to describe a 16-byte string
* The numpy.core.records module is private, so use numpy.rec instead,
  which seems to work with older versions

This also bumps the minimum version of numpy from 1.15.0 to 1.20.3, since
that is the minimum required by pandas 2.0.0; the previous minimum versions
were therefore mutually incompatible.

---
 .docs/Notebooks/zonebudget_example.py |  2 +-
 README.md                             |  2 +-
 autotest/test_mf6.py                  |  2 +-
 autotest/test_mp6.py                  |  2 +-
 autotest/test_particledata.py         | 14 +++++++-------
 etc/environment.yml                   |  4 ++--
 flopy/modflow/mffhb.py                |  4 ++--
 flopy/modflow/mfgage.py               |  2 +-
 flopy/modflow/mfsfr2.py               |  6 ++----
 flopy/modflow/mfstr.py                |  8 ++------
 flopy/plot/plotutil.py                |  6 +++---
 flopy/utils/binaryfile.py             | 12 ++++++------
 flopy/utils/datafile.py               | 12 ++++++------
 flopy/utils/modpathfile.py            |  6 +++---
 flopy/utils/recarray_utils.py         |  4 ++--
 flopy/utils/util_list.py              |  2 +-
 flopy/utils/zonbud.py                 | 26 +++++++++++++-------------
 pyproject.toml                        |  2 +-
 18 files changed, 55 insertions(+), 61 deletions(-)

diff --git a/.docs/Notebooks/zonebudget_example.py b/.docs/Notebooks/zonebudget_example.py
index 2ea5da4e5..ea9ee0bc0 100644
--- a/.docs/Notebooks/zonebudget_example.py
+++ b/.docs/Notebooks/zonebudget_example.py
@@ -111,7 +111,7 @@
 inyrbud = inyr.get_budget()
 
 names = ["FROM_RECHARGE"]
-rowidx = np.in1d(cmdbud["name"], names)
+rowidx = np.isin(cmdbud["name"], names)
 colidx = "ZONE_1"
 print(f"{cmdbud[rowidx][colidx][0]:,.1f} cubic meters/day")
 
diff --git a/README.md b/README.md
index dfa55933c..fc5354d7d 100644
--- a/README.md
+++ b/README.md
@@ -35,7 +35,7 @@ Installation
 FloPy requires **Python** 3.8+ with:
 
 ```
-numpy >=1.15.0,<2.0.0
+numpy >=1.20.3,<2.0.0
 matplotlib >=1.4.0
 pandas >=2.0.0
 ```
diff --git a/autotest/test_mf6.py b/autotest/test_mf6.py
index 34b9a25cd..59ae0336f 100644
--- a/autotest/test_mf6.py
+++ b/autotest/test_mf6.py
@@ -102,7 +102,7 @@ def write_head(
             ("kper", "i4"),
             ("pertim", "f8"),
             ("totim", "f8"),
-            ("text", "a16"),
+            ("text", "S16"),
             ("ncol", "i4"),
             ("nrow", "i4"),
             ("ilay", "i4"),
diff --git a/autotest/test_mp6.py b/autotest/test_mp6.py
index f481fffd7..ec1f22493 100644
--- a/autotest/test_mp6.py
+++ b/autotest/test_mp6.py
@@ -175,7 +175,7 @@ def test_get_destination_data(function_tmpdir, 
mp6_test_path): np.array(well_pthld)[["k", "i", "j"]].tolist(), dtype=starting_locs.dtype, ) - assert np.all(np.in1d(starting_locs, pathline_locs)) + assert np.all(np.isin(starting_locs, pathline_locs)) # test writing a shapefile of endpoints epd.write_shapefile( diff --git a/autotest/test_particledata.py b/autotest/test_particledata.py index d7fab204c..f3032cb6e 100644 --- a/autotest/test_particledata.py +++ b/autotest/test_particledata.py @@ -83,7 +83,7 @@ def test_particledata_structured_ctor_with_partlocs_as_list_of_tuples(): assert isinstance(data.particledata, pd.DataFrame) assert np.array_equal( data.particledata.to_records(index=False), - np.core.records.fromrecords( + np.rec.fromrecords( [ (0, 1, 1, 0.5, 0.5, 0.5, 0.0, 0), (0, 1, 2, 0.5, 0.5, 0.5, 0.0, 0), @@ -102,7 +102,7 @@ def test_particledata_structured_ctor_with_partlocs_as_ndarray(): assert isinstance(data.particledata, pd.DataFrame) assert np.array_equal( data.particledata.to_records(index=False), - np.core.records.fromrecords( + np.rec.fromrecords( [ (0, 1, 1, 0.5, 0.5, 0.5, 0.0, 0), (0, 1, 2, 0.5, 0.5, 0.5, 0.0, 0), @@ -121,7 +121,7 @@ def test_particledata_unstructured_ctor_with_partlocs_as_ndarray(): assert isinstance(data.particledata, pd.DataFrame) assert np.array_equal( data.particledata.to_records(index=False), - np.core.records.fromrecords( + np.rec.fromrecords( [ (0, 0.5, 0.5, 0.5, 0.0, 0), (1, 0.5, 0.5, 0.5, 0.0, 0), @@ -141,7 +141,7 @@ def test_particledata_unstructured_ctor_with_partlocs_as_list(): assert isinstance(data.particledata, pd.DataFrame) assert np.array_equal( data.particledata.to_records(index=False), - np.core.records.fromrecords( + np.rec.fromrecords( [ (0, 0.5, 0.5, 0.5, 0.0, 0), (1, 0.5, 0.5, 0.5, 0.0, 0), @@ -161,7 +161,7 @@ def test_particledata_unstructured_ctor_with_partlocs_as_ndarray(): assert isinstance(data.particledata, pd.DataFrame) assert np.array_equal( data.particledata.to_records(index=False), - np.core.records.fromrecords( + np.rec.fromrecords( [ (0, 0.5, 0.5, 0.5, 0.0, 0), (1, 0.5, 0.5, 0.5, 0.0, 0), @@ -181,7 +181,7 @@ def test_particledata_structured_ctor_with_partlocs_as_list_of_lists(): assert isinstance(data.particledata, pd.DataFrame) assert np.array_equal( data.particledata.to_records(index=False), - np.core.records.fromrecords( + np.rec.fromrecords( [ (0, 1, 1, 0.5, 0.5, 0.5, 0.0, 0), (0, 1, 2, 0.5, 0.5, 0.5, 0.0, 0), @@ -212,7 +212,7 @@ def test_particledata_to_prp_dis_1(): ) # each coord should be a tuple (irpt, k, i, j, x, y, z) # expected - exp = np.core.records.fromrecords( + exp = np.rec.fromrecords( [ (0, 1, 1, 0.5, 0.5, 0.5, 0.0, 0), (0, 1, 2, 0.5, 0.5, 0.5, 0.0, 0), diff --git a/etc/environment.yml b/etc/environment.yml index b3a489451..fdd7cb1dc 100644 --- a/etc/environment.yml +++ b/etc/environment.yml @@ -6,8 +6,9 @@ dependencies: # required - python>=3.8 - - numpy>=1.15.0,<2.0.0 + - numpy>=1.20.3,<2.0.0 - matplotlib>=1.4.0 + - pandas>=2.0.0 # lint - cffconvert @@ -33,7 +34,6 @@ dependencies: # optional - affine - scipy - - pandas - netcdf4 - pyshp - rasterio diff --git a/flopy/modflow/mffhb.py b/flopy/modflow/mffhb.py index 0706901f8..7e68a08be 100644 --- a/flopy/modflow/mffhb.py +++ b/flopy/modflow/mffhb.py @@ -210,7 +210,7 @@ def __init__( ds5 = ds5.to_records(index=False) # convert numpy array to a recarray if ds5.dtype != dtype: - ds5 = np.core.records.fromarrays(ds5.transpose(), dtype=dtype) + ds5 = np.rec.fromarrays(ds5.transpose(), dtype=dtype) # assign dataset 5 self.ds5 = ds5 @@ -229,7 +229,7 @@ def __init__( ds7 = ds7.to_records(index=False) # 
convert numpy array to a recarray if ds7.dtype != dtype: - ds7 = np.core.records.fromarrays(ds7.transpose(), dtype=dtype) + ds7 = np.rec.fromarrays(ds7.transpose(), dtype=dtype) # assign dataset 7 self.ds7 = ds7 diff --git a/flopy/modflow/mfgage.py b/flopy/modflow/mfgage.py index ba2463ff2..1b7c4b76d 100644 --- a/flopy/modflow/mfgage.py +++ b/flopy/modflow/mfgage.py @@ -131,7 +131,7 @@ def __init__( # convert gage_data to a recarray, if necessary if isinstance(gage_data, np.ndarray): if not gage_data.dtype == dtype: - gage_data = np.core.records.fromarrays( + gage_data = np.rec.fromarrays( gage_data.transpose(), dtype=dtype ) elif isinstance(gage_data, pd.DataFrame): diff --git a/flopy/modflow/mfsfr2.py b/flopy/modflow/mfsfr2.py index dc155ecc7..e21915e29 100644 --- a/flopy/modflow/mfsfr2.py +++ b/flopy/modflow/mfsfr2.py @@ -1372,9 +1372,7 @@ def get_variable_by_stress_period(self, varname): all_data[inds, per] = self.segment_data[per][varname] dtype.append((f"{varname}{per}", float)) isvar = all_data.sum(axis=1) != 0 - ra = np.core.records.fromarrays( - all_data[isvar].transpose().copy(), dtype=dtype - ) + ra = np.rec.fromarrays(all_data[isvar].transpose().copy(), dtype=dtype) segs = self.segment_data[0].nseg[isvar] isseg = np.array( [True if s in segs else False for s in self.reach_data.iseg] @@ -1387,7 +1385,7 @@ def get_variable_by_stress_period(self, varname): return ra.view(np.recarray) def repair_outsegs(self): - isasegment = np.in1d( + isasegment = np.isin( self.segment_data[0].outseg, self.segment_data[0].nseg ) isasegment = isasegment | (self.segment_data[0].outseg < 0) diff --git a/flopy/modflow/mfstr.py b/flopy/modflow/mfstr.py index d7a5aaf52..572efa7e1 100644 --- a/flopy/modflow/mfstr.py +++ b/flopy/modflow/mfstr.py @@ -371,9 +371,7 @@ def __init__( ) assert d.dtype == self.dtype, e elif isinstance(d, np.ndarray): - d = np.core.records.fromarrays( - d.transpose(), dtype=self.dtype - ) + d = np.rec.fromarrays(d.transpose(), dtype=self.dtype) elif isinstance(d, int): if model.verbose: if d < 0: @@ -404,9 +402,7 @@ def __init__( ) assert d.dtype == self.dtype2, e elif isinstance(d, np.ndarray): - d = np.core.records.fromarrays( - d.transpose(), dtype=self.dtype2 - ) + d = np.rec.fromarrays(d.transpose(), dtype=self.dtype2) elif isinstance(d, int): if model.verbose: if d < 0: diff --git a/flopy/plot/plotutil.py b/flopy/plot/plotutil.py index ccbe5a272..2a55346d1 100644 --- a/flopy/plot/plotutil.py +++ b/flopy/plot/plotutil.py @@ -2732,7 +2732,7 @@ def to_mp7_pathlines( data = data.to_records(index=False) # build mp7 format recarray - ret = np.core.records.fromarrays( + ret = np.rec.fromarrays( [ data[seqn_key], data["iprp"], @@ -2841,7 +2841,7 @@ def to_mp7_endpoints( endpts = endpts.to_records(index=False) # build mp7 format recarray - ret = np.core.records.fromarrays( + ret = np.rec.fromarrays( [ endpts["sequencenumber"], endpts["iprp"], @@ -2928,7 +2928,7 @@ def to_prt_pathlines( data = data.to_records(index=False) # build prt format recarray - ret = np.core.records.fromarrays( + ret = np.rec.fromarrays( [ data["stressperiod"], data["timestep"], diff --git a/flopy/utils/binaryfile.py b/flopy/utils/binaryfile.py index 71cafb97c..157cb1740 100644 --- a/flopy/utils/binaryfile.py +++ b/flopy/utils/binaryfile.py @@ -299,7 +299,7 @@ def binaryread_struct(file, vartype, shape=(1,), charlen=16): # find the number of bytes for one value numbytes = vartype(1).nbytes # find the number of values - nval = np.core.fromnumeric.prod(shape) + nval = np.prod(shape) fmt = str(nval) + fmt s = 
file.read(numbytes * nval) result = struct.unpack(fmt, s) @@ -1138,7 +1138,7 @@ def _set_precision(self, precision="single"): h1dt = [ ("kstp", "i4"), ("kper", "i4"), - ("text", "a16"), + ("text", "S16"), ("ncol", "i4"), ("nrow", "i4"), ("nlay", "i4"), @@ -1161,10 +1161,10 @@ def _set_precision(self, precision="single"): ("delt", ffmt), ("pertim", ffmt), ("totim", ffmt), - ("modelnam", "a16"), - ("paknam", "a16"), - ("modelnam2", "a16"), - ("paknam2", "a16"), + ("modelnam", "S16"), + ("paknam", "S16"), + ("modelnam2", "S16"), + ("paknam2", "S16"), ] self.header1_dtype = np.dtype(h1dt) self.header2_dtype0 = np.dtype(h2dt0) diff --git a/flopy/utils/datafile.py b/flopy/utils/datafile.py index acf4ac066..51244a4b4 100644 --- a/flopy/utils/datafile.py +++ b/flopy/utils/datafile.py @@ -43,7 +43,7 @@ def __init__(self, filetype=None, precision="single"): ("kper", "i4"), ("pertim", floattype), ("totim", floattype), - ("text", "a16"), + ("text", "S16"), ("ncol", "i4"), ("nrow", "i4"), ("ilay", "i4"), @@ -56,7 +56,7 @@ def __init__(self, filetype=None, precision="single"): ("kper", "i4"), ("pertim", floattype), ("totim", floattype), - ("text", "a16"), + ("text", "S16"), ("ncol", "i4"), ("nrow", "i4"), ("ilay", "i4"), @@ -69,7 +69,7 @@ def __init__(self, filetype=None, precision="single"): ("kstp", "i4"), ("kper", "i4"), ("totim", floattype), - ("text", "a16"), + ("text", "S16"), ("ncol", "i4"), ("nrow", "i4"), ("ilay", "i4"), @@ -82,7 +82,7 @@ def __init__(self, filetype=None, precision="single"): ("kper", "i4"), ("pertim", floattype), ("totim", floattype), - ("text", "a16"), + ("text", "S16"), ("m1", "i4"), ("m2", "i4"), ("m3", "i4"), @@ -95,7 +95,7 @@ def __init__(self, filetype=None, precision="single"): ("kper", "i4"), ("pertim", floattype), ("totim", floattype), - ("text", "a16"), + ("text", "S16"), ("m1", "i4"), ("m2", "i4"), ("m3", "i4"), @@ -108,7 +108,7 @@ def __init__(self, filetype=None, precision="single"): ("kper", "i4"), ("pertim", floattype), ("totim", floattype), - ("text", "a16"), + ("text", "S16"), ("m1", "i4"), ("m2", "i4"), ("m3", "i4"), diff --git a/flopy/utils/modpathfile.py b/flopy/utils/modpathfile.py index 8a4dc9347..5bc3e2060 100644 --- a/flopy/utils/modpathfile.py +++ b/flopy/utils/modpathfile.py @@ -124,12 +124,12 @@ def intersect( cells = t cells = np.array(cells, dtype=raslice.dtype) - inds = np.in1d(raslice, cells) + inds = np.isin(raslice, cells) epdest = self._data[inds].copy().view(np.recarray) if to_recarray: # use particle ids to get the rest of the paths - inds = np.in1d(self._data["particleid"], epdest.particleid) + inds = np.isin(self._data["particleid"], epdest.particleid) series = self._data[inds].copy() series.sort(order=["particleid", "time"]) series = series.view(np.recarray) @@ -693,7 +693,7 @@ def get_destination_endpoint_data(self, dest_cells, source=False): dtype = np.dtype(dtype) dest_cells = np.array(dest_cells, dtype=dtype) - inds = np.in1d(raslice, dest_cells) + inds = np.isin(raslice, dest_cells) return data[inds].copy().view(np.recarray) def write_shapefile( diff --git a/flopy/utils/recarray_utils.py b/flopy/utils/recarray_utils.py index 27114c732..4b8a00f44 100644 --- a/flopy/utils/recarray_utils.py +++ b/flopy/utils/recarray_utils.py @@ -62,7 +62,7 @@ def ra_slice(ra, cols): -------- >>> import numpy as np >>> from flopy.utils import ra_slice - >>> a = np.core.records.fromrecords([("a", 1, 1.1), ("b", 2, 2.1)]) + >>> a = np.rec.fromrecords([("a", 1, 1.1), ("b", 2, 2.1)]) >>> ra_slice(a, ['f0', 'f1']) rec.array([('a', 1), ('b', 2)], 
              dtype=[('f0', '<U1'), ('f1', '<i8')])
-    "numpy >=1.15.0,<2.0.0",
+    "numpy >=1.20.3,<2.0.0",
     "matplotlib >=1.4.0",
     "pandas >=2.0.0"
 ]

From 18dfcb025165ae04faca1b831d3917263be089f0 Mon Sep 17 00:00:00 2001
From: wpbonelli
Date: Mon, 17 Jun 2024 12:59:46 -0400
Subject: [PATCH 30/57] test: replace test_mf6_examples.py with CI test job
 (#2239)

Remove `autotest/regression/test_mf6_examples.py` and the associated
parametrization in `autotest/regression/conftest.py`. I wrote these shortly
after joining the team. Eventually they evolved into modflow-devtools
fixtures to feed MF6 example models to test functions. It is cleaner to just
check out the examples repo and run its own suite against the flopy or mf6
under test, though. This PR adds a job for this to `mf6.yml`. I plan to
retire the fixtures soon; a models API as proposed for a future flopy would
supersede them anyway.

This PR also moves regression testing to `commit.yml` and removes
`regression.yml`. Regression tests were failing before this because the
parametrization did not run GWF and GWE models in the proper order.

---
 .github/workflows/commit.yml             |   2 +-
 .github/workflows/mf6.yml                | 106 ++++++++++++++++-------
 .github/workflows/regression.yml         |  65 --------------
 autotest/regression/conftest.py          |  97 ---------------------
 autotest/regression/test_mf6_examples.py |  93 --------------------
 5 files changed, 78 insertions(+), 285 deletions(-)
 delete mode 100644 .github/workflows/regression.yml
 delete mode 100644 autotest/regression/conftest.py
 delete mode 100644 autotest/regression/test_mf6_examples.py

diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml
index 0cc6dcbd3..0b51dfa91 100644
--- a/.github/workflows/commit.yml
+++ b/.github/workflows/commit.yml
@@ -176,7 +176,7 @@ jobs:
       - name: Run tests
         working-directory: autotest
         run: |
-          pytest -v -m="not example and not regression" -n=auto --cov=flopy --cov-append --cov-report=xml --durations=0 --keep-failed=.failed --dist loadfile
+          pytest -v -m="not example" -n=auto --cov=flopy --cov-append --cov-report=xml --durations=0 --keep-failed=.failed --dist loadfile
           coverage report
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/.github/workflows/mf6.yml b/.github/workflows/mf6.yml
index 389109e1a..72011cdfc 100644
--- a/.github/workflows/mf6.yml
+++ b/.github/workflows/mf6.yml
@@ -13,11 +13,9 @@ concurrency:
   cancel-in-progress: true
 
 jobs:
-  test:
+  test_mf6:
     name: Modflow6 FloPy tests
     runs-on: ubuntu-latest
-    strategy:
-      fail-fast: false
     defaults:
       run:
         shell: bash
@@ -40,6 +38,7 @@ jobs:
           pip install https://github.com/Deltares/xmipy/zipball/develop
           pip install https://github.com/MODFLOW-USGS/modflowapi/zipball/develop
           pip install .[test,optional]
+          pip install meson ninja
 
       - name: Setup GNU Fortran
         uses: fortran-lang/setup-fortran@v1
@@ -53,46 +52,30 @@ jobs:
           repository: MODFLOW-USGS/modflow6
           path: modflow6
 
-      - name: Update flopy MODFLOW 6 classes
-        working-directory: modflow6/autotest
-        run: |
-          python update_flopy.py
-
-      - name: Install meson
-        run: |
-          pip3 install meson ninja
-
-      - name: Setup modflow
+      - name: Build and install MF6
         working-directory: modflow6
         run: |
           meson setup builddir --buildtype=debugoptimized --prefix=$(pwd) --libdir=bin
-
-      - name: Build modflow
-        working-directory: modflow6
-        run: |
-          meson compile -C builddir
-
-      - name: Install modflow
-        working-directory: modflow6
-        run: |
           meson install -C builddir
+          meson test --verbose --no-rebuild -C builddir
+
+      - name: Update package classes
+        working-directory: modflow6/autotest
+        run: python update_flopy.py
 
-      - name: Get 
executables + - name: Install executables working-directory: modflow6/autotest env: GITHUB_TOKEN: ${{ github.token }} - run: | - pytest -v --durations=0 get_exes.py + run: pytest -v --durations=0 get_exes.py - name: Run tests working-directory: modflow6/autotest - run: | - pytest -v --cov=flopy --cov-report=xml --durations=0 -n auto -m "not repo and not regression" + run: pytest -v --cov=flopy --cov-report=xml --cov-append --durations=0 -n auto -m "not repo and not regression" - name: Print coverage report before upload working-directory: ./modflow6/autotest - run: | - coverage report + run: coverage report - name: Upload coverage to Codecov if: @@ -100,3 +83,68 @@ jobs: uses: codecov/codecov-action@v3 with: files: ./modflow6/autotest/coverage.xml + + test_mf6_examples: + name: MF6 examples FloPy tests + runs-on: ubuntu-latest + defaults: + run: + shell: bash + steps: + + - name: Checkout flopy repo + uses: actions/checkout@v4 + + - name: Checkout MODFLOW 6 + uses: actions/checkout@v4 + with: + repository: MODFLOW-USGS/modflow6 + path: modflow6 + + - name: Checkout MF6 examples + uses: actions/checkout@v4 + with: + repository: MODFLOW-USGS/modflow6-examples + path: modflow6-examples + + - name: Setup Python + uses: actions/setup-python@v5 + with: + python-version: 3.9 + cache: 'pip' + cache-dependency-path: pyproject.toml + + - name: Install Python dependencies + run: | + pip install --upgrade pip + pip install https://github.com/modflowpy/pymake/zipball/master + pip install https://github.com/Deltares/xmipy/zipball/develop + pip install https://github.com/MODFLOW-USGS/modflowapi/zipball/develop + pip install .[test,optional] + pip install meson ninja + pip install -r modflow6-examples/etc/requirements.pip.txt + + - name: Setup GNU Fortran + uses: fortran-lang/setup-fortran@v1 + with: + compiler: gcc + version: 13 + + - name: Install executables + uses: modflowpy/install-modflow-action@v1 + + - name: Build and install MF6 + working-directory: modflow6 + run: | + meson setup builddir --buildtype=debugoptimized --prefix=$(pwd) --libdir=bin + meson install -C builddir + meson test --verbose --no-rebuild -C builddir + cp bin/* ~/.local/bin/modflow/ + + - name: Update package classes + working-directory: modflow6/autotest + run: python update_flopy.py + + - name: Test MF6 examples + working-directory: modflow6-examples/autotest + run: pytest -v -n=auto --durations=0 test_scripts.py diff --git a/.github/workflows/regression.yml b/.github/workflows/regression.yml deleted file mode 100644 index 7bbc672fe..000000000 --- a/.github/workflows/regression.yml +++ /dev/null @@ -1,65 +0,0 @@ -name: FloPy regression tests - -on: - schedule: - - cron: '0 8 * * *' # run at 8 AM UTC (12 am PST) - -jobs: - regression: - name: Regression tests - runs-on: ${{ matrix.os }} - strategy: - fail-fast: false - matrix: - os: [ ubuntu-latest, macos-latest, windows-latest ] - python-version: [ 3.8, 3.9, "3.10", "3.11", "3.12" ] - exclude: - # avoid shutil.copytree infinite recursion bug - # https://github.com/python/cpython/pull/17098 - - python-version: '3.8.0' - defaults: - run: - shell: bash -l {0} - timeout-minutes: 90 - steps: - - name: Checkout repo - uses: actions/checkout@v4 - - - name: Setup Micromamba - uses: mamba-org/setup-micromamba@v1 - with: - environment-file: etc/environment.yml - cache-environment: true - cache-downloads: true - create-args: >- - python=${{ matrix.python-version }} - init-shell: >- - bash - powershell - - - name: Install FloPy - run: pip install . 
- - - name: Install Modflow-related executables - uses: modflowpy/install-modflow-action@v1 - - - name: Install Modflow dev build executables - uses: modflowpy/install-modflow-action@v1 - with: - repo: modflow6-nightly-build - - - name: Update FloPy packages - run: python -m flopy.mf6.utils.generate_classes --ref develop --no-backup - - - name: Run regression tests - working-directory: autotest - run: pytest -v -m="regression" -n=auto --durations=0 --keep-failed=.failed - env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - - - name: Upload failed test outputs - uses: actions/upload-artifact@v4 - if: failure() - with: - name: failed-regression-${{ matrix.os }}-${{ matrix.python-version }} - path: autotest/.failed/** diff --git a/autotest/regression/conftest.py b/autotest/regression/conftest.py deleted file mode 100644 index 66c064645..000000000 --- a/autotest/regression/conftest.py +++ /dev/null @@ -1,97 +0,0 @@ -from itertools import groupby -from os import linesep -from pathlib import Path -from tempfile import gettempdir - -import pytest -from filelock import FileLock -from modflow_devtools.download import download_and_unzip - -__mf6_examples = "mf6_examples" -__mf6_examples_path = Path(gettempdir()) / __mf6_examples -__mf6_examples_lock = FileLock(Path(gettempdir()) / f"{__mf6_examples}.lock") - - -def get_mf6_examples_path() -> Path: - # use file lock so mf6 distribution is downloaded once, - # even when tests are run in parallel with pytest-xdist - __mf6_examples_lock.acquire() - try: - if __mf6_examples_path.is_dir() and any(__mf6_examples_path.glob("*")): - print("Example models already exist") - else: - __mf6_examples_path.mkdir(exist_ok=True) - print("Downloading example models") - download_and_unzip( - url="https://github.com/MODFLOW-USGS/modflow6-examples/releases/download/current/modflow6-examples.zip", - path=str(__mf6_examples_path), - verbose=True, - ) - return __mf6_examples_path - finally: - __mf6_examples_lock.release() - - -def is_nested(namfile) -> bool: - p = Path(namfile) - if not p.is_file() or not p.name.endswith(".nam"): - raise ValueError(f"Expected a namfile path, got {p}") - - return p.parent.parent.name != __mf6_examples - - -def pytest_generate_tests(metafunc): - # examples to skip: - # - ex-gwtgwt-mt3dms-p10: https://github.com/MODFLOW-USGS/modflow6/pull/1008 - exclude = ["ex-gwt-gwtgwt-mt3dms-p10"] - namfiles = [ - str(p) - for p in get_mf6_examples_path().rglob("mfsim.nam") - if not any(e in str(p) for e in exclude) - ] - - # parametrization by model - # - single namfile per test case - # - no coupling (only first model in each simulation subdir is used) - key = "mf6_example_namfile" - if key in metafunc.fixturenames: - metafunc.parametrize(key, sorted(namfiles)) - - # parametrization by simulation - # - each test case gets an ordered list of 1+ namfiles - # - models can be coupled (run in order provided, sharing workspace) - key = "mf6_example_namfiles" - if key in metafunc.fixturenames: - simulations = [] - - def simulation_name_from_model_path(p): - p = Path(p) - return p.parent.parent.name if is_nested(p) else p.parent.name - - for model_name, model_namfiles in groupby( - namfiles, key=simulation_name_from_model_path - ): - models = sorted( - list(model_namfiles) - ) # sort in alphabetical order (gwf < gwt) - simulations.append(models) - print( - f"Simulation {model_name} has {len(models)} model(s):\n" - f"{linesep.join(model_namfiles)}" - ) - - def simulation_name_from_model_namfiles(mnams): - try: - namfile = next(iter(mnams), None) - except TypeError: 
- namfile = None - if namfile is None: - pytest.skip("No namfiles (expected ordered collection)") - namfile = Path(namfile) - return ( - namfile.parent.parent if is_nested(namfile) else namfile.parent - ).name - - metafunc.parametrize( - key, simulations, ids=simulation_name_from_model_namfiles - ) diff --git a/autotest/regression/test_mf6_examples.py b/autotest/regression/test_mf6_examples.py deleted file mode 100644 index 0b652dbf5..000000000 --- a/autotest/regression/test_mf6_examples.py +++ /dev/null @@ -1,93 +0,0 @@ -from pathlib import Path -from shutil import copytree - -import pytest -from modflow_devtools.markers import requires_exe, requires_pkg - -from autotest.regression.conftest import is_nested -from flopy.mf6 import MFSimulation -from flopy.utils.compare import compare_heads - -pytestmark = pytest.mark.mf6 - - -@requires_exe("mf6") -@pytest.mark.slow -@pytest.mark.regression -def test_mf6_example_simulations(function_tmpdir, mf6_example_namfiles): - # MF6 examples parametrized by simulation. `mf6_example_namfiles` is a list - # of models to run in order provided. Coupled models share the same tempdir - # - # Parameters - # ---------- - # function_tmpdir: function-scoped temporary directory fixture - # mf6_example_namfiles: ordered list of namfiles for 1+ coupled models - - # make sure we have at least 1 name file - if len(mf6_example_namfiles) == 0: - pytest.skip("No namfiles (expected ordered collection)") - namfile = Path(mf6_example_namfiles[0]) # pull the first model's namfile - - # coupled models have nested dirs (e.g., 'mf6gwf' and 'mf6gwt') under model directory - # TODO: are there multiple types of couplings? e.g. besides GWF-GWT, mt3dms? - nested = is_nested(namfile) - function_tmpdir = Path( - function_tmpdir / "workspace" - ) # working directory (must not exist for copytree) - cmpdir = function_tmpdir / "compare" # comparison directory - - # copy model files into working directory - copytree( - src=namfile.parent.parent if nested else namfile.parent, - dst=function_tmpdir, - ) - - def run_models(): - # run models in order received (should be alphabetical, so gwf precedes gwt) - for namfile in mf6_example_namfiles: - namfile_path = Path(namfile).resolve() - namfile_name = namfile_path.name - model_path = namfile_path.parent - - # working directory must be named according to the name file's parent (e.g. 
-            # 'mf6gwf') because coupled models refer to each other with relative paths
-            wrkdir = (
-                Path(function_tmpdir / model_path.name)
-                if nested
-                else function_tmpdir
-            )
-
-            # load simulation
-            sim = MFSimulation.load(
-                namfile_name, version="mf6", exe_name="mf6", sim_ws=wrkdir
-            )
-            assert isinstance(sim, MFSimulation)
-
-            # run simulation
-            success, buff = sim.run_simulation(report=True)
-            assert success
-
-            # change to comparison workspace
-            sim.simulation_data.mfpath.set_sim_path(cmpdir)
-
-            # write simulation files and rerun
-            sim.write_simulation()
-            success, _ = sim.run_simulation()
-            assert success
-
-            # get head file outputs
-            headfiles1 = [p for p in wrkdir.glob("*.hds")]
-            headfiles2 = [p for p in cmpdir.glob("*.hds")]
-
-            # compare heads
-            assert compare_heads(
-                None,
-                None,
-                precision="double",
-                text="head",
-                files1=[str(p) for p in headfiles1],
-                files2=[str(p) for p in headfiles2],
-                outfile=cmpdir / "head_compare.dat",
-            )
-
-    run_models()

From 59040d0948337245d6527671960b56446d39d4d3 Mon Sep 17 00:00:00 2001
From: wpbonelli
Date: Mon, 17 Jun 2024 17:41:57 -0400
Subject: [PATCH 31/57] refactor: np.where(cond) -> np.asarray(cond).nonzero()
 (#2238)

The docs for `np.where()`
(https://numpy.org/doc/stable/reference/generated/numpy.where.html)
suggest preferring `nonzero()` when `where()` is called without the `x`
and `y` arguments. In the spirit of defensive programming I included
`np.asarray(cond)` even where `cond` is already an array.

This PR also fixes a bug I introduced in the model splitter in #2124:
while E711 (https://www.flake8rules.com/rules/E711.html) dictates that
comparisons to `None` should use identity rather than equality, this
rule should not be applied to NumPy array selection conditions, as it
will change the semantics:

>>> a = np.array([None, None])
>>> a[a != None]
array([], dtype=object)
>>> a[a is not None]
array([[None, None]], dtype=object)

Unrelatedly, mark `test_mt3d.py::test_mfnwt_keat_uzf()` slow; it should
not be included in smoke tests (this was causing the optional dependency
CI tests to fail due to timeout). Also clean up some unused imports in
`conftest.py`.
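
For reference, the two idioms are interchangeable when `where()` is
given only a condition (a small sketch with a made-up array):

>>> a = np.array([3, 0, 5, 0])
>>> np.where(a > 2)
(array([0, 2]),)
>>> np.asarray(a > 2).nonzero()
(array([0, 2]),)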
--- autotest/conftest.py | 4 +- autotest/test_lake_connections.py | 6 +- autotest/test_mt3d.py | 1 + autotest/test_sfr.py | 2 +- autotest/test_zonbud_utility.py | 4 +- flopy/discretization/grid.py | 6 +- flopy/discretization/structuredgrid.py | 4 +- flopy/export/netcdf.py | 2 +- flopy/export/utils.py | 2 +- flopy/export/vtk.py | 6 +- flopy/mf6/utils/lakpak_utils.py | 6 +- flopy/mf6/utils/model_splitter.py | 94 +++++++------ flopy/modflow/mfrch.py | 4 +- flopy/modflow/mfsfr2.py | 6 +- flopy/pest/params.py | 2 +- flopy/plot/plotutil.py | 6 +- flopy/utils/binaryfile.py | 30 ++-- flopy/utils/check.py | 2 +- flopy/utils/compare.py | 4 +- flopy/utils/cvfdutil.py | 2 +- flopy/utils/datafile.py | 8 +- flopy/utils/flopy_io.py | 2 +- flopy/utils/formattedfile.py | 2 +- flopy/utils/geometry.py | 4 +- flopy/utils/gridgen.py | 2 +- flopy/utils/lgrutil.py | 2 +- flopy/utils/observationfile.py | 4 +- flopy/utils/particletrackfile.py | 14 +- flopy/utils/rasters.py | 4 +- flopy/utils/sfroutputfile.py | 4 +- flopy/utils/swroutputfile.py | 4 +- flopy/utils/triangle.py | 2 +- flopy/utils/util_list.py | 10 +- flopy/utils/voronoi.py | 4 +- flopy/utils/zonbud.py | 184 ++++++++++++------------- 35 files changed, 235 insertions(+), 208 deletions(-) diff --git a/autotest/conftest.py b/autotest/conftest.py index 6f770ef54..13e25a530 100644 --- a/autotest/conftest.py +++ b/autotest/conftest.py @@ -1,12 +1,10 @@ import re from importlib import metadata -from io import BytesIO, StringIO from pathlib import Path from platform import system -from typing import List, Optional +from typing import List import matplotlib.pyplot as plt -import numpy as np import pytest from modflow_devtools.misc import is_in_ci diff --git a/autotest/test_lake_connections.py b/autotest/test_lake_connections.py index ae0690b55..71bab3d00 100644 --- a/autotest/test_lake_connections.py +++ b/autotest/test_lake_connections.py @@ -216,7 +216,7 @@ def test_lake(function_tmpdir, example_data_path): # mm.plot_array(bot_tm) # determine a reasonable lake bottom - idx = np.where(lakes > -1) + idx = np.asarray(lakes > -1).nonzero() lak_bot = bot_tm[idx].max() + 2.0 # interpolate top elevations @@ -634,9 +634,9 @@ def test_embedded_lak_prudic_mixed(example_data_path): lake_map[0, :, :] = lakibd[:, :] - 1 lakebed_leakance = np.zeros(shape2d, dtype=object) - idx = np.where(lake_map[0, :, :] == 0) + idx = np.asarray(lake_map[0, :, :] == 0).nonzero() lakebed_leakance[idx] = "none" - idx = np.where(lake_map[0, :, :] == 1) + idx = np.asarray(lake_map[0, :, :] == 1).nonzero() lakebed_leakance[idx] = 1.0 lakebed_leakance = lakebed_leakance.tolist() diff --git a/autotest/test_mt3d.py b/autotest/test_mt3d.py index 27dfafff9..313809cc2 100644 --- a/autotest/test_mt3d.py +++ b/autotest/test_mt3d.py @@ -287,6 +287,7 @@ def test_mf2000_zeroth(function_tmpdir, mf2kmt3d_model_path): assert success, f"{mt.name} did not run" +@pytest.mark.slow @flaky(max_runs=3) @requires_exe("mfnwt", "mt3dms") @excludes_platform( diff --git a/autotest/test_sfr.py b/autotest/test_sfr.py index 452e87123..c50dcffe5 100644 --- a/autotest/test_sfr.py +++ b/autotest/test_sfr.py @@ -236,7 +236,7 @@ def interpolate_to_reaches(sfr): sfr.get_slopes(minimum_slope=-100, maximum_slope=100) reach_inds = 29 outreach = sfr.reach_data.outreach[reach_inds] - out_inds = np.where(sfr.reach_data.reachID == outreach) + out_inds = np.asarray(sfr.reach_data.reachID == outreach).nonzero() assert ( sfr.reach_data.slope[reach_inds] == ( diff --git a/autotest/test_zonbud_utility.py b/autotest/test_zonbud_utility.py 
index 8b4f35b66..86991e6b2 100644 --- a/autotest/test_zonbud_utility.py +++ b/autotest/test_zonbud_utility.py @@ -113,8 +113,8 @@ def test_compare2zonebudget(cbc_f, zon_f, zbud_f, rtol): zb_arr = zba[zba["totim"] == time] fp_arr = fpa[fpa["totim"] == time] for name in fp_arr["name"]: - r1 = np.where(zb_arr["name"] == name) - r2 = np.where(fp_arr["name"] == name) + r1 = np.asarray(zb_arr["name"] == name).nonzero() + r2 = np.asarray(fp_arr["name"] == name).nonzero() if r1[0].shape[0] < 1 or r2[0].shape[0] < 1: continue if r1[0].shape[0] != r2[0].shape[0]: diff --git a/flopy/discretization/grid.py b/flopy/discretization/grid.py index 0a091320d..2681a671c 100644 --- a/flopy/discretization/grid.py +++ b/flopy/discretization/grid.py @@ -455,15 +455,15 @@ def saturated_thickness(self, array, mask=None): bot = self.remove_confining_beds(bot) array = self.remove_confining_beds(array) - idx = np.where((array < top) & (array > bot)) + idx = np.asarray((array < top) & (array > bot)).nonzero() thickness[idx] = array[idx] - bot[idx] - idx = np.where(array <= bot) + idx = np.asarray(array <= bot).nonzero() thickness[idx] = 0.0 if mask is not None: if isinstance(mask, (float, int)): mask = [float(mask)] for mask_value in mask: - thickness[np.where(array == mask_value)] = np.nan + thickness[np.asarray(array == mask_value).nonzero()] = np.nan return thickness def saturated_thick(self, array, mask=None): diff --git a/flopy/discretization/structuredgrid.py b/flopy/discretization/structuredgrid.py index ba5a1a430..13f15346c 100644 --- a/flopy/discretization/structuredgrid.py +++ b/flopy/discretization/structuredgrid.py @@ -930,7 +930,7 @@ def intersect(self, x, y, z=None, local=False, forgive=False): "x, y point given is outside of the model area" ) else: - col = np.where(xcomp)[0][-1] + col = np.asarray(xcomp).nonzero()[0][-1] ycomp = y < ye if np.all(ycomp) or not np.any(ycomp): @@ -941,7 +941,7 @@ def intersect(self, x, y, z=None, local=False, forgive=False): "x, y point given is outside of the model area" ) else: - row = np.where(ycomp)[0][-1] + row = np.asarray(ycomp).nonzero()[0][-1] if np.any(np.isnan([row, col])): row = col = np.nan if z is not None: diff --git a/flopy/export/netcdf.py b/flopy/export/netcdf.py index cfc2907d2..eda44e117 100644 --- a/flopy/export/netcdf.py +++ b/flopy/export/netcdf.py @@ -632,7 +632,7 @@ def difference( d_data[np.isnan(d_data)] = FILLVALUE if mask_zero_diff: - d_data[np.where(d_data == 0.0)] = FILLVALUE + d_data[np.asarray(d_data == 0.0).nonzero()] = FILLVALUE var = new_net.create_variable( vname, attrs, s_var.dtype, dimensions=s_var.dimensions diff --git a/flopy/export/utils.py b/flopy/export/utils.py index 575413436..d0b577398 100644 --- a/flopy/export/utils.py +++ b/flopy/export/utils.py @@ -203,7 +203,7 @@ def _add_output_nc_variable( logger.log(f"creating array for {var_name}") for mask_val in mask_vals: - array[np.where(array == mask_val)] = np.nan + array[np.asarray(array == mask_val).nonzero()] = np.nan mx, mn = np.nanmax(array), np.nanmin(array) array[np.isnan(array)] = netcdf.FILLVALUE diff --git a/flopy/export/vtk.py b/flopy/export/vtk.py index 16aa5dcc0..e25c94695 100644 --- a/flopy/export/vtk.py +++ b/flopy/export/vtk.py @@ -574,7 +574,9 @@ def _build_hfbs(self, pkg): pts = [] for v in v1: - ix = np.where((v2.T[0] == v[0]) & (v2.T[1] == v[1])) + ix = np.asarray( + (v2.T[0] == v[0]) & (v2.T[1] == v[1]) + ).nonzero() if len(ix[0]) > 0 and len(pts) < 2: pts.append(v2[ix[0][0]]) @@ -652,7 +654,7 @@ def _build_point_scalar_array(self, array): ps_array[pt] = 
array[value["idx"][ix]] else: ps_graph = self._point_scalar_numpy_graph.copy() - idxs = np.where(np.isnan(array)) + idxs = np.asarray(np.isnan(array)).nonzero() not_graphed = np.isin(ps_graph, idxs[0]) ps_graph[not_graphed] = -1 ps_array = np.where(ps_graph >= 0, array[ps_graph], np.nan) diff --git a/flopy/mf6/utils/lakpak_utils.py b/flopy/mf6/utils/lakpak_utils.py index 9dc328293..8b1e1b25b 100644 --- a/flopy/mf6/utils/lakpak_utils.py +++ b/flopy/mf6/utils/lakpak_utils.py @@ -125,7 +125,7 @@ def get_lak_connections(modelgrid, lake_map, idomain=None, bedleak=None): unique = np.unique(lake_map) # exclude lakes with lake numbers less than 0 - idx = np.where(unique > -1) + idx = np.asarray(unique > -1).nonzero() unique = unique[idx] dx, dy = None, None @@ -199,7 +199,9 @@ def get_lak_connections(modelgrid, lake_map, idomain=None, bedleak=None): # reset idomain for lake if iconn > 0: - idx = np.where((lake_map == lake_number) & (idomain > 0)) + idx = np.asarray( + (lake_map == lake_number) & (idomain > 0) + ).nonzero() idomain[idx] = 0 return idomain, connection_dict, connectiondata diff --git a/flopy/mf6/utils/model_splitter.py b/flopy/mf6/utils/model_splitter.py index 731387761..b16ba93b6 100644 --- a/flopy/mf6/utils/model_splitter.py +++ b/flopy/mf6/utils/model_splitter.py @@ -316,7 +316,7 @@ def load_node_mapping(self, sim, filename): for mkey in models: ncpl = self._new_ncpl[mkey] array = np.full((ncpl,), -1, dtype=int) - onode = np.where(model_array == mkey)[0] + onode = np.asarray(model_array == mkey).nonzero()[0] nnode = split_array[onode] array[nnode] = onode grid_info[mkey] = (array,) @@ -413,7 +413,7 @@ def optimize_splitting_mask(self, nparts): membership = np.array(membership, dtype=int) if laks: for lak in laks: - idx = np.where(lak_array == lak)[0] + idx = np.asarray(lak_array == lak).nonzero()[0] mnum = np.unique(membership[idx])[0] membership[idx] = mnum @@ -429,7 +429,7 @@ def optimize_splitting_mask(self, nparts): ev = np.equal(mnums1, mnums2) if np.all(ev): continue - idx = np.where(~ev)[0] + idx = np.asarray(~ev).nonzero()[0] mnum_to = mnums1[idx] adj_nodes = nodes2[idx] membership[adj_nodes] = mnum_to @@ -471,7 +471,7 @@ def reconstruct_array(self, arrays): array = array.ravel() ncpl = self._new_ncpl[mkey] mapping = self._grid_info[mkey][-1] - old_nodes = np.where(mapping != -1) + old_nodes = np.asarray(mapping != -1).nonzero() new_nodes = mapping[old_nodes] old_nodes = np.tile(old_nodes, (nlay, 1)) @@ -645,7 +645,7 @@ def _remap_nodes(self, array): bad_keys = [] for mkey in mkeys: count = 0 - mask = np.where(array == mkey) + mask = np.asarray(array == mkey).nonzero() for arr in idomain: check = arr[mask] count += np.count_nonzero(check) @@ -670,7 +670,7 @@ def _remap_nodes(self, array): if self._modelgrid.grid_type == "structured": a = array.reshape(self._modelgrid.nrow, self._modelgrid.ncol) for m in np.unique(a): - cells = np.where(a == m) + cells = np.asarray(a == m).nonzero() rmin, rmax = np.min(cells[0]), np.max(cells[0]) cmin, cmax = np.min(cells[1]), np.max(cells[1]) cellids = list(zip([0] * len(cells[0]), cells[0], cells[1])) @@ -702,7 +702,7 @@ def _remap_nodes(self, array): xverts, yverts = None, None for m in np.unique(array): - cells = np.where(array == m)[0] + cells = np.asarray(array == m).nonzero()[0] mapping = np.zeros( ( len( @@ -718,9 +718,9 @@ def _remap_nodes(self, array): if xverts is not None: mxv = xverts[cells] myv = yverts[cells] - xmidx = np.where(mxv == np.nanmin(mxv))[0] + xmidx = np.asarray(mxv == np.nanmin(mxv)).nonzero()[0] myv = myv[xmidx] 
- ymidx = np.where(myv == np.nanmin(myv))[0] + ymidx = np.asarray(myv == np.nanmin(myv)).nonzero()[0] self._offsets[m] = { "xorigin": np.nanmin(mxv[xmidx[0]]), @@ -736,11 +736,11 @@ def _remap_nodes(self, array): new_ncpl[m] *= i for mdl in np.unique(array): - mnodes = np.where(array == mdl)[0] + mnodes = np.asarray(array == mdl).nonzero()[0] mg_info = grid_info[mdl] if mg_info is not None: mapping = mg_info[-1] - new_nodes = np.where(mapping != -1)[0] + new_nodes = np.asarray(mapping != -1).nonzero()[0] old_nodes = mapping[new_nodes] for ix, nnode in enumerate(new_nodes): self._node_map[old_nodes[ix]] = (mdl, nnode) @@ -1163,7 +1163,7 @@ def _remap_array(self, item, mfarray, mapped_data, **kwargs): new_ncpl = self._new_ncpl[mkey] new_array = np.zeros(new_ncpl * nlay, dtype=dtype) mapping = self._grid_info[mkey][-1] - new_nodes = np.where(mapping != -1) + new_nodes = np.asarray(mapping != -1).nonzero() old_nodes = mapping[new_nodes] old_nodes = np.tile(old_nodes, (nlay, 1)) @@ -1263,7 +1263,7 @@ def _remap_mflist( new_model, new_node = self._get_new_model_new_node(nodes) for mkey, model in self._model_dict.items(): - idx = np.where(new_model == mkey)[0] + idx = np.asarray(new_model == mkey).nonzero()[0] if self._pkg_mover and transient: mvr_remap = { idx[i]: (model.name, i) for i in range(len(idx)) @@ -1363,7 +1363,7 @@ def _remap_uzf(self, package, mapped_data): name = package.filename self._uzf_remaps[name] = {} for mkey, model in self._model_dict.items(): - idx = np.where(new_model == mkey)[0] + idx = np.asarray(new_model == mkey).nonzero()[0] if len(idx) == 0: new_recarray = None else: @@ -1401,7 +1401,9 @@ def _remap_uzf(self, package, mapped_data): spd = {} for per, recarray in perioddata.items(): - idx = np.where(np.isin(recarray.ifno, uzf_nodes)) + idx = np.asarray( + np.isin(recarray.ifno, uzf_nodes) + ).nonzero() new_period = recarray[idx] new_period["ifno"] = [ uzf_remap[i] for i in new_period["ifno"] @@ -1547,7 +1549,7 @@ def _remap_lak(self, package, mapped_data): new_model, new_node = self._get_new_model_new_node(nodes) for mkey, model in self._model_dict.items(): - idx = np.where(new_model == mkey)[0] + idx = np.asarray(new_model == mkey).nonzero()[0] if len(idx) == 0: new_recarray = None else: @@ -1586,7 +1588,9 @@ def _remap_lak(self, package, mapped_data): if meta[0] == mkey: mapnos.append(lak) - idxs = np.where(np.isin(outlets.lakein, mapnos))[0] + idxs = np.asarray( + np.isin(outlets.lakein, mapnos) + ).nonzero()[0] if len(idxs) == 0: new_outlets = None else: @@ -1680,7 +1684,7 @@ def _remap_sfr(self, package, mapped_data): new_model, new_node = self._get_new_model_new_node(nodes) for mkey, model in self._model_dict.items(): - idx = np.where(new_model == mkey)[0] + idx = np.asarray(new_model == mkey).nonzero()[0] if len(idx) == 0: new_recarray = None continue @@ -1709,7 +1713,9 @@ def _remap_sfr(self, package, mapped_data): ) # now let's remap connection data and tag external exchanges - idx = np.where(np.isin(connectiondata.ifno, old_rno))[0] + idx = np.asarray( + np.isin(connectiondata.ifno, old_rno) + ).nonzero()[0] new_connectiondata = connectiondata[idx] ncons = [] for ix, rec in enumerate(new_connectiondata): @@ -1776,8 +1782,12 @@ def _remap_sfr(self, package, mapped_data): if m0 != m1: div_mover_ix.append(ix) - idx = np.where(np.isin(diversions.ifno, old_rno))[0] - idx = np.where(~np.isin(idx, div_mover_ix))[0] + idx = np.asarray( + np.isin(diversions.ifno, old_rno) + ).nonzero()[0] + idx = np.asarray( + ~np.isin(idx, div_mover_ix) + ).nonzero()[0] 
new_diversions = diversions[idx] new_rno = [ @@ -1802,23 +1812,25 @@ def _remap_sfr(self, package, mapped_data): # now we can do the stress period data spd = {} for kper, recarray in perioddata.items(): - idx = np.where(np.isin(recarray.ifno, old_rno))[0] + idx = np.asarray( + np.isin(recarray.ifno, old_rno) + ).nonzero()[0] new_spd = recarray[idx] if diversions is not None: - external_divs = np.where( + external_divs = np.asarray( np.isin(new_spd.idv, list(div_mvr_conn.keys())) - )[0] + ).nonzero()[0] if len(external_divs) > 0: for ix in external_divs: rec = recarray[ix] idv = recarray["idv"] div_mvr_conn[idv].append(rec["divflow"]) - idx = np.where( + idx = np.asarray( ~np.isin( new_spd.idv, list(div_mvr_conn.keys()) ) - )[0] + ).nonzero()[0] new_spd = new_spd[idx] @@ -1931,7 +1943,7 @@ def _remap_maw(self, package, mapped_data): maw_remaps = {} for mkey, model in self._model_dict.items(): - idx = np.where(new_model == mkey)[0] + idx = np.asarray(new_model == mkey).nonzero()[0] new_connectiondata = connectiondata[idx] if len(new_connectiondata) == 0: continue @@ -1965,7 +1977,9 @@ def _remap_maw(self, package, mapped_data): spd = {} for per, recarray in perioddata.items(): - idx = np.where(np.isin(recarray.ifno, maw_wellnos))[0] + idx = np.asarray( + np.isin(recarray.ifno, maw_wellnos) + ).nonzero()[0] if len(idx) > 0: new_recarray = recarray[idx] new_wellno = [ @@ -2030,7 +2044,7 @@ def _remap_csub(self, package, mapped_data): ninterbeds = None for mkey, model in self._model_dict.items(): - idx = np.where(new_model == mkey)[0] + idx = np.asarray(new_model == mkey).nonzero()[0] if len(idx) == 0: new_packagedata = None else: @@ -2052,7 +2066,7 @@ def _remap_csub(self, package, mapped_data): layers, nodes = self._cellid_to_layer_node(recarray.cellid) new_model, new_node = self._get_new_model_new_node(nodes) - idx = np.where(new_model == mkey)[0] + idx = np.asarray(new_model == mkey).nonzero()[0] if len(idx) == 0: continue @@ -2158,7 +2172,7 @@ def _remap_hfb(self, package, mapped_data): raise AssertionError("Models cannot be split along faults") for mkey, model in self._model_dict.items(): - idx = np.where(new_model1 == mkey)[0] + idx = np.asarray(new_model1 == mkey).nonzero()[0] if len(idx) == 0: new_recarray = None else: @@ -2262,7 +2276,7 @@ def _remap_obs(self, package, mapped_data, remapper, pkg_type=None): dtype=object, ) for mkey, model in self._model_dict.items(): - idx = np.where(new_model1 == mkey) + idx = np.asarray(new_model1 == mkey).nonzero() tmp_cellid = self._new_node_to_cellid( model, new_node1, layers1, idx ) @@ -2297,7 +2311,7 @@ def _remap_obs(self, package, mapped_data, remapper, pkg_type=None): ) for idt in set(idtype): remaps = remapper[idt] - idx = np.where(idtype == idt) + idx = np.asarray(idtype == idt).nonzero() new_cellid1[idx] = [ ( remaps[i][-1] + 1 @@ -2364,7 +2378,7 @@ def _remap_obs(self, package, mapped_data, remapper, pkg_type=None): dtype=object, ) for mkey, model in self._model_dict.items(): - idx = np.where(new_model1 == mkey) + idx = np.asarray(new_model1 == mkey).nonzero() idx = [ ix for ix, i in enumerate(recarray.id[idx]) @@ -2399,7 +2413,7 @@ def _remap_obs(self, package, mapped_data, remapper, pkg_type=None): new_model1[mm_idx] = tmp_models cellid2 = recarray.id2 - conv_idx = np.where((cellid2 is not None))[0] + conv_idx = np.asarray(cellid2 != None).nonzero()[0] # noqa: E711 if len(conv_idx) > 0: # do stuff # need to trap layers... 
if pkg_type is None: @@ -2454,9 +2468,9 @@ def _remap_obs(self, package, mapped_data, remapper, pkg_type=None): (len(new_node2),), None, dtype=object ) for mkey, model in self._model_dict.items(): - idx = np.where(new_model2 == mkey) + idx = np.asarray(new_model2 == mkey).nonzero() tmp_node = new_node2[idx] - cidx = np.where((tmp_node is not None)) + cidx = np.asarray((tmp_node != None)).nonzero() # noqa: E711 tmp_cellid = model.modelgrid.get_lrc( tmp_node[cidx].to_list() ) @@ -2501,7 +2515,7 @@ def _remap_obs(self, package, mapped_data, remapper, pkg_type=None): if idt is None: continue remaps = remapper[idt] - idx = np.where(idtype == idt) + idx = np.asarray(idtype == idt).nonzero() new_cellid2[idx] = [ ( remaps[i][-1] + 1 @@ -2536,7 +2550,7 @@ def _remap_obs(self, package, mapped_data, remapper, pkg_type=None): new_model1[idx] = mkey # now we remap the continuous data!!!! - idx = np.where(new_model1 == mkey)[0] + idx = np.asarray(new_model1 == mkey).nonzero()[0] if len(idx) == 0: continue @@ -2682,7 +2696,7 @@ def _remap_adv_tag(self, mkey, recarray, item, mapper): if meta[0] == mkey: mapnos.append(lak) - idxs = np.where(np.isin(recarray[item], mapnos))[0] + idxs = np.asarray(np.isin(recarray[item], mapnos)).nonzero()[0] if len(idxs) == 0: new_recarray = None else: diff --git a/flopy/modflow/mfrch.py b/flopy/modflow/mfrch.py index 4803dcfdc..a9a22e394 100644 --- a/flopy/modflow/mfrch.py +++ b/flopy/modflow/mfrch.py @@ -235,8 +235,8 @@ def check( if Tmean != 0: R_T = period_means / Tmean - lessthan = np.where(R_T < RTmin)[0] - greaterthan = np.where(R_T > RTmax)[0] + lessthan = np.asarray(R_T < RTmin).nonzero()[0] + greaterthan = np.asarray(R_T > RTmax).nonzero()[0] if len(lessthan) > 0: txt = ( diff --git a/flopy/modflow/mfsfr2.py b/flopy/modflow/mfsfr2.py index e21915e29..62112fb51 100644 --- a/flopy/modflow/mfsfr2.py +++ b/flopy/modflow/mfsfr2.py @@ -1511,7 +1511,7 @@ def plot_path(self, start_seg=None, end_seg=0, plot_segment_lines=True): # slice the path path = np.array(self.paths[start_seg]) - endidx = np.where(path == end_seg)[0] + endidx = np.asarray(path == end_seg).nonzero()[0] endidx = endidx if len(endidx) > 0 else None path = path[: np.squeeze(endidx)] path = [s for s in path if s > 0] # skip lakes for now @@ -1523,7 +1523,7 @@ def plot_path(self, start_seg=None, end_seg=0, plot_segment_lines=True): dist = np.cumsum(tmp.rchlen.values) * to_miles.get(mfunits, 1.0) # segment starts - starts = dist[np.where(tmp.ireach.values == 1)[0]] + starts = dist[np.asarray(tmp.ireach.values == 1).nonzero()[0]] ax = plt.subplots(figsize=(11, 8.5))[-1] ax.plot(dist, tops, label="Model top") @@ -2411,7 +2411,7 @@ def routing(self): # max node with * a tolerance # 1.25 * hyp is greater than distance of two diagonally adjacent nodes # where one is 1.5x larger than the other - breaks = np.where(dist > hyp * 1.25) + breaks = np.asarray(dist > hyp * 1.25).nonzero() breaks_reach_data = rd[breaks] segments_with_breaks = set(breaks_reach_data.iseg) if len(breaks) > 0: diff --git a/flopy/pest/params.py b/flopy/pest/params.py index 7dc2535f9..ece259d54 100644 --- a/flopy/pest/params.py +++ b/flopy/pest/params.py @@ -74,7 +74,7 @@ def zonearray2params( plist = [] for i, iz in enumerate(parzones): span = {} - span["idx"] = np.where(zonearray == iz) + span["idx"] = np.asarray(zonearray == iz).nonzero() parname = f"{partype}_{iz}" startvalue = parvals[i] p = Params( diff --git a/flopy/plot/plotutil.py b/flopy/plot/plotutil.py index 2a55346d1..4efab156a 100644 --- a/flopy/plot/plotutil.py +++ 
b/flopy/plot/plotutil.py @@ -1647,7 +1647,7 @@ def line_intersect_grid(ptsin, xgrid, ygrid): numb = (x2 - x1) * (y1 - y3) - (y2 - y1) * (x1 - x3) denom = (y4 - y3) * (x2 - x1) - (x4 - x3) * (y2 - y1) ua = np.ones(denom.shape, dtype=denom.dtype) * np.nan - idx = np.where(denom != 0.0) + idx = np.asarray(denom != 0.0).nonzero() ua[idx] = numa[idx] / denom[idx] del numa del numb @@ -2231,7 +2231,7 @@ def advanced_package_bc_helper(pkg, modelgrid, kper): idx = np.array([list(i) for i in mflist["cellid"]], dtype=int).T else: iuzfbnd = pkg.iuzfbnd.array - idx = np.where(iuzfbnd != 0) + idx = np.asarray(iuzfbnd != 0).nonzero() idx = np.append([[0] * idx[-1].size], idx, axis=0) elif pkg.package_type in ("lak", "maw"): if pkg.parent.version == "mf6": @@ -2239,7 +2239,7 @@ def advanced_package_bc_helper(pkg, modelgrid, kper): idx = np.array([list(i) for i in mflist["cellid"]], dtype=int).T else: lakarr = pkg.lakarr.array[kper] - idx = np.where(lakarr != 0) + idx = np.asarray(lakarr != 0).nonzero() idx = np.array(idx) else: raise NotImplementedError( diff --git a/flopy/utils/binaryfile.py b/flopy/utils/binaryfile.py index 157cb1740..94074f427 100644 --- a/flopy/utils/binaryfile.py +++ b/flopy/utils/binaryfile.py @@ -598,7 +598,7 @@ def get_ts(self, idx): # Find the time index and then put value into result in the # correct location. - itim = np.where(result[:, 0] == header["totim"])[0] + itim = np.asarray(result[:, 0] == header["totim"]).nonzero()[0] result[itim, istat] = binaryread(self.file, self.realtype) istat += 1 return result @@ -887,7 +887,9 @@ def _get_data_array(self, totim=0.0): """ if totim >= 0.0: - keyindices = np.where(self.recordarray["totim"] == totim)[0] + keyindices = np.asarray( + self.recordarray["totim"] == totim + ).nonzero()[0] if len(keyindices) == 0: msg = f"totim value ({totim}) not found in file..." 
raise Exception(msg) @@ -1604,7 +1606,9 @@ def get_indices(self, text=None): # check and make sure that text is in file if text is not None: text16 = self._find_text(text) - select_indices = np.where(self.recordarray["text"] == text16) + select_indices = np.asarray( + self.recordarray["text"] == text16 + ).nonzero() if isinstance(select_indices, tuple): select_indices = select_indices[0] else: @@ -1869,7 +1873,7 @@ def get_ts(self, idx, text=None, times=None): for vv in v: field = vv.dtype.names[2] - dix = np.where(np.isin(vv["node"], ndx))[0] + dix = np.asarray(np.isin(vv["node"], ndx)).nonzero()[0] if len(dix) > 0: result[itim, 1:] = vv[field][dix] @@ -2176,7 +2180,9 @@ def get_residual(self, totim, scaled=False): residual = np.zeros((nlay, nrow, ncol), dtype=float) if scaled: inflow = np.zeros((nlay, nrow, ncol), dtype=float) - select_indices = np.where(self.recordarray["totim"] == totim)[0] + select_indices = np.asarray( + self.recordarray["totim"] == totim + ).nonzero()[0] for i in select_indices: text = self.recordarray[i]["text"].decode() @@ -2187,9 +2193,9 @@ def get_residual(self, totim, scaled=False): residual -= flow[:, :, :] residual[:, :, 1:] += flow[:, :, :-1] if scaled: - idx = np.where(flow < 0.0) + idx = np.asarray(flow < 0.0).nonzero() inflow[idx] -= flow[idx] - idx = np.where(flow > 0.0) + idx = np.asarray(flow > 0.0).nonzero() l, r, c = idx idx = (l, r, c + 1) inflow[idx] += flow[idx] @@ -2197,9 +2203,9 @@ def get_residual(self, totim, scaled=False): residual -= flow[:, :, :] residual[:, 1:, :] += flow[:, :-1, :] if scaled: - idx = np.where(flow < 0.0) + idx = np.asarray(flow < 0.0).nonzero() inflow[idx] -= flow[idx] - idx = np.where(flow > 0.0) + idx = np.asarray(flow > 0.0).nonzero() l, r, c = idx idx = (l, r + 1, c) inflow[idx] += flow[idx] @@ -2207,16 +2213,16 @@ def get_residual(self, totim, scaled=False): residual -= flow[:, :, :] residual[1:, :, :] += flow[:-1, :, :] if scaled: - idx = np.where(flow < 0.0) + idx = np.asarray(flow < 0.0).nonzero() inflow[idx] -= flow[idx] - idx = np.where(flow > 0.0) + idx = np.asarray(flow > 0.0).nonzero() l, r, c = idx idx = (l + 1, r, c) inflow[idx] += flow[idx] else: residual += flow if scaled: - idx = np.where(flow > 0.0) + idx = np.asarray(flow > 0.0).nonzero() inflow[idx] += flow[idx] if scaled: diff --git a/flopy/utils/check.py b/flopy/utils/check.py index 662643d60..432dc5221 100644 --- a/flopy/utils/check.py +++ b/flopy/utils/check.py @@ -507,7 +507,7 @@ def values(self, a, criteria, error_name="", error_type="Warning"): True value in criteria. 
""" if np.any(criteria): - inds = np.where(criteria) + inds = np.asarray(criteria).nonzero() v = a[inds] # works with structured or unstructured pn = [self.package.name] * len(v) en = [error_name] * len(v) diff --git a/flopy/utils/compare.py b/flopy/utils/compare.py index 3e4100fba..2e5f3fd6b 100644 --- a/flopy/utils/compare.py +++ b/flopy/utils/compare.py @@ -40,7 +40,7 @@ def _diffmax(v1, v2): diff = abs(v1 - v2) diffmax = diff.max() - return diffmax, np.where(diff == diffmax) + return diffmax, np.asarray(diff == diffmax).nonzero() def _difftol(v1, v2, tol): @@ -75,7 +75,7 @@ def _difftol(v1, v2, tol): raise Exception(err) diff = abs(v1 - v2) - return diff.max(), np.where(diff > tol) + return diff.max(), np.asarray(diff > tol).nonzero() def compare_budget( diff --git a/flopy/utils/cvfdutil.py b/flopy/utils/cvfdutil.py index 3eb33d6e8..fea3b5602 100644 --- a/flopy/utils/cvfdutil.py +++ b/flopy/utils/cvfdutil.py @@ -324,7 +324,7 @@ def gridlist_to_verts(gridlist): vertdict = {} icell = 0 for sg in gridlist: - ilays, irows, icols = np.where(sg.idomain > 0) + ilays, irows, icols = np.asarray(sg.idomain > 0).nonzero() for _, i, j in zip(ilays, irows, icols): v = sg.get_cell_vertices(i, j) vertdict[icell] = v + [v[0]] diff --git a/flopy/utils/datafile.py b/flopy/utils/datafile.py index 51244a4b4..ca922c516 100644 --- a/flopy/utils/datafile.py +++ b/flopy/utils/datafile.py @@ -463,7 +463,9 @@ def _get_data_array(self, totim=0): """ if totim >= 0.0: - keyindices = np.where(self.recordarray["totim"] == totim)[0] + keyindices = np.asarray( + self.recordarray["totim"] == totim + ).nonzero()[0] if len(keyindices) == 0: msg = f"totim value ({totim}) not found in file..." raise Exception(msg) @@ -545,10 +547,10 @@ def get_data(self, kstpkper=None, idx=None, totim=None, mflay=None): if kstpkper is not None: kstp1 = kstpkper[0] + 1 kper1 = kstpkper[1] + 1 - idx = np.where( + idx = np.asarray( (self.recordarray["kstp"] == kstp1) & (self.recordarray["kper"] == kper1) - ) + ).nonzero() if idx[0].shape[0] == 0: raise Exception( f"get_data() error: kstpkper not found:{kstpkper}" diff --git a/flopy/utils/flopy_io.py b/flopy/utils/flopy_io.py index c3f94b62f..403f3e7a9 100644 --- a/flopy/utils/flopy_io.py +++ b/flopy/utils/flopy_io.py @@ -305,7 +305,7 @@ def flux_to_wel(cbc_file, text, precision="single", model=None, verbose=False): arr = arr[0] print(arr.max(), arr.min(), arr.sum()) # masked where zero - arr[np.where(arr == 0.0)] = np.nan + arr[np.asarray(arr == 0.0).nonzero()] = np.nan m4d[iper + 1] = arr iper += 1 diff --git a/flopy/utils/formattedfile.py b/flopy/utils/formattedfile.py index 02903ca21..ca9436ed2 100644 --- a/flopy/utils/formattedfile.py +++ b/flopy/utils/formattedfile.py @@ -303,7 +303,7 @@ def get_ts(self, idx): # Find the time index and then put value into result in the # correct location. 
- itim = np.where(result[:, 0] == header["totim"])[0] + itim = np.asarray(result[:, 0] == header["totim"]).nonzero()[0] result[itim, istat] = self._read_val(j) istat += 1 return result diff --git a/flopy/utils/geometry.py b/flopy/utils/geometry.py index dac27c144..578040d55 100644 --- a/flopy/utils/geometry.py +++ b/flopy/utils/geometry.py @@ -872,9 +872,9 @@ def point_in_polygon(xc, yc, polygon): yc - polygon[i][1] ) / (polygon[j][1] - polygon[i][1]) - comp = np.where( + comp = np.asarray( ((polygon[i][1] > yc) ^ (polygon[j][1] > yc)) & (xc < tmp) - ) + ).nonzero() j = i if len(comp[0]) > 0: diff --git a/flopy/utils/gridgen.py b/flopy/utils/gridgen.py index 27e9b1b34..437f3a0be 100644 --- a/flopy/utils/gridgen.py +++ b/flopy/utils/gridgen.py @@ -733,7 +733,7 @@ def plot( shapename = os.path.join(self.model_ws, "qtgrid") xmin, xmax, ymin, ymax = shapefile_extents(shapename) - idx = np.where(self.qtra.layer == layer)[0] + idx = np.asarray(self.qtra.layer == layer).nonzero()[0] pc = plot_shapefile( shapename, diff --git a/flopy/utils/lgrutil.py b/flopy/utils/lgrutil.py index b17839483..043b1a999 100644 --- a/flopy/utils/lgrutil.py +++ b/flopy/utils/lgrutil.py @@ -161,7 +161,7 @@ def __init__( # idomain assert idomainp.shape == (nlayp, nrowp, ncolp) self.idomain = idomainp - idxl, idxr, idxc = np.where(idomainp == 0) + idxl, idxr, idxc = np.asarray(idomainp == 0).nonzero() assert idxl.shape[0] > 1, "no zero values found in idomain" # child cells per parent and child cells per parent layer diff --git a/flopy/utils/observationfile.py b/flopy/utils/observationfile.py index 8d85aaf3d..3e5308b3a 100644 --- a/flopy/utils/observationfile.py +++ b/flopy/utils/observationfile.py @@ -104,7 +104,7 @@ def get_data(self, idx=None, obsname=None, totim=None): i0 = 0 i1 = self.data.shape[0] if totim is not None: - idx = np.where(self.data["totim"] == totim)[0][0] + idx = np.asarray(self.data["totim"] == totim).nonzero()[0][0] i0 = idx i1 = idx + 1 elif idx is not None: @@ -183,7 +183,7 @@ def get_dataframe( i0 = 0 i1 = self.data.shape[0] if totim is not None: - idx = np.where(self.data["totim"] == totim)[0][0] + idx = np.asarray(self.data["totim"] == totim).nonzero()[0][0] i0 = idx i1 = idx + 1 elif idx is not None: diff --git a/flopy/utils/particletrackfile.py b/flopy/utils/particletrackfile.py index 75ff3972d..50c004db8 100644 --- a/flopy/utils/particletrackfile.py +++ b/flopy/utils/particletrackfile.py @@ -106,16 +106,16 @@ def get_data( """ data = self._data[list(self.outdtype.names)] if minimal else self._data idx = ( - np.where(data["particleid"] == partid)[0] + np.asarray(data["particleid"] == partid).nonzero()[0] if totim is None else ( - np.where( + np.asarray( (data["time"] >= totim) & (data["particleid"] == partid) - )[0] + ).nonzero()[0] if ge - else np.where( + else np.asarray( (data["time"] <= totim) & (data["particleid"] == partid) - )[0] + ).nonzero()[0] ) ) @@ -145,9 +145,9 @@ def get_alldata(self, totim=None, ge=True, minimal=False): data = self._data[list(self.outdtype.names)] if minimal else self._data if totim is not None: idx = ( - np.where(data["time"] >= totim)[0] + np.asarray(data["time"] >= totim).nonzero()[0] if ge - else np.where(data["time"] <= totim)[0] + else np.asarray(data["time"] <= totim).nonzero()[0] ) if len(idx) > 0: data = data[idx] diff --git a/flopy/utils/rasters.py b/flopy/utils/rasters.py index 72e32eabf..f8f965edb 100644 --- a/flopy/utils/rasters.py +++ b/flopy/utils/rasters.py @@ -256,7 +256,7 @@ def sample_point(self, *point, band=1): dist = np.sqrt(xt + yt) # 3: 
find indices of minimum distance - md = np.where(dist == np.nanmin(dist)) + md = np.asarray(dist == np.nanmin(dist)).nonzero() # 4: sample the array and average if necessary vals = [] @@ -534,7 +534,7 @@ def crop(self, polygon, invert=False): xt = (pt[0] - xc) ** 2 yt = (pt[1] - yc) ** 2 hypot = np.sqrt(xt + yt) - ind = np.where(hypot == np.min(hypot)) + ind = np.asarray(hypot == np.min(hypot)).nonzero() yind.append(ind[0][0]) xind.append(ind[1][0]) diff --git a/flopy/utils/sfroutputfile.py b/flopy/utils/sfroutputfile.py index 7754d4da8..ba23a9b5b 100644 --- a/flopy/utils/sfroutputfile.py +++ b/flopy/utils/sfroutputfile.py @@ -147,7 +147,9 @@ def get_nstrm(df): Number of SFR cells """ - wherereach1 = np.where((df.segment == 1) & (df.reach == 1))[0] + wherereach1 = np.asarray( + (df.segment == 1) & (df.reach == 1) + ).nonzero()[0] if len(wherereach1) == 1: return len(df) elif len(wherereach1) > 1: diff --git a/flopy/utils/swroutputfile.py b/flopy/utils/swroutputfile.py index 991571eba..eff248868 100644 --- a/flopy/utils/swroutputfile.py +++ b/flopy/utils/swroutputfile.py @@ -231,11 +231,11 @@ def get_data(self, idx=None, kswrkstpkper=None, totim=None): kper1 = kswrkstpkper[2] totim1 = self._recordarray[ - np.where( + np.asarray( (self._recordarray["kswr"] == kswr1) & (self._recordarray["kstp"] == kstp1) & (self._recordarray["kper"] == kper1) - ) + ).nonzero() ]["totim"][0] elif totim is not None: totim1 = totim diff --git a/flopy/utils/triangle.py b/flopy/utils/triangle.py index a0d86d18f..d001bd73c 100644 --- a/flopy/utils/triangle.py +++ b/flopy/utils/triangle.py @@ -309,7 +309,7 @@ def plot_boundary(self, ibm, ax=None, **kwargs): """ if ax is None: ax = plt.gca() - idx = np.where(self.edge["boundary_marker"] == ibm)[0] + idx = np.asarray(self.edge["boundary_marker"] == ibm).nonzero()[0] for i in idx: iv1 = self.edge["endpoint1"][i] iv2 = self.edge["endpoint2"][i] diff --git a/flopy/utils/util_list.py b/flopy/utils/util_list.py index 382dbc65c..8616cf11d 100644 --- a/flopy/utils/util_list.py +++ b/flopy/utils/util_list.py @@ -810,15 +810,15 @@ def check_kij(self): data = self[kper] if data is not None: k = data["k"] - k_idx = np.where(np.logical_or(k < 0, k >= nl)) + k_idx = np.asarray(np.logical_or(k < 0, k >= nl)).nonzero() if k_idx[0].shape[0] > 0: out_idx.extend(list(k_idx[0])) i = data["i"] - i_idx = np.where(np.logical_or(i < 0, i >= nr)) + i_idx = np.asarray(np.logical_or(i < 0, i >= nr)).nonzero() if i_idx[0].shape[0] > 0: out_idx.extend(list(i_idx[0])) j = data["j"] - j_idx = np.where(np.logical_or(j < 0, j >= nc)) + j_idx = np.asarray(np.logical_or(j < 0, j >= nc)).nonzero() if j_idx[0].shape[0]: out_idx.extend(list(j_idx[0])) @@ -887,7 +887,9 @@ def attribute_by_kper(self, attr, function=np.mean, idx_val=None): kper_data = self.__data[kper] if idx_val is not None: kper_data = kper_data[ - np.where(kper_data[idx_val[0]] == idx_val[1]) + np.asarray( + kper_data[idx_val[0]] == idx_val[1] + ).nonzero() ] v = function(kper_data[attr]) values.append(v) diff --git a/flopy/utils/voronoi.py b/flopy/utils/voronoi.py index 15982d205..d3f52566a 100644 --- a/flopy/utils/voronoi.py +++ b/flopy/utils/voronoi.py @@ -153,11 +153,11 @@ def tri2vor(tri, **kwargs): polygon = [(x, y) for x, y in tri._polygons[ipolygon]] vor_vert_notindomain = point_in_polygon(xc, yc, polygon) vor_vert_notindomain = vor_vert_notindomain.flatten() - idx = np.where(vor_vert_notindomain == True) + idx = np.asarray(vor_vert_notindomain == True).nonzero() vor_vert_indomain[idx] = False idx_vertindex = -1 * 
np.ones((nvertices), int) - idx_filtered = np.where(vor_vert_indomain == True) + idx_filtered = np.asarray(vor_vert_indomain == True).nonzero() nvalid_vertices = len(idx_filtered[0]) # renumber valid vertices consecutively idx_vertindex[idx_filtered] = np.arange(nvalid_vertices) diff --git a/flopy/utils/zonbud.py b/flopy/utils/zonbud.py index 37593dfa5..39949a1b0 100644 --- a/flopy/utils/zonbud.py +++ b/flopy/utils/zonbud.py @@ -548,18 +548,18 @@ def _update_budget_recordarray( try: if kstpkper is not None: for rn, cn, flux in zip(rownames, colnames, fluxes): - rowidx = np.where( + rowidx = np.asarray( (self._budget["time_step"] == kstpkper[0]) & (self._budget["stress_period"] == kstpkper[1]) & (self._budget["name"] == rn) - ) + ).nonzero() self._budget[cn][rowidx] += flux elif totim is not None: for rn, cn, flux in zip(rownames, colnames, fluxes): - rowidx = np.where( + rowidx = np.asarray( (self._budget["totim"] == totim) & (self._budget["name"] == rn) - ) + ).nonzero() self._budget[cn][rowidx] += flux except Exception as e: @@ -592,9 +592,9 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): # ZONE 4 TO 3 IS THE NEGATIVE OF FLOW FROM 3 TO 4. # 1ST, CALCULATE FLOW BETWEEN NODE J,I,K AND J-1,I,K - k, i, j = np.where( + k, i, j = np.asarray( self.izone[:, :, 1:] > self.izone[:, :, :-1] - ) + ).nonzero() # Adjust column values to account for the starting position of "nz" j += 1 @@ -613,9 +613,9 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): # Don't include CH to CH flow (can occur if CHTOCH option is used) # Create an iterable tuple of (from zone, to zone, flux) # Then group tuple by (from_zone, to_zone) and sum the flux values - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nzl[idx], nz[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim @@ -625,18 +625,18 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): # Don't include CH to CH flow (can occur if CHTOCH option is used) # Create an iterable tuple of (from zone, to zone, flux) # Then group tuple by (from_zone, to_zone) and sum the flux values - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nz[idx], nzl[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) # FLOW BETWEEN NODE J,I,K AND J+1,I,K - k, i, j = np.where( + k, i, j = np.asarray( self.izone[:, :, :-1] > self.izone[:, :, 1:] - ) + ).nonzero() # Define the zone from which flow is coming nz = self.izone[k, i, j] @@ -652,9 +652,9 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): # Don't include CH to CH flow (can occur if CHTOCH option is used) # Create an iterable tuple of (from zone, to zone, flux) # Then group tuple by (from_zone, to_zone) and sum the flux values - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nz[idx], nzr[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim @@ -664,24 +664,24 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): # Don't include CH to CH flow (can occur if CHTOCH option is used) # Create an iterable tuple of (from zone, to zone, flux) # Then group tuple by (from_zone, to_zone) and sum the flux values - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)) - ) + 
).nonzero() fzi, tzi, fi = sum_flux_tuples(nzr[idx], nz[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) # CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION - k, i, j = np.where(ich == 1) + k, i, j = np.asarray(ich == 1).nonzero() k, i, j = k[j > 0], i[j > 0], j[j > 0] jl = j - 1 nzl = self.izone[k, i, jl] nz = self.izone[k, i, j] q = data[k, i, jl] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx]) fz = ["TO_CONSTANT_HEAD"] * len(tzi) tz = [self._zonenamedict[z] for z in tzi] @@ -689,9 +689,9 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jl] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nzl[idx], nz[idx], q[idx]) fz = ["FROM_CONSTANT_HEAD"] * len(fzi) tz = [self._zonenamedict[z] for z in tzi[tzi != 0]] @@ -699,7 +699,7 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - k, i, j = np.where(ich == 1) + k, i, j = np.asarray(ich == 1).nonzero() k, i, j = ( k[j < self.ncol - 1], i[j < self.ncol - 1], @@ -709,9 +709,9 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): jr = j + 1 nzr = self.izone[k, i, jr] q = data[k, i, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx]) fz = ["FROM_CONSTANT_HEAD"] * len(tzi) tz = [self._zonenamedict[z] for z in tzi] @@ -719,9 +719,9 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[k, i, jr] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nzr[idx], nz[idx], q[idx]) fz = ["TO_CONSTANT_HEAD"] * len(fzi) tz = [self._zonenamedict[z] for z in tzi] @@ -732,7 +732,6 @@ def _accumulate_flow_frf(self, recname, ich, kstpkper, totim): except Exception as e: print(e) raise - return def _accumulate_flow_fff(self, recname, ich, kstpkper, totim): """ @@ -756,64 +755,64 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim): # "FLOW FRONT FACE" # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I-1,K - k, i, j = np.where( + k, i, j = np.asarray( self.izone[:, 1:, :] < self.izone[:, :-1, :] - ) + ).nonzero() i += 1 ia = i - 1 nza = self.izone[k, ia, j] nz = self.izone[k, i, j] q = data[k, ia, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I+1,K. 
- k, i, j = np.where( + k, i, j = np.asarray( self.izone[:, :-1, :] < self.izone[:, 1:, :] - ) + ).nonzero() nz = self.izone[k, i, j] ib = i + 1 nzb = self.izone[k, ib, j] q = data[k, i, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nz[idx], nzb[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) # CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION - k, i, j = np.where(ich == 1) + k, i, j = np.asarray(ich == 1).nonzero() k, i, j = k[i > 0], i[i > 0], j[i > 0] ia = i - 1 nza = self.izone[k, ia, j] nz = self.izone[k, i, j] q = data[k, ia, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx]) fz = ["TO_CONSTANT_HEAD"] * len(tzi) tz = [self._zonenamedict[z] for z in tzi] @@ -821,9 +820,9 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ia, j] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx]) fz = ["FROM_CONSTANT_HEAD"] * len(fzi) tz = [self._zonenamedict[z] for z in tzi] @@ -831,7 +830,7 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - k, i, j = np.where(ich == 1) + k, i, j = np.asarray(ich == 1).nonzero() k, i, j = ( k[i < self.nrow - 1], i[i < self.nrow - 1], @@ -841,9 +840,9 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim): ib = i + 1 nzb = self.izone[k, ib, j] q = data[k, i, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) fz = ["FROM_CONSTANT_HEAD"] * len(tzi) tz = [self._zonenamedict[z] for z in tzi] @@ -851,9 +850,9 @@ def _accumulate_flow_fff(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[k, ib, j] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) fz = ["TO_CONSTANT_HEAD"] * len(fzi) tz = [self._zonenamedict[z] for z in tzi] @@ -888,64 +887,64 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim): # "FLOW LOWER FACE" # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K-1 - k, i, j = np.where( + k, i, j = np.asarray( self.izone[1:, :, :] < self.izone[:-1, :, :] - ) + ).nonzero() k += 1 ka = k - 1 nza = self.izone[ka, i, j] nz = self.izone[k, i, j] q = data[ka, i, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nza[idx], nz[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nz[idx], nza[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) # CALCULATE FLOW BETWEEN NODE J,I,K AND J,I,K+1 - k, i, j = np.where( + k, i, j = np.asarray( self.izone[:-1, :, :] < self.izone[1:, :, :] - ) + ).nonzero() nz = 
self.izone[k, i, j] kb = k + 1 nzb = self.izone[kb, i, j] q = data[k, i, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nz[idx], nzb[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)) - ) + ).nonzero() fzi, tzi, fi = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) self._update_budget_fromfaceflow( fzi, tzi, np.abs(fi), kstpkper, totim ) # CALCULATE FLOW TO CONSTANT-HEAD CELLS IN THIS DIRECTION - k, i, j = np.where(ich == 1) + k, i, j = np.asarray(ich == 1).nonzero() k, i, j = k[k > 0], i[k > 0], j[k > 0] ka = k - 1 nza = self.izone[ka, i, j] nz = self.izone[k, i, j] q = data[ka, i, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx]) fz = ["TO_CONSTANT_HEAD"] * len(tzi) tz = [self._zonenamedict[z] for z in tzi] @@ -953,9 +952,9 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[ka, i, j] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nza[idx], nz[idx], q[idx]) fz = ["FROM_CONSTANT_HEAD"] * len(fzi) tz = [self._zonenamedict[z] for z in tzi] @@ -963,7 +962,7 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - k, i, j = np.where(ich == 1) + k, i, j = np.asarray(ich == 1).nonzero() k, i, j = ( k[k < self.nlay - 1], i[k < self.nlay - 1], @@ -973,9 +972,9 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim): kb = k + 1 nzb = self.izone[kb, i, j] q = data[k, i, j] - idx = np.where( + idx = np.asarray( (q > 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) fz = ["FROM_CONSTANT_HEAD"] * len(tzi) tz = [self._zonenamedict[z] for z in tzi] @@ -983,9 +982,9 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim): fz, tz, np.abs(f), kstpkper, totim ) - idx = np.where( + idx = np.asarray( (q < 0) & ((ich[k, i, j] != 1) | (ich[kb, i, j] != 1)) - ) + ).nonzero() fzi, tzi, f = sum_flux_tuples(nzb[idx], nz[idx], q[idx]) fz = ["TO_CONSTANT_HEAD"] * len(fzi) tz = [self._zonenamedict[z] for z in tzi] @@ -996,7 +995,6 @@ def _accumulate_flow_flf(self, recname, ich, kstpkper, totim): except Exception as e: print(e) raise - return def _accumulate_flow_ssst(self, recname, kstpkper, totim): # NOT AN INTERNAL FLOW TERM, SO MUST BE A SOURCE TERM OR STORAGE @@ -1049,9 +1047,9 @@ def _accumulate_flow_ssst(self, recname, kstpkper, totim): # 1-LAYER ARRAY THAT DEFINES LAYER 1 qin = np.ma.zeros(self.cbc_shape, self.float_type) qout = np.ma.zeros(self.cbc_shape, self.float_type) - r, c = np.where(data > 0) + r, c = np.asarray(data > 0).nonzero() qin[0, r, c] = data[r, c] - r, c = np.where(data < 0) + r, c = np.asarray(data < 0).nonzero() qout[0, r, c] = data[r, c] else: # Should not happen @@ -1103,16 +1101,16 @@ def _compute_mass_balance(self, kstpkper, totim): innames = [n for n in recnames if n.startswith("FROM_")] outnames = [n for n in recnames if n.startswith("TO_")] if kstpkper is not None: - rowidx = np.where( + rowidx = np.asarray( (self._budget["time_step"] == kstpkper[0]) & (self._budget["stress_period"] == kstpkper[1]) - & np.isin(self._budget["name"], innames) - ) + & np.in1d(self._budget["name"], 
innames) + ).nonzero() elif totim is not None: - rowidx = np.where( + rowidx = np.asarray( (self._budget["totim"] == totim) - & np.isin(self._budget["name"], innames) - ) + & np.in1d(self._budget["name"], innames) + ).nonzero() a = _numpyvoid2numeric( self._budget[list(self._zonenamedict.values())][rowidx] ) @@ -1125,16 +1123,16 @@ def _compute_mass_balance(self, kstpkper, totim): # Compute outflows if kstpkper is not None: - rowidx = np.where( + rowidx = np.asarray( (self._budget["time_step"] == kstpkper[0]) & (self._budget["stress_period"] == kstpkper[1]) - & np.isin(self._budget["name"], outnames) - ) + & np.in1d(self._budget["name"], outnames) + ).nonzero() elif totim is not None: - rowidx = np.where( + rowidx = np.asarray( (self._budget["totim"] == totim) - & np.isin(self._budget["name"], outnames) - ) + & np.in1d(self._budget["name"], outnames) + ).nonzero() a = _numpyvoid2numeric( self._budget[list(self._zonenamedict.values())][rowidx] ) @@ -2462,7 +2460,7 @@ def _get_budget(recarray, zonenamedict, names=None, zones=None, net=False): if "totim" in recarray.dtype.names: standard_fields.insert(0, "totim") select_fields = standard_fields + list(zonenamedict.values()) - select_records = np.where(recarray["name"] == recarray["name"]) + select_records = np.asarray(recarray["name"] == recarray["name"]).nonzero() if zones is not None: for idx, z in enumerate(zones): if isinstance(z, int): @@ -2945,10 +2943,10 @@ def _pivot_recarray(recarray): pvt_rec = np.recarray((1,), dtype=dtype) n = 0 for kstp, kper in kstp_kper: - idxs = np.where( + idxs = np.asarray( (recarray["time_step"] == kstp) & (recarray["stress_period"] == kper) - ) + ).nonzero() if len(idxs) == 0: pass else: @@ -3008,7 +3006,7 @@ def _volumetric_flux(recarray, modeltime, extrapolate_kper=False): perlen = modeltime.perlen totim = np.add.accumulate(perlen) for per in range(nper): - idx = np.where(recarray["kper"] == per)[0] + idx = np.asarray(recarray["kper"] == per).nonzero()[0] if len(idx) == 0: continue @@ -3019,7 +3017,7 @@ def _volumetric_flux(recarray, modeltime, extrapolate_kper=False): if zone == 0: continue - zix = np.where(temp["zone"] == zone)[0] + zix = np.asarray(temp["zone"] == zone).nonzero()[0] if len(zix) == 0: raise Exception @@ -3052,9 +3050,9 @@ def _volumetric_flux(recarray, modeltime, extrapolate_kper=False): totim = modeltime.totim for ix, nstp in enumerate(modeltime.nstp): for stp in range(nstp): - idx = np.where( + idx = np.asarray( (recarray["kper"] == ix) & (recarray["kstp"] == stp) - ) + ).nonzero() if len(idx[0]) == 0: continue elif n == 0: From e48198c661d8b10d1c1120a88a6cd0c7987d7b22 Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Wed, 19 Jun 2024 01:06:46 -0400 Subject: [PATCH 32/57] refactor(dependencies): support numpy 2 (#2241) Remove NumPy upper bound to close #2153. Also fix `@requires_pkg` usage in `test_export.py`, replace deprecated `itemset` in `mfdatastorage.py`, and bump some CI jobs to Python 3.9. 
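(For illustration, the two NumPy 2 substitutions involved here — the single-argument `np.where(cond)` pattern replaced throughout the preceding patch, and the `ndarray.itemset` call replaced in `mfdatastorage.py` — reduce to the sketch below. The arrays are hypothetical stand-ins, not code from flopy itself.)

```python
import numpy as np

# Hypothetical array for illustration only -- not taken from flopy.
arr = np.array([[3.0, 0.0], [np.nan, 7.0]])

# Single-argument np.where(cond) still works under NumPy 2, but NumPy's
# docs recommend nonzero() for this case; both return a tuple of index
# arrays, one per dimension.
old_idx = np.where(np.isnan(arr))
new_idx = np.asarray(np.isnan(arr)).nonzero()
assert all((a == b).all() for a, b in zip(old_idx, new_idx))

# ndarray.itemset was removed in NumPy 2.0; plain item assignment is the
# portable replacement.
data_array = np.zeros((2, 2))
# data_array.itemset((1, 0), 42.0)  # AttributeError on NumPy >= 2
data_array[(1, 0)] = 42.0
```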
This PR required updates on the MF6 side: * test(test_gwf_maw04.py): switch deprecated np.unicode_ -> np.str_ MODFLOW-USGS/modflow6#1886 * test(prt): omit name field (object dtype) from snapshots MODFLOW-USGS/modflow6#1887 --- .github/workflows/benchmark.yml | 4 ---- .github/workflows/commit.yml | 10 +++------- .github/workflows/examples.yml | 4 ---- .github/workflows/optional.yml | 2 +- .github/workflows/release.yml | 4 ++-- autotest/test_export.py | 4 ++-- etc/environment.yml | 2 +- flopy/mf6/data/mfdatastorage.py | 2 +- pyproject.toml | 2 +- 9 files changed, 11 insertions(+), 23 deletions(-) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 12fd54a1f..79ba1028d 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -13,10 +13,6 @@ jobs: matrix: os: [ ubuntu-latest, macos-latest, windows-latest ] python-version: [ 3.8, 3.9, "3.10", "3.11", "3.12" ] - exclude: - # avoid shutil.copytree infinite recursion bug - # https://github.com/python/cpython/pull/17098 - - python-version: '3.8.0' defaults: run: shell: bash -l {0} diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml index 0b51dfa91..23c4cc5d6 100644 --- a/.github/workflows/commit.yml +++ b/.github/workflows/commit.yml @@ -23,7 +23,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v5 with: - python-version: 3.8 + python-version: 3.9 cache: 'pip' cache-dependency-path: pyproject.toml @@ -55,7 +55,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v5 with: - python-version: 3.8 + python-version: 3.9 cache: 'pip' cache-dependency-path: pyproject.toml @@ -84,7 +84,7 @@ jobs: shell: bash timeout-minutes: 10 env: - PYTHON_VERSION: 3.8 + PYTHON_VERSION: 3.9 steps: - name: Checkout repo @@ -134,10 +134,6 @@ jobs: matrix: os: [ ubuntu-latest, macos-latest, windows-latest ] python-version: [ 3.8, 3.9, "3.10", "3.11", "3.12" ] - exclude: - # avoid shutil.copytree infinite recursion bug - # https://github.com/python/cpython/pull/17098 - - python-version: '3.8.0' defaults: run: shell: bash -l {0} diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index d021a66a5..2b78ec044 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -13,10 +13,6 @@ jobs: matrix: os: [ ubuntu-latest, macos-latest, windows-latest ] python-version: [ 3.8, 3.9, "3.10", "3.11", "3.12" ] - exclude: - # avoid shutil.copytree infinite recursion bug - # https://github.com/python/cpython/pull/17098 - - python-version: '3.8.0' defaults: run: shell: bash -l {0} diff --git a/.github/workflows/optional.yml b/.github/workflows/optional.yml index a84940cd9..c1962380a 100644 --- a/.github/workflows/optional.yml +++ b/.github/workflows/optional.yml @@ -14,7 +14,7 @@ jobs: shell: bash timeout-minutes: 10 env: - PYTHON_VERSION: 3.8 + PYTHON_VERSION: 3.9 strategy: fail-fast: false matrix: diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 57c19dadd..92c65dee9 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -27,7 +27,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v5 with: - python-version: 3.8 + python-version: 3.9 cache: 'pip' cache-dependency-path: pyproject.toml @@ -211,7 +211,7 @@ jobs: - name: Setup Python uses: actions/setup-python@v5 with: - python-version: 3.8 + python-version: 3.9 cache: 'pip' cache-dependency-path: pyproject.toml diff --git a/autotest/test_export.py b/autotest/test_export.py index df550590b..4d9b9f379 100644 --- a/autotest/test_export.py 
+++ b/autotest/test_export.py @@ -410,7 +410,7 @@ def test_export_shapefile_polygon_closed(function_tmpdir): @excludes_platform("Windows") -@requires_pkg("rasterio", "shapefile", "scipy") +@requires_pkg("rasterio", "pyshp", "scipy", name_map={"pyshp": "shapefile"}) def test_export_array(function_tmpdir, example_data_path): import rasterio from scipy.ndimage import rotate @@ -1992,7 +1992,7 @@ def test_vtk_export_disu2_grid(function_tmpdir, example_data_path): @pytest.mark.mf6 @requires_exe("mf6", "gridgen") -@requires_pkg("vtk", "shapefile", "shapely") +@requires_pkg("vtk", "pyshp", "shapely", name_map={"pyshp": "shapefile"}) def test_vtk_export_disu_model(function_tmpdir): from vtkmodules.util.numpy_support import vtk_to_numpy diff --git a/etc/environment.yml b/etc/environment.yml index fdd7cb1dc..504f80c86 100644 --- a/etc/environment.yml +++ b/etc/environment.yml @@ -6,7 +6,7 @@ dependencies: # required - python>=3.8 - - numpy>=1.20.3,<2.0.0 + - numpy>=1.20.3 - matplotlib>=1.4.0 - pandas>=2.0.0 diff --git a/flopy/mf6/data/mfdatastorage.py b/flopy/mf6/data/mfdatastorage.py index 8a5741353..7f66574c5 100644 --- a/flopy/mf6/data/mfdatastorage.py +++ b/flopy/mf6/data/mfdatastorage.py @@ -2535,7 +2535,7 @@ def _fill_dimensions(self, data_iter, dimensions): data_array = np.ndarray(shape=dimensions, dtype=np_dtype) # fill array for index in ArrayIndexIter(dimensions): - data_array.itemset(index, next(data_iter)) + data_array[index] = next(data_iter) return data_array elif self.data_structure_type == DataStructureType.scalar: return next(data_iter) diff --git a/pyproject.toml b/pyproject.toml index 98509cb2c..f53c5e6cd 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -29,7 +29,7 @@ classifiers = [ ] requires-python = ">=3.8" dependencies = [ - "numpy >=1.20.3,<2.0.0", + "numpy>=1.20.3", "matplotlib >=1.4.0", "pandas >=2.0.0" ] From 678bb61346bc226831ae5b66615bc9a00c355cc5 Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Wed, 19 Jun 2024 11:09:24 -0400 Subject: [PATCH 33/57] fix(PlotMapView): default to all layers in plot_pathline() (#2242) --- flopy/plot/map.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flopy/plot/map.py b/flopy/plot/map.py index 27b4677c0..06473bf7f 100644 --- a/flopy/plot/map.py +++ b/flopy/plot/map.py @@ -797,7 +797,7 @@ def plot_pathline(self, pl, travel_time=None, **kwargs): else: kon = self.layer else: - kon = self.layer + kon = -1 # configure plot settings marker = kwargs.pop("marker", None) From 2d372c653ad17aea0f93deb4a5d1e6b5d385c5d6 Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Thu, 20 Jun 2024 18:15:42 -0400 Subject: [PATCH 34/57] chore: remove code.json and disclaimer, update scripts (#2240) Remove code.json and disclaimer after discussion as they are no longer needed for this repo. Also a bit of miscellaneous cleanup. --- DISCLAIMER.md | 11 --- README.md | 14 --- code.json | 49 ---------- docs/PyPI_release.md | 14 --- flopy/DISCLAIMER.md | 11 --- scripts/README.md | 7 +- scripts/update_version.py | 193 ++++++++++---------------------------- 7 files changed, 49 insertions(+), 250 deletions(-) delete mode 100644 DISCLAIMER.md delete mode 100644 code.json delete mode 100644 flopy/DISCLAIMER.md diff --git a/DISCLAIMER.md b/DISCLAIMER.md deleted file mode 100644 index c3b346b8d..000000000 --- a/DISCLAIMER.md +++ /dev/null @@ -1,11 +0,0 @@ -Disclaimer ----------- - -This software is preliminary or provisional and is subject to revision. It is -being provided to meet the need for timely best science. 
This software is -provided "as is" and "as-available", and makes no representations or warranties -of any kind concerning the software, whether express, implied, statutory, or -other. This includes, without limitation, warranties of title, -merchantability, fitness for a particular purpose, non-infringement, absence -of latent or other defects, accuracy, or the presence or absence of errors, -whether or not known or discoverable. \ No newline at end of file diff --git a/README.md b/README.md index fc5354d7d..c364d247a 100644 --- a/README.md +++ b/README.md @@ -170,17 +170,3 @@ MODFLOW Resources + [Online guide for MODFLOW-2000](https://water.usgs.gov/nrp/gwsoftware/modflow2000/Guide/) + [Online guide for MODFLOW-2005](https://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/) + [Online guide for MODFLOW-NWT](https://water.usgs.gov/ogw/modflow-nwt/MODFLOW-NWT-Guide/) - - -Disclaimer ----------- - -This software is preliminary or provisional and is subject to revision. It is -being provided to meet the need for timely best science. This software is -provided "as is" and "as-available", and makes no representations or warranties -of any kind concerning the software, whether express, implied, statutory, or -other. This includes, without limitation, warranties of title, -merchantability, fitness for a particular purpose, non-infringement, absence -of latent or other defects, accuracy, or the presence or absence of errors, -whether or not known or discoverable. - diff --git a/code.json b/code.json deleted file mode 100644 index 1471ab55d..000000000 --- a/code.json +++ /dev/null @@ -1,49 +0,0 @@ -[ - { - "status": "Preliminary", - "languages": [ - "python" - ], - "repositoryURL": "https://code.usgs.gov/usgs/modflow/flopy.git", - "disclaimerURL": "https://code.usgs.gov/usgs/modflow/flopy/blob/master/DISCLAIMER.md", - "name": "flopy", - "tags": [ - "MODFLOW", - "MODFLOW 6", - "MODFLOW-2005", - "MODFLOW-NWT", - "MODFLOW-USG", - "MODFLOW-2000", - "MT3DMS", - "MT3D-USGS", - "SEAWAT", - "MODPATH", - "groundwater model", - "transport model", - "python" - ], - "contact": { - "name": "Joseph D. Hughes", - "email": "jdhughes@usgs.gov" - }, - "downloadURL": "https://code.usgs.gov/usgs/modflow/flopy/archive/master.zip", - "vcs": "git", - "laborHours": -1, - "version": "3.8.0.dev0", - "date": { - "metadataLastUpdated": "2024-05-23" - }, - "organization": "U.S. Geological Survey", - "permissions": { - "licenses": [ - { - "URL": "https://code.usgs.gov/usgs/modflow/flopy/blob/master/LICENSE.md", - "name": "Public Domain, CC0-1.0" - } - ], - "usageType": "openSource" - }, - "homepageURL": "https://code.usgs.gov/usgs/modflow/flopy/", - "description": "FloPy is a python package to create, run, and post-process MODFLOW-based models." - } -] diff --git a/docs/PyPI_release.md b/docs/PyPI_release.md index fac5c4fbb..e56a38988 100644 --- a/docs/PyPI_release.md +++ b/docs/PyPI_release.md @@ -31,17 +31,3 @@ How to Cite *Software/Code citation for FloPy:* [Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.8.0.dev0 (preliminary): U.S. Geological Survey Software Release, 23 May 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH) - - -Disclaimer ----------- - -This software is preliminary or provisional and is subject to revision. It is -being provided to meet the need for timely best science. 
This software is -provided "as is" and "as-available", and makes no representations or warranties -of any kind concerning the software, whether express, implied, statutory, or -other. This includes, without limitation, warranties of title, -merchantability, fitness for a particular purpose, non-infringement, absence -of latent or other defects, accuracy, or the presence or absence of errors, -whether or not known or discoverable. - diff --git a/flopy/DISCLAIMER.md b/flopy/DISCLAIMER.md deleted file mode 100644 index 81ba20d03..000000000 --- a/flopy/DISCLAIMER.md +++ /dev/null @@ -1,11 +0,0 @@ -Disclaimer ----------- - -This software is preliminary or provisional and is subject to revision. It is -being provided to meet the need for timely best science. This software is -provided "as is" and "as-available", and makes no representations or warranties -of any kind concerning the software, whether express, implied, statutory, or -other. This includes, without limitation, warranties of title, -merchantability, fitness for a particular purpose, non-infringement, absence -of latent or other defects, accuracy, or the presence or absence of errors, -whether or not known or discoverable. diff --git a/scripts/README.md b/scripts/README.md index 71ed667f2..64b0536af 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -33,7 +33,6 @@ For instance, `e689af57e7439b9005749d806248897ad550eab5_20150811_041632_uncommit The `update_version.py` script can be used to update FloPy version numbers. Running the script first updates the version in `version.txt`, then propagates the change to various other places version strings or timestamps are embedded in the repository: - `flopy/version.py` -- `flopy/DISCLAIMER.md` - `CITATION.cff` - `README.md` - `docs/PyPI_release.md` @@ -46,12 +45,10 @@ If the script is run with no arguments, the version number is not changed, but u python scripts/update_version.py -v 3.3.6 ``` -To get the current version number, use the `--get` flag: +To get the current version number, use the `--get` flag (short `-g`): ```shell -python scripts/update_version.py +python scripts/update_version.py -g ``` This simply returns the contents of `version.txt` and does not write any changes to the repository's files. - -By default, the script assumes a local development version of FloPy. The `--approve` flag should be used prior to releasing a new FloPy version. This will alter the `DISCLAIMER.md` file, substituting wording to indicate the version is no longer preliminary but approved for official release. See [the release docs](../docs/make_release.md) for more information. diff --git a/scripts/update_version.py b/scripts/update_version.py index 3f309f19c..619a9e133 100644 --- a/scripts/update_version.py +++ b/scripts/update_version.py @@ -1,5 +1,4 @@ import argparse -import json import re import textwrap from datetime import datetime @@ -9,6 +8,14 @@ from filelock import FileLock from packaging.version import Version +_epilog = """\ +Update version information stored in version.txt in the project root, +as well as several other files in the repository. If --version is not +provided, the version number will not be changed. A file lock is held +to synchronize file access. The version tag must comply with standard +'..' format conventions for semantic versioning. +To show the version without changing anything, use --get (short -g). 
+""" _project_name = "flopy" _project_root_path = Path(__file__).parent.parent _version_txt_path = _project_root_path / "version.txt" @@ -17,53 +24,21 @@ # file names and the path to the file relative to the repo root directory file_paths_list = [ _project_root_path / "CITATION.cff", - _project_root_path / "code.json", _project_root_path / "README.md", _project_root_path / "docs" / "PyPI_release.md", _project_root_path / "flopy" / "version.py", - _project_root_path / "flopy" / "DISCLAIMER.md", ] file_paths = {pth.name: pth for pth in file_paths_list} # keys for each file -approved_disclaimer = """Disclaimer ----------- - -This software is provided "as is" and "as-available", and makes no -representations or warranties of any kind concerning the software, whether -express, implied, statutory, or other. This includes, without limitation, -warranties of title, merchantability, fitness for a particular purpose, -non-infringement, absence of latent or other defects, accuracy, or the -presence or absence of errors, whether or not known or discoverable. -""" - -preliminary_disclaimer = """Disclaimer ----------- - -This software is preliminary or provisional and is subject to revision. It is -being provided to meet the need for timely best science. This software is -provided "as is" and "as-available", and makes no representations or warranties -of any kind concerning the software, whether express, implied, statutory, or -other. This includes, without limitation, warranties of title, -merchantability, fitness for a particular purpose, non-infringement, absence -of latent or other defects, accuracy, or the presence or absence of errors, -whether or not known or discoverable. -""" - - def split_nonnumeric(s): match = re.compile("[^0-9]").search(s) return [s[: match.start()], s[match.start() :]] if match else s -_initial_version = Version("0.0.1") _current_version = Version(_version_txt_path.read_text().strip()) -def get_disclaimer(approved: bool = False): - return approved_disclaimer if approved else preliminary_disclaimer - - def update_version_txt(version: Version): with open(_version_txt_path, "w") as f: f.write(str(version)) @@ -81,15 +56,10 @@ def update_version_py(timestamp: datetime, version: Version): print(f"Updated {_version_py_path} to version {version}") -def get_software_citation( - timestamp: datetime, version: Version, approved: bool = False -): +def get_software_citation(timestamp: datetime, version: Version): # get data Software/Code citation for FloPy citation = yaml.safe_load(file_paths["CITATION.cff"].read_text()) - sb = "" - if not approved: - sb = " (preliminary)" # format author names authors = [] for author in citation["authors"]: @@ -116,7 +86,7 @@ def get_software_citation( # add the rest of the citation line += ( - f", {timestamp.year}, FloPy v{version}{sb}: " + f", {timestamp.year}, FloPy v{version}: " f"U.S. 
Geological Survey Software Release, {timestamp:%d %B %Y}, " "https://doi.org/10.5066/F7BK19FH]" "(https://doi.org/10.5066/F7BK19FH)" @@ -125,83 +95,44 @@ def get_software_citation( return line -def update_codejson( - timestamp: datetime, version: Version, approved: bool = False -): - # define json filename - json_fname = file_paths["code.json"] - - # load and modify json file - data = json.loads(json_fname.read_text()) - - # modify the json file data - data[0]["date"]["metadataLastUpdated"] = timestamp.strftime("%Y-%m-%d") - data[0]["version"] = str(version) - data[0]["status"] = "Release" if approved else "Preliminary" - - # rewrite the json file - with open(json_fname, "w") as f: - json.dump(data, f, indent=4) - f.write("\n") - - print(f"Updated {json_fname} to version {version}") - - -def update_readme_markdown( - timestamp: datetime, version: Version, approved: bool = False -): - # create disclaimer text - disclaimer = get_disclaimer(approved) - +def update_readme_markdown(timestamp: datetime, version: Version): # read README.md into memory fpth = file_paths["README.md"] lines = fpth.read_text().rstrip().split("\n") # rewrite README.md - terminate = False - f = open(fpth, "w") - for line in lines: - if "### Version " in line: - line = f"### Version {version}" - if not approved: - line += " (preliminary)" - elif "[flopy continuous integration]" in line: - line = ( - "[![flopy continuous integration](https://github.com/" - "modflowpy/flopy/actions/workflows/commit.yml/badge.svg?" - "branch=develop)](https://github.com/modflowpy/flopy/actions/" - "workflows/commit.yml)" - ) - elif "[Read the Docs]" in line: - line = ( - "[![Read the Docs](https://github.com/modflowpy/flopy/" - "actions/workflows/rtd.yml/badge.svg?branch=develop)]" - "(https://github.com/modflowpy/flopy/actions/" - "workflows/rtd.yml)" - ) - elif "[Coverage Status]" in line: - line = ( - "[![Coverage Status](https://coveralls.io/repos/github/" - "modflowpy/flopy/badge.svg?branch=develop)]" - "(https://coveralls.io/github/modflowpy/" - "flopy?branch=develop)" - ) - elif "doi.org/10.5066/F7BK19FH" in line: - line = get_software_citation(timestamp, version, approved) - elif "Disclaimer" in line: - line = disclaimer - terminate = True - f.write(f"{line}\n") - if terminate: - break + with open(fpth, "w") as f: + for line in lines: + if "### Version " in line: + line = f"### Version {version}" + elif "[flopy continuous integration]" in line: + line = ( + "[![flopy continuous integration](https://github.com/" + "modflowpy/flopy/actions/workflows/commit.yml/badge.svg?" 
+ "branch=develop)](https://github.com/modflowpy/flopy/actions/" + "workflows/commit.yml)" + ) + elif "[Read the Docs]" in line: + line = ( + "[![Read the Docs](https://github.com/modflowpy/flopy/" + "actions/workflows/rtd.yml/badge.svg?branch=develop)]" + "(https://github.com/modflowpy/flopy/actions/" + "workflows/rtd.yml)" + ) + elif "[Coverage Status]" in line: + line = ( + "[![Coverage Status](https://coveralls.io/repos/github/" + "modflowpy/flopy/badge.svg?branch=develop)]" + "(https://coveralls.io/github/modflowpy/" + "flopy?branch=develop)" + ) + elif "doi.org/10.5066/F7BK19FH" in line: + line = get_software_citation(timestamp, version) + + f.write(f"{line}\n") - f.close() print(f"Updated {fpth} to version {version}") - # write disclaimer markdown file - file_paths["DISCLAIMER.md"].write_text(disclaimer) - print(f"Updated {file_paths['DISCLAIMER.md']} to version {version}") - def update_citation_cff(timestamp: datetime, version: Version): # read CITATION.cff to modify @@ -225,28 +156,18 @@ def update_citation_cff(timestamp: datetime, version: Version): print(f"Updated {fpth} to version {version}") -def update_PyPI_release( - timestamp: datetime, version: Version, approved: bool = False -): - # create disclaimer text - disclaimer = get_disclaimer(approved) - +def update_pypi_release(timestamp: datetime, version: Version): # read PyPI_release.md into memory fpth = file_paths["PyPI_release.md"] lines = fpth.read_text().rstrip().split("\n") # rewrite PyPI_release.md - terminate = False f = open(fpth, "w") for line in lines: if "doi.org/10.5066/F7BK19FH" in line: - line = get_software_citation(timestamp, version, approved) - elif "Disclaimer" in line: - line = disclaimer - terminate = True + line = get_software_citation(timestamp, version) + f.write(f"{line}\n") - if terminate: - break f.close() print(f"Updated {fpth} to version {version}") @@ -255,7 +176,6 @@ def update_PyPI_release( def update_version( timestamp: datetime = datetime.now(), version: Version = None, - approved: bool = False, ): lock_path = Path(_version_txt_path.name + ".lock") try: @@ -270,10 +190,9 @@ def update_version( with lock: update_version_txt(version) update_version_py(timestamp, version) - update_readme_markdown(timestamp, version, approved) + update_readme_markdown(timestamp, version) update_citation_cff(timestamp, version) - update_codejson(timestamp, version, approved) - update_PyPI_release(timestamp, version, approved) + update_pypi_release(timestamp, version) finally: try: lock_path.unlink() @@ -285,15 +204,7 @@ def update_version( parser = argparse.ArgumentParser( prog=f"Update {_project_name} version", formatter_class=argparse.RawDescriptionHelpFormatter, - epilog=textwrap.dedent( - """\ - Update version information stored in version.txt in the project root, - as well as several other files in the repository. If --version is not - provided, the version number will not be changed. A file lock is held - to synchronize file access. The version tag must comply with standard - '..' format conventions for semantic versioning. 
- """ - ), + epilog=textwrap.dedent(_epilog), ) parser.add_argument( "-v", @@ -301,13 +212,6 @@ def update_version( required=False, help="Specify the release version", ) - parser.add_argument( - "-a", - "--approve", - required=False, - action="store_true", - help="Approve the release (defaults false)", - ) parser.add_argument( "-g", "--get", @@ -318,14 +222,11 @@ def update_version( args = parser.parse_args() if args.get: - print( - Version((_project_root_path / "version.txt").read_text().strip()) - ) + print(_current_version) else: update_version( timestamp=datetime.now(), version=( Version(args.version) if args.version else _current_version ), - approved=args.approve, ) From baf8dff95ae3cc55adee54ec3e141437ae153b9c Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Fri, 21 Jun 2024 08:22:59 -0400 Subject: [PATCH 35/57] refactor(get-modflow): support ARM macs by default (previously opt-in) (#2225) An ARM mac distribution has been added to the executables repository, we can now detect arch and install the suitable binaries for all three repositories (mf6, nightly build, executables). --- autotest/test_get_modflow.py | 18 +++++++----------- flopy/utils/get_modflow.py | 15 +++------------ 2 files changed, 10 insertions(+), 23 deletions(-) diff --git a/autotest/test_get_modflow.py b/autotest/test_get_modflow.py index 3a93aa7ec..d5dc4d679 100644 --- a/autotest/test_get_modflow.py +++ b/autotest/test_get_modflow.py @@ -116,18 +116,16 @@ def test_get_release(repo): tag = "latest" release = get_release(repo=repo, tag=tag) assets = release["assets"] - - expected_assets = ["linux.zip", "mac.zip", "win64.zip"] + expected_assets = ["linux.zip", "mac.zip", "macarm.zip", "win64.zip"] expected_ostags = [a.replace(".zip", "") for a in expected_assets] actual_assets = [asset["name"] for asset in assets] if repo == "modflow6": - # can remove if modflow6 releases follow asset name conventions followed in executables and nightly build repos + # can remove if modflow6 releases follow the same asset name + # convention used in the executables and nightly build repos assert {a.rpartition("_")[2] for a in actual_assets} >= { a for a in expected_assets if not a.startswith("win") } - elif repo == "modflow6-nightly-build": - expected_assets.append("macarm.zip") else: for ostag in expected_ostags: assert any( @@ -142,15 +140,13 @@ def test_select_bindir(bindir, function_tmpdir): pytest.skip(f"{expected_path} is not writable") selected = select_bindir(f":{bindir}") + # For some reason sys.prefix can return different python + # installs when invoked here and get_modflow.py on macOS. 
+    # Work around by just comparing the end of the bin path,
+    # should be .../Python.framework/Versions/<version>/bin
     if system() != "Darwin":
         assert selected == expected_path
     else:
-        # for some reason sys.prefix can return different python
-        # installs when invoked here and get_modflow.py on macOS
-        # https://github.com/modflowpy/flopy/actions/runs/3331965840/jobs/5512345032#step:8:1835
-        #
-        # work around by just comparing the end of the bin path
-        # should be .../Python.framework/Versions/<version>/bin
         assert selected.parts[-4:] == expected_path.parts[-4:]
 
 
diff --git a/flopy/utils/get_modflow.py b/flopy/utils/get_modflow.py
index 4666fcfe4..fae43b063 100755
--- a/flopy/utils/get_modflow.py
+++ b/flopy/utils/get_modflow.py
@@ -62,7 +62,8 @@ def get_ostag() -> str:
     elif sys.platform.startswith("win"):
         return "win" + ("64" if sys.maxsize > 2**32 else "32")
     elif sys.platform.startswith("darwin"):
-        return "mac"
+        arch = processor()
+        return "mac" + (arch if arch == "arm" else "")
     raise ValueError(f"platform {sys.platform!r} not supported")
 
 
@@ -407,19 +408,9 @@ def run_main(
     # get the selected release
     release = get_release(owner, repo, release_id, quiet)
     assets = release.get("assets", [])
-    asset_names = [a["name"] for a in assets]
     for asset in assets:
         asset_name = asset["name"]
         if ostag in asset_name:
-            # temporary hack for nightly gfortran build for ARM macs
-            # todo: clean up if/when all repos have an ARM mac build
-            if (
-                repo == "modflow6-nightly-build"
-                and "macarm.zip" in asset_names
-                and processor() == "arm"
-                and ostag == "mac.zip"
-            ):
-                continue
             break
     else:
         raise ValueError(
@@ -608,7 +599,7 @@ def add_item(key, fname, do_chmod):
                 break
             shutil.rmtree(str(bindir_path))
 
-    if ostag in ["linux", "mac", "macarm"]:
+    if "win" not in ostag:
         # similar to "chmod +x fname" for each executable
         for fname in chmod:
             pth = bindir / fname

From 0748dcb9e4641b5ad9616af115dd3be906f98f50 Mon Sep 17 00:00:00 2001
From: wpbonelli
Date: Fri, 21 Jun 2024 19:40:21 -0400
Subject: [PATCH 36/57] docs(readme): remove numpy pin in README.md (#2248)

---
 README.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/README.md b/README.md
index c364d247a..8eef94c5f 100644
--- a/README.md
+++ b/README.md
@@ -35,7 +35,7 @@ Installation
 FloPy requires **Python** 3.8+ with:
 
 ```
-numpy >=1.20.3,<2.0.0
+numpy >=1.20.3
 matplotlib >=1.4.0
 pandas >=2.0.0
 ```

From 9dd66c72d43a4206d551c30a18fd333accbcd85d Mon Sep 17 00:00:00 2001
From: wpbonelli
Date: Mon, 24 Jun 2024 13:11:42 -0400
Subject: [PATCH 37/57] ci(macos): use intel triangle build for now (#2249)

Workaround MODFLOW-USGS/executables#33.
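Until an ARM build of triangle is published, macOS runners fall back to the
Intel distribution, which runs under Rosetta 2. A rough Python equivalent of
the CI step added below, assuming run_main() accepts keyword options that
mirror the get-modflow CLI flags (the bin directory is illustrative):

    import sys
    from pathlib import Path
    from platform import processor

    from flopy.utils.get_modflow import run_main

    # detect Apple silicon the same way the updated get_ostag() does
    if sys.platform.startswith("darwin") and processor() == "arm":
        # request only the Intel ("mac") build of triangle; everything
        # else can use the native "macarm" distribution
        run_main(
            bindir=str(Path.home() / ".local" / "bin"),
            repo="executables",
            ostag="mac",
            subset="triangle",
        )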
--- .github/workflows/benchmark.yml | 8 ++++++++ .github/workflows/commit.yml | 8 ++++++++ .github/workflows/examples.yml | 8 ++++++++ .github/workflows/rtd.yml | 8 ++++++++ 4 files changed, 32 insertions(+) diff --git a/.github/workflows/benchmark.yml b/.github/workflows/benchmark.yml index 79ba1028d..a3de02b4d 100644 --- a/.github/workflows/benchmark.yml +++ b/.github/workflows/benchmark.yml @@ -40,6 +40,14 @@ jobs: - name: Install Modflow executables uses: modflowpy/install-modflow-action@v1 + - name: Install triangle (macOS workaround) + if: runner.os == 'macOS' + uses: modflowpy/install-modflow-action@v1 + with: + repo: executables + ostag: mac + subset: triangle + - name: Run benchmarks working-directory: autotest run: | diff --git a/.github/workflows/commit.yml b/.github/workflows/commit.yml index 23c4cc5d6..07e7b73bc 100644 --- a/.github/workflows/commit.yml +++ b/.github/workflows/commit.yml @@ -166,6 +166,14 @@ jobs: with: repo: modflow6-nightly-build + - name: Install triangle (macOS workaround) + if: runner.os == 'macOS' + uses: modflowpy/install-modflow-action@v1 + with: + repo: executables + ostag: mac + subset: triangle + - name: Update package classes run: python -m flopy.mf6.utils.generate_classes --ref develop --no-backup diff --git a/.github/workflows/examples.yml b/.github/workflows/examples.yml index 2b78ec044..590a1578f 100644 --- a/.github/workflows/examples.yml +++ b/.github/workflows/examples.yml @@ -60,6 +60,14 @@ jobs: with: repo: modflow6-nightly-build + - name: Install triangle (macOS workaround) + if: runner.os == 'macOS' + uses: modflowpy/install-modflow-action@v1 + with: + repo: executables + ostag: mac + subset: triangle + - name: Update FloPy packages run: python -m flopy.mf6.utils.generate_classes --ref develop --no-backup diff --git a/.github/workflows/rtd.yml b/.github/workflows/rtd.yml index 0096cc2ee..ec667df1c 100644 --- a/.github/workflows/rtd.yml +++ b/.github/workflows/rtd.yml @@ -82,6 +82,14 @@ jobs: with: repo: modflow6-nightly-build + - name: Install triangle (macOS workaround) + if: runner.os == 'macOS' + uses: modflowpy/install-modflow-action@v1 + with: + repo: executables + ostag: mac + subset: triangle + - name: Run tutorial and example notebooks working-directory: autotest run: pytest -v -n auto test_notebooks.py From a2a159f1758781fc633710f68af5441eb1e4dafb Mon Sep 17 00:00:00 2001 From: martclanor Date: Mon, 24 Jun 2024 23:20:38 +0200 Subject: [PATCH 38/57] fix(Raster): reclassify np.float64 correctly (#2235) --- flopy/utils/rasters.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/flopy/utils/rasters.py b/flopy/utils/rasters.py index f8f965edb..e56fee2cd 100644 --- a/flopy/utils/rasters.py +++ b/flopy/utils/rasters.py @@ -45,7 +45,7 @@ class Raster: """ - FLOAT32 = (float, np.float32, np.float64) + FLOAT32 = (float, np.float32) FLOAT64 = (np.float64,) INT8 = (np.int8, np.uint8) INT16 = (np.int16, np.uint16) From 9db562a3b1d18af3801036b1d79d74668c0f71c6 Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Mon, 24 Jun 2024 20:32:17 -0400 Subject: [PATCH 39/57] fix(HeadFile): fix dis reversal, expand tests (#2247) Fix the write logic for head file reversal to properly write each layer. Add a test for each discretization type. Fix in-place reverse by writing to a temp file first then renaming. Improve docstrings. 
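The in-place rewrite now follows a write-then-swap pattern, roughly like the
following sketch of the approach shared by both reverse() methods (the helper
name and record type are illustrative):

    import tempfile
    from pathlib import Path

    def rewrite_reversed(path: Path, records: list) -> None:
        # write the reversed records to a temporary file first...
        temp_path = Path(tempfile.gettempdir()) / path.name
        with open(temp_path, "wb") as f:
            for rec in reversed(records):
                f.write(rec)
        # ...and only then replace the original, so a failure mid-write
        # cannot leave a partially reversed file behind
        temp_path.replace(path)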
Resolves #2246 --- autotest/test_binaryfile.py | 197 +++++++++++++++++++++++++++++++----- flopy/utils/binaryfile.py | 184 +++++++++++++++++++-------------- 2 files changed, 279 insertions(+), 102 deletions(-) diff --git a/autotest/test_binaryfile.py b/autotest/test_binaryfile.py index f420721bd..e56e20d9c 100644 --- a/autotest/test_binaryfile.py +++ b/autotest/test_binaryfile.py @@ -4,6 +4,7 @@ """ from itertools import repeat +from pprint import pformat import numpy as np import pandas as pd @@ -27,7 +28,7 @@ write_budget, write_head, ) -from flopy.utils.gridutil import uniform_flow_field +from flopy.utils.gridutil import get_disv_kwargs, uniform_flow_field @pytest.fixture @@ -475,47 +476,184 @@ def test_binaryfile_read_context(freyberg_model_path): assert str(e.value) == "seek of closed file", str(e.value) -def test_headfile_reverse_mf6(example_data_path, function_tmpdir): +def test_binaryfile_reverse_mf6_dis(function_tmpdir): + name = "reverse_dis" + sim = flopy.mf6.MFSimulation( + sim_name=name, sim_ws=function_tmpdir, exe_name="mf6" + ) + tdis_rc = [(1, 1, 1.0), (1, 1, 1.0)] + nper = len(tdis_rc) + tdis = flopy.mf6.ModflowTdis(sim, nper=nper, perioddata=tdis_rc) + ims = flopy.mf6.ModflowIms(sim) + gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True) + dis = flopy.mf6.ModflowGwfdis(gwf, nrow=10, ncol=10) + dis = gwf.get_package("DIS") + nlay = 2 + botm = [1 - (k + 1) for k in range(nlay)] + botm_data = np.array([list(repeat(b, 10 * 10)) for b in botm]).reshape( + (nlay, 10, 10) + ) + dis.nlay = nlay + dis.botm.set_data(botm_data) + ic = flopy.mf6.ModflowGwfic(gwf) + npf = flopy.mf6.ModflowGwfnpf(gwf, save_specific_discharge=True) + chd = flopy.mf6.ModflowGwfchd( + gwf, stress_period_data=[[(0, 0, 0), 1.0], [(0, 9, 9), 0.0]] + ) + budget_file = name + ".bud" + head_file = name + ".hds" + oc = flopy.mf6.ModflowGwfoc( + gwf, + budget_filerecord=budget_file, + head_filerecord=head_file, + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + sim.write_simulation(silent=True) + success, buff = sim.run_simulation(silent=True, report=True) + assert success, pformat(buff) + + # reverse head file in place and check reversal + head_file = flopy.utils.HeadFile(function_tmpdir / head_file, tdis=tdis) + heads = head_file.get_alldata() + assert heads.shape == (nper, 2, 10, 10) + head_file.reverse() + heads_rev = head_file.get_alldata() + assert heads_rev.shape == (nper, 2, 10, 10) + + # reverse budget and write to separate file + budget_file_rev_path = function_tmpdir / f"{budget_file}_rev" + budget_file = flopy.utils.CellBudgetFile( + function_tmpdir / budget_file, tdis=tdis + ) + budget_file.reverse(budget_file_rev_path) + budget_file_rev = flopy.utils.CellBudgetFile( + budget_file_rev_path, tdis=tdis + ) + + for kper in range(nper): + assert np.allclose(heads[kper], heads_rev[-kper + 1]) + budget = budget_file.get_data(text="FLOW-JA-FACE", totim=kper)[0] + budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=kper)[ + 0 + ] + assert budget.shape == budget_rev.shape + assert np.allclose(budget, -budget_rev) + + +def test_binaryfile_reverse_mf6_disv(function_tmpdir): + name = "reverse_disv" + sim = flopy.mf6.MFSimulation( + sim_name=name, sim_ws=function_tmpdir, exe_name="mf6" + ) + tdis_rc = [(1, 1, 1.0), (1, 1, 1.0)] + nper = len(tdis_rc) + tdis = flopy.mf6.ModflowTdis(sim, nper=nper, perioddata=tdis_rc) + ims = flopy.mf6.ModflowIms(sim) + gwf = flopy.mf6.ModflowGwf(sim, modelname=name, save_flows=True) + dis = flopy.mf6.ModflowGwfdisv( + gwf, **get_disv_kwargs(2, 10, 
10, 1.0, 1.0, 25.0, [20.0, 15.0]) + ) + ic = flopy.mf6.ModflowGwfic(gwf) + npf = flopy.mf6.ModflowGwfnpf(gwf, save_specific_discharge=True) + chd = flopy.mf6.ModflowGwfchd( + gwf, stress_period_data=[[(0, 0, 0), 1.0], [(0, 9, 9), 0.0]] + ) + budget_file = name + ".bud" + head_file = name + ".hds" + oc = flopy.mf6.ModflowGwfoc( + gwf, + budget_filerecord=budget_file, + head_filerecord=head_file, + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + sim.write_simulation(silent=True) + success, buff = sim.run_simulation(silent=True) + assert success, pformat(buff) + + # reverse head file in place and check reversal + head_file = flopy.utils.HeadFile(function_tmpdir / head_file, tdis=tdis) + heads = head_file.get_alldata() + assert heads.shape == (nper, 2, 1, 100) + head_file.reverse() + heads_rev = head_file.get_alldata() + assert heads_rev.shape == (nper, 2, 1, 100) + + # reverse budget and write to separate file + budget_file_rev_path = function_tmpdir / f"{budget_file}_rev" + budget_file = flopy.utils.CellBudgetFile( + function_tmpdir / budget_file, tdis=tdis + ) + budget_file.reverse(budget_file_rev_path) + budget_file_rev = flopy.utils.CellBudgetFile( + budget_file_rev_path, tdis=tdis + ) + + for kper in range(nper): + assert np.allclose(heads[kper], heads_rev[-kper + 1]) + budget = budget_file.get_data(text="FLOW-JA-FACE", totim=kper)[0] + budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=kper)[ + 0 + ] + assert budget.shape == budget_rev.shape + assert np.allclose(budget, -budget_rev) + + +def test_binaryfile_reverse_mf6_disu(example_data_path, function_tmpdir): # load simulation and extract tdis sim_name = "test006_gwf3" sim = flopy.mf6.MFSimulation.load( sim_name=sim_name, sim_ws=example_data_path / "mf6" / sim_name ) - tdis = sim.get_package("tdis") + tdis_rc = [(1, 1, 1.0), (1, 1, 1.0)] + nper = len(tdis_rc) + tdis = flopy.mf6.ModflowTdis( + sim, time_units="DAYS", nper=nper, perioddata=tdis_rc + ) + sim.set_sim_path(function_tmpdir) + sim.write_simulation() + sim.run_simulation() # load head file, providing tdis as kwarg - model_path = example_data_path / "mf6" / sim_name - file_stem = "flow_adj" - file_path = model_path / "expected_output" / f"{file_stem}.hds" - f = HeadFile(file_path, tdis=tdis) - assert isinstance(f, HeadFile) - - # reverse the file - rf_name = f"{file_stem}_rev.hds" - f.reverse(filename=function_tmpdir / rf_name) - rf = HeadFile(function_tmpdir / rf_name) - assert isinstance(rf, HeadFile) + file_path = function_tmpdir / "flow.hds" + head_file = HeadFile(file_path, tdis=tdis) + + # reverse and write to a separate file + head_file_rev_path = function_tmpdir / "flow_rev.hds" + head_file.reverse(filename=head_file_rev_path) + head_file_rev = HeadFile(head_file_rev_path, tdis=tdis) + + # load budget file + file_path = function_tmpdir / "flow.cbc" + budget_file = CellBudgetFile(file_path, tdis=tdis) + + # reverse and write to a separate file + budget_file_rev_path = function_tmpdir / "flow_rev.cbc" + budget_file.reverse(filename=budget_file_rev_path) + budget_file_rev = CellBudgetFile(budget_file_rev_path, tdis=tdis) # check that data from both files have the same shape - assert f.get_alldata().shape == (1, 1, 1, 121) - assert rf.get_alldata().shape == (1, 1, 1, 121) + assert head_file.get_alldata().shape == (nper, 1, 1, 121) + assert head_file_rev.get_alldata().shape == (nper, 1, 1, 121) # check number of records - assert len(f) == 1 - assert len(rf) == 1 + assert len(head_file) == nper + assert len(head_file_rev) == nper + assert 
len(budget_file) == nper * 2 + assert len(budget_file_rev) == nper * 2 # check that the data are reversed - nrecords = len(f) + nrecords = len(head_file) for idx in range(nrecords - 1, -1, -1): - # check headers - f_header = list(f.recordarray[nrecords - idx - 1]) - rf_header = list(rf.recordarray[idx]) - # todo: these should be equal! + # check headfile headers + f_header = list(head_file.recordarray[nrecords - idx - 1]) + rf_header = list(head_file_rev.recordarray[idx]) assert f_header != rf_header - # check data - f_data = f.get_data(idx=idx)[0] - rf_data = rf.get_data(idx=nrecords - idx - 1)[0] + # check headfile data + f_data = head_file.get_data(idx=idx)[0] + rf_data = head_file_rev.get_data(idx=nrecords - idx - 1)[0] assert f_data.shape == rf_data.shape if f_data.ndim == 1: for row in range(len(f_data)): @@ -525,6 +663,13 @@ def test_headfile_reverse_mf6(example_data_path, function_tmpdir): else: assert np.array_equal(f_data[0][0], rf_data[0][0]) + budget = budget_file.get_data(text="FLOW-JA-FACE", totim=idx)[0] + budget_rev = budget_file_rev.get_data(text="FLOW-JA-FACE", totim=idx)[ + 0 + ] + assert budget.shape == budget_rev.shape + assert np.allclose(budget, -budget_rev) + @pytest.fixture @pytest.mark.mf6 diff --git a/flopy/utils/binaryfile.py b/flopy/utils/binaryfile.py index 94074f427..fb7141092 100644 --- a/flopy/utils/binaryfile.py +++ b/flopy/utils/binaryfile.py @@ -10,6 +10,7 @@ """ import os +import tempfile import warnings from pathlib import Path from typing import List, Optional, Union @@ -20,6 +21,8 @@ from ..utils.datafile import Header, LayerFile from .gridutil import get_lni +HEAD_TEXT = " HEAD" + def write_head( fbin, @@ -28,7 +31,7 @@ def write_head( kper=1, pertim=1.0, totim=1.0, - text=" HEAD", + text=HEAD_TEXT, ilay=1, ): dt = np.dtype( @@ -662,81 +665,99 @@ def __init__( def reverse(self, filename: Optional[os.PathLike] = None): """ - Write a new binary head file with the records in reverse order. - If a new filename is not provided, or if the filename is the same - as the existing filename, the file will be overwritten and data - reloaded from the rewritten/reversed file. + Reverse the time order of the currently loaded binary head file. If a head + file name is not provided or the provided name is the same as the existing + filename, the file will be overwritten and reloaded. Parameters ---------- filename : str or PathLike - Path of the new reversed binary file to create. + Path of the reversed binary head file. 
""" filename = ( Path(filename).expanduser().absolute() - if filename + if filename is not None else self.filename ) - # header array formats - dt = np.dtype( - [ - ("kstp", np.int32), - ("kper", np.int32), - ("pertim", np.float64), - ("totim", np.float64), - ("text", "S16"), - ("ncol", np.int32), - ("nrow", np.int32), - ("ilay", np.int32), - ] - ) - - # make sure we have tdis - if self.tdis is None or not any(self.tdis.perioddata.get_data()): - raise ValueError("tdis mu/st be known to reverse head file") - - # extract period data - pd = self.tdis.perioddata.get_data() - - # get maximum period number and total simulation time - kpermx = len(pd) - 1 - tsimtotal = 0.0 - for tpd in pd: - tsimtotal += tpd[0] - - # get total number of records - nrecords = len(self) - - # open backward file - with open(filename, "wb") as fbin: - # loop over head file records in reverse order - for idx in range(nrecords - 1, -1, -1): - # load header array - header = self.recordarray[idx].copy() - - # reverse kstp and kper in the header array - (kstp, kper) = (header["kstp"] - 1, header["kper"] - 1) - kstpmx = pd[kper][1] - 1 - kstpb = kstpmx - kstp - kperb = kpermx - kper - (header["kstp"], header["kper"]) = (kstpb + 1, kperb + 1) + def get_max_kper_kstp_tsim(): + header = self.recordarray[-1] + kper = header["kper"] - 1 + tsim = header["totim"] + kstp = {0: 0} + for i in range(len(self) - 1, -1, -1): + header = self.recordarray[i] + if ( + header["kper"] in kstp + and header["kstp"] > kstp[header["kper"]] + ): + kstp[header["kper"]] += 1 + else: + kstp[header["kper"]] = 0 + return kper, kstp, tsim + + # get max period and time from the head file + maxkper, maxkstp, maxtsim = get_max_kper_kstp_tsim() + # if we have tdis, get max period number and simulation time from it + tdis_maxkper, tdis_maxtsim = None, None + if self.tdis is not None: + pd = self.tdis.perioddata.get_data() + if any(pd): + tdis_maxkper = len(pd) - 1 + tdis_maxtsim = sum([p[0] for p in pd]) + # if we have both, check them against each other + if tdis_maxkper is not None: + assert maxkper == tdis_maxkper, ( + f"Max stress period in binary head file ({maxkper}) != " + f"max stress period in provided tdis ({tdis_maxkper})" + ) + assert maxtsim == tdis_maxtsim, ( + f"Max simulation time in binary head file ({maxtsim}) != " + f"max simulation time in provided tdis ({tdis_maxtsim})" + ) - # reverse totim and pertim in the header array - header["totim"] = tsimtotal - header["totim"] - perlen = pd[kper][0] - header["pertim"] = perlen - header["pertim"] + def reverse_header(header): + """Reverse period, step and time fields in the record header""" + + # reverse kstp and kper headers + kstp = header["kstp"] - 1 + kper = header["kper"] - 1 + header["kstp"] = maxkstp[kper] - kstp + 1 + header["kper"] = maxkper - kper + 1 + + # reverse totim and pertim headers + header["totim"] = maxtsim - header["totim"] + perlen = pd[kper][0] + header["pertim"] = perlen - header["pertim"] + return header + + # reverse record order and write to temporary file + temp_dir_path = Path(tempfile.gettempdir()) + temp_file_path = temp_dir_path / filename.name + with open(temp_file_path, "wb") as f: + for i in range(len(self) - 1, -1, -1): + header = self.recordarray[i].copy() + header = reverse_header(header) + data = self.get_data(idx=i) + ilay = header["ilay"] + write_head( + fbin=f, + data=data[ilay - 1], + kstp=header["kstp"], + kper=header["kper"], + pertim=header["pertim"], + totim=header["totim"], + ilay=ilay, + ) - # write header information - h = np.array(header, 
-                h.tofile(fbin)
-
-                # load and write data
-                data = self.get_data(idx=idx)[0][0]
-                data = np.array(data, dtype=np.float64)
-                data.tofile(fbin)
+        # if we're rewriting the original file, close it first
+        if filename == self.filename:
+            self.close()
+
+        # move temp file to destination
+        temp_file_path.replace(filename)
 
         # if we rewrote the original file, reinitialize
         if filename == self.filename:
@@ -2241,21 +2262,25 @@ def close(self):
 
     def reverse(self, filename: Optional[os.PathLike] = None):
         """
-        Write a binary cell budget file with the records in reverse order.
-        If a new filename is not provided, or if the filename is the same
-        as the existing filename, the file will be overwritten and data
-        reloaded from the rewritten/reversed file.
+        Reverse the time order and signs of the currently loaded binary cell budget
+        file. If a file name is not provided or if the provided name is the same as
+        the existing filename, the file will be overwritten and reloaded.
 
+        Notes
+        -----
+        While `HeadFile.reverse()` reverses only the temporal order of head data,
+        this method must reverse not only the order but also the sign (direction)
+        of the model's intercell flows.
+
         Parameters
         ----------
         filename : str or PathLike, optional
-            Path of the new reversed binary cell budget file to create.
+            Path of the reversed binary cell budget file.
 
         """
 
         filename = (
             Path(filename).expanduser().absolute()
-            if filename
+            if filename is not None
             else self.filename
         )
@@ -2303,7 +2326,9 @@ def reverse(self, filename: Optional[os.PathLike] = None):
         nrecords = len(self)
 
         # open backward budget file
-        with open(filename, "wb") as fbin:
+        temp_dir_path = Path(tempfile.gettempdir())
+        temp_file_path = temp_dir_path / filename.name
+        with open(temp_file_path, "wb") as f:
             # loop over budget file records in reverse order
             for idx in range(nrecords - 1, -1, -1):
                 # load header array
@@ -2338,7 +2363,7 @@ def reverse(self, filename: Optional[os.PathLike] = None):
                 ]
                 # Note: much of the code below is based on binary_file_writer.py
                 h = np.array(h, dtype=dt1)
-                h.tofile(fbin)
+                h.tofile(f)
                 if header["imeth"] == 6:
                     # Write additional header information to the backward budget file
                     h = header[
@@ -2350,7 +2375,7 @@ def reverse(self, filename: Optional[os.PathLike] = None):
                         ]
                     ]
                     h = np.array(h, dtype=dt2)
-                    h.tofile(fbin)
+                    h.tofile(f)
                     # Load data
                     data = self.get_data(idx)[0]
                     data = np.array(data)
@@ -2361,7 +2386,7 @@ def reverse(self, filename: Optional[os.PathLike] = None):
                     ndat = len(colnames) - 2
                     dt = np.dtype([("ndat", np.int32)])
                     h = np.array([(ndat,)], dtype=dt)
-                    h.tofile(fbin)
+                    h.tofile(f)
                     # Write auxiliary column names
                     naux = ndat - 1
                     if naux > 0:
@@ -2373,12 +2398,12 @@ def reverse(self, filename: Optional[os.PathLike] = None):
                         [(colname, "S16") for colname in colnames[3:]]
                     )
                     h = np.array(auxtxt, dtype=dt)
-                    h.tofile(fbin)
+                    h.tofile(f)
                     # Write nlist
                     nlist = data.shape[0]
                     dt = np.dtype([("nlist", np.int32)])
                     h = np.array([(nlist,)], dtype=dt)
-                    h.tofile(fbin)
+                    h.tofile(f)
                 elif header["imeth"] == 1:
                     # Load data
                     data = self.get_data(idx)[0][0][0]
@@ -2388,7 +2413,14 @@ def reverse(self, filename: Optional[os.PathLike] = None):
                 else:
                     raise ValueError("not expecting imeth " + header["imeth"])
                 # Write data
-                data.tofile(fbin)
+                data.tofile(f)
+
+        # if we're rewriting the original file, close it first
+        if filename == self.filename:
+            self.close()
+
+        # move temp file to destination
+        temp_file_path.replace(filename)
 
         # if we rewrote the original file, reinitialize
         if filename == self.filename:

From 71fbd262037246b5b1ce1cef8f1176ea9c2a9e75 Mon Sep 17 00:00:00 2001
From: wpbonelli
Date: Wed, 3 Jul 2024 08:41:30 -0400
Subject: [PATCH 40/57] ci(rtd): add workflow_dispatch trigger to rtd.yml (#2254)

This lets us trigger the RTD build at will, so problems like #2253 can be
fixed without pushing commits. Also some ruff updates: ignore E721 globally
for now, ignore F632 in place.

---
 .github/workflows/rtd.yml     | 18 +++++++++++++-----
 flopy/mf6/data/mfdataarray.py |  2 +-
 pyproject.toml                |  1 +
 3 files changed, 15 insertions(+), 6 deletions(-)

diff --git a/.github/workflows/rtd.yml b/.github/workflows/rtd.yml
index ec667df1c..6c876a9bd 100644
--- a/.github/workflows/rtd.yml
+++ b/.github/workflows/rtd.yml
@@ -1,16 +1,15 @@
 name: FloPy documentation
-
 on:
   push:
   pull_request:
     branches:
       - master
       - develop
+  workflow_dispatch:
 concurrency:
   group: ${{ github.workflow }}-${{ github.ref }}
   cancel-in-progress: true
 jobs:
-
   rtd_build:
     name: Prepare and test notebooks
     runs-on: ${{ matrix.os }}
@@ -95,7 +94,13 @@ jobs:
         run: pytest -v -n auto test_notebooks.py
 
       - name: Upload notebooks artifact for ReadtheDocs
-        if: github.repository_owner == 'modflowpy' && github.event_name == 'push' && runner.os == 'Linux'
+        if: |
+          github.repository_owner == 'modflowpy' &&
+          runner.os == 'Linux' &&
+          (
+            github.event_name == 'push' ||
+            github.event_name == 'workflow_dispatch'
+          )
         uses: actions/upload-artifact@v4
         with:
           name: notebooks-for-${{ github.sha }}
@@ -106,9 +111,13 @@ jobs:
     name: Read the Docs trigger
     needs: rtd_build
     runs-on: ubuntu-latest
-    if:
-      github.repository_owner == 'modflowpy' && github.event_name == 'push'
+    if:
+      github.repository_owner == 'modflowpy' &&
+      (
+        github.event_name == 'push' ||
+        github.event_name == 'workflow_dispatch'
+      )
     steps:
       - name: Trigger RTDs build on master and develop branches
         uses: dfm/rtds-action@v1
diff --git a/flopy/mf6/data/mfdataarray.py b/flopy/mf6/data/mfdataarray.py
index d3c02dbdb..661b91cc8 100644
--- a/flopy/mf6/data/mfdataarray.py
+++ b/flopy/mf6/data/mfdataarray.py
@@ -733,7 +733,7 @@ def _get_data(self, layer=None, apply_mult=False, **kwargs):
             "array" in kwargs
             and kwargs["array"]
             and isinstance(self, MFTransientArray)
-            and data is not []
+            and data is not []  # noqa: F632
         ):
             data = np.expand_dims(data, 0)
         return data
diff --git a/pyproject.toml b/pyproject.toml
index f53c5e6cd..f18d5cd16 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -142,6 +142,7 @@ ignore = [
     "E501",  # line too long TODO FIXME
     "E712",  # Avoid equality comparisons to `True`
     "E722",  # do not use bare `except`
+    "E721",  # use `is`/`is not` for type comparisons
     "E741",  # ambiguous variable name
     "F401",  # unused import
     "F403",  # unable to detect undefined names (star imports)

From 5aea2294e511cfa521bfe0fcb2cc7a3dd0b76ae3 Mon Sep 17 00:00:00 2001
From: wpbonelli
Date: Wed, 3 Jul 2024 11:33:33 -0400
Subject: [PATCH 41/57] docs(examples): move swt henry example to mt3d/swt section (#2255)

Motivated by #2252

---
 .docs/Notebooks/seawat_henry_example.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/.docs/Notebooks/seawat_henry_example.py b/.docs/Notebooks/seawat_henry_example.py
index 1c8575460..7307f9015 100644
--- a/.docs/Notebooks/seawat_henry_example.py
+++ b/.docs/Notebooks/seawat_henry_example.py
@@ -10,6 +10,7 @@
 # kernelspec:
 # display_name: Python 3 (ipykernel)
 # language: python
+# section: mt3d
 # name: python3
 # ---

From 9ea598bf874d903a0407b99d331b12015e5dd31a Mon Sep 17 00:00:00 2001
From: wpbonelli
Date: Wed, 3 Jul 2024 15:02:59 -0400
Subject: [PATCH 42/57] docs(examples): drop duplicative mp7 examples (#2256)
I think these can be removed since each has an equivalent and more complete
duplicate in the mf6 examples repository. This also speeds up cumulative
notebook runtime, so we can eventually run the notebooks on RTD.

---
 .../Notebooks/modpath7_structured_example.py  | 484 ---------------
 .../modpath7_structured_transient_example.py  | 406 ------------
 .../modpath7_unstructured_example.py          | 561 -----------------
 .../modpath7_unstructured_lateral_example.py  | 576 ------------------
 4 files changed, 2027 deletions(-)
 delete mode 100644 .docs/Notebooks/modpath7_structured_example.py
 delete mode 100644 .docs/Notebooks/modpath7_structured_transient_example.py
 delete mode 100644 .docs/Notebooks/modpath7_unstructured_example.py
 delete mode 100644 .docs/Notebooks/modpath7_unstructured_lateral_example.py

diff --git a/.docs/Notebooks/modpath7_structured_example.py b/.docs/Notebooks/modpath7_structured_example.py
deleted file mode 100644
index 299da4b2d..000000000
--- a/.docs/Notebooks/modpath7_structured_example.py
+++ /dev/null
@@ -1,484 +0,0 @@
-# ---
-# jupyter:
-#   jupytext:
-#     notebook_metadata_filter: all
-#     text_representation:
-#       extension: .py
-#       format_name: light
-#       format_version: '1.5'
-#       jupytext_version: 1.14.5
-#   kernelspec:
-#     display_name: Python 3 (ipykernel)
-#     language: python
-#     name: python3
-#   metadata:
-#     section: modpath
-#     authors:
-#       - name: Joseph Hughes
-# ---

-# # Using MODPATH 7 with structured grids
-#
-# This notebook demonstrates how to create and run example 1a from the MODPATH 7 documentation for MODFLOW-2005 and MODFLOW 6. The notebooks also shows how to create subsets of endpoint output and plot MODPATH results on PlotMapView objects.

-import os

-# +
-import sys
-from tempfile import TemporaryDirectory

-import matplotlib as mpl
-import matplotlib.pyplot as plt
-import numpy as np
-from numpy.lib.recfunctions import repack_fields

-import flopy

-print(sys.version)
-print(f"numpy version: {np.__version__}")
-print(f"matplotlib version: {mpl.__version__}")
-print(f"flopy version: {flopy.__version__}")

-# temporary directory
-temp_dir = TemporaryDirectory()
-workspace = temp_dir.name
-# -

-# ### Flow model data

-nper, nstp, perlen, tsmult = 1, 1, 1.0, 1.0
-nlay, nrow, ncol = 3, 21, 20
-delr = delc = 500.0
-top = 400.0
-botm = [220.0, 200.0, 0.0]
-laytyp = [1, 0, 0]
-kh = [50.0, 0.01, 200.0]
-kv = [10.0, 0.01, 20.0]
-wel_loc = (2, 10, 9)
-wel_q = -150000.0
-rch = 0.005
-riv_h = 320.0
-riv_z = 317.0
-riv_c = 1.0e5

-# ### MODPATH 7 data

-# +
-# MODPATH zones
-zone3 = np.ones((nrow, ncol), dtype=np.int32)
-zone3[wel_loc[1:]] = 2
-zones = [1, 1, zone3]

-# create particles
-# particle group 1
-plocs = []
-pids = []
-for idx in range(nrow):
-    plocs.append((0, idx, 2))
-    pids.append(idx)
-part0 = flopy.modpath.ParticleData(
-    plocs, drape=0, structured=True, particleids=pids
-)
-pg0 = flopy.modpath.ParticleGroup(
-    particlegroupname="PG1", particledata=part0, filename="ex01a.pg1.sloc"
-)

-# particle group 2
-v = [(2, 0, 0), (0, 20, 0)]
-part1 = flopy.modpath.ParticleData(
-    v, drape=1, structured=True, particleids=[1000, 1001]
-)
-pg1 = flopy.modpath.ParticleGroup(
-    particlegroupname="PG2", particledata=part1, filename="ex01a.pg2.sloc"
-)

-locsa = [[0, 0, 0, 0, nrow - 1, ncol - 1], [1, 0, 0, 1, nrow - 1, ncol - 1]]
-locsb = [[2, 0, 0, 2, nrow - 1, ncol - 1]]
-sd = flopy.modpath.CellDataType(
-    drape=0, columncelldivisions=1, rowcelldivisions=1, layercelldivisions=1
-)
-p = flopy.modpath.LRCParticleData(
-    subdivisiondata=[sd, sd],
lrcregions=[locsa, locsb] -) -pg2 = flopy.modpath.ParticleGroupLRCTemplate( - particlegroupname="PG3", particledata=p, filename="ex01a.pg3.sloc" -) - -particlegroups = [pg2] - -# default iface for MODFLOW-2005 and MODFLOW 6 -defaultiface = {"RECHARGE": 6, "ET": 6} -defaultiface6 = {"RCH": 6, "EVT": 6} -# - - -# ### MODPATH 7 using MODFLOW-2005 -# -# #### Create and run MODFLOW-2005 - -# + -ws = os.path.join(workspace, "mp7_ex1_mf2005_dis") -nm = "ex01_mf2005" -exe_name = "mf2005" -iu_cbc = 130 -m = flopy.modflow.Modflow(nm, model_ws=ws, exe_name=exe_name) -flopy.modflow.ModflowDis( - m, - nlay=nlay, - nrow=nrow, - ncol=ncol, - nper=nper, - itmuni=4, - lenuni=2, - perlen=perlen, - nstp=nstp, - tsmult=tsmult, - steady=True, - delr=delr, - delc=delc, - top=top, - botm=botm, -) -flopy.modflow.ModflowLpf( - m, ipakcb=iu_cbc, laytyp=laytyp, hk=kh, vka=kv, constantcv=True -) -flopy.modflow.ModflowBas(m, ibound=1, strt=top) -# recharge -flopy.modflow.ModflowRch(m, ipakcb=iu_cbc, rech=rch) -# wel -wd = [i for i in wel_loc] + [wel_q] -flopy.modflow.ModflowWel(m, ipakcb=iu_cbc, stress_period_data={0: wd}) -# river -rd = [] -for i in range(nrow): - rd.append([0, i, ncol - 1, riv_h, riv_c, riv_z]) -flopy.modflow.ModflowRiv(m, ipakcb=iu_cbc, stress_period_data={0: rd}) -# output control -flopy.modflow.ModflowOc( - m, stress_period_data={(0, 0): ["save head", "save budget", "print head"]} -) -flopy.modflow.ModflowPcg(m, hclose=1e-6, rclose=1e-6) - -m.write_input() -success, buff = m.run_model(silent=True, report=True) -assert success, "mf2005 model did not run" -for line in buff: - print(line) -# - - -# #### Create and run MODPATH 7 - -# + -# create modpath files -exe_name = "mp7" -mp = flopy.modpath.Modpath7( - modelname=f"{nm}_mp", flowmodel=m, exe_name=exe_name, model_ws=ws -) -mpbas = flopy.modpath.Modpath7Bas(mp, porosity=0.1, defaultiface=defaultiface) -mpsim = flopy.modpath.Modpath7Sim( - mp, - simulationtype="combined", - trackingdirection="forward", - weaksinkoption="pass_through", - weaksourceoption="pass_through", - budgetoutputoption="summary", - budgetcellnumbers=[1049, 1259], - traceparticledata=[1, 1000], - referencetime=[0, 0, 0.0], - stoptimeoption="extend", - timepointdata=[500, 1000.0], - zonedataoption="on", - zones=zones, - particlegroups=particlegroups, -) - -# write modpath datasets -mp.write_input() - -# run modpath -success, buff = mp.run_model(silent=True, report=True) -assert success, "mp7 failed to run" -for line in buff: - print(line) -# - - -# #### Load MODPATH 7 output - -# Get locations to extract pathline data - -nodew = m.dis.get_node([wel_loc]) -riv_locs = repack_fields(m.riv.stress_period_data[0][["k", "i", "j"]]) -nodesr = m.dis.get_node(riv_locs.tolist()) - -# Pathline data - -fpth = os.path.join(ws, f"{nm}_mp.mppth") -p = flopy.utils.PathlineFile(fpth) -pw0 = p.get_destination_pathline_data(nodew, to_recarray=True) -pr0 = p.get_destination_pathline_data(nodesr, to_recarray=True) - -# Endpoint data -# -# Get particles that terminate in the well - -fpth = os.path.join(ws, f"{nm}_mp.mpend") -e = flopy.utils.EndpointFile(fpth) -well_epd = e.get_destination_endpoint_data(dest_cells=nodew) -well_epd.shape - -# Get particles that terminate in the river boundaries - -riv_epd = e.get_destination_endpoint_data(dest_cells=nodesr) -riv_epd.shape - -# Merge the particles that end in the well and the river boundaries. 
- -epd0 = np.concatenate((well_epd, riv_epd)) -epd0.shape - -# #### Plot MODPATH 7 output - -mm = flopy.plot.PlotMapView(model=m) -mm.plot_grid(lw=0.5) -mm.plot_pathline(pw0, layer="all", colors="blue", label="captured by wells") -mm.plot_pathline(pr0, layer="all", colors="green", label="captured by rivers") -mm.plot_endpoint(epd0, direction="starting", colorbar=True) -mm.ax.legend() - -# ### MODPATH 7 using MODFLOW 6 -# -# #### Create and run MODFLOW 6 - -# + -ws = os.path.join(workspace, "mp7_ex1_mf6_dis") -nm = "ex01_mf6" -exe_name = "mf6" - -# Create the Flopy simulation object -sim = flopy.mf6.MFSimulation( - sim_name=nm, exe_name="mf6", version="mf6", sim_ws=ws -) - -# Create the Flopy temporal discretization object -pd = (perlen, nstp, tsmult) -tdis = flopy.mf6.modflow.mftdis.ModflowTdis( - sim, pname="tdis", time_units="DAYS", nper=nper, perioddata=[pd] -) - -# Create the Flopy groundwater flow (gwf) model object -model_nam_file = f"{nm}.nam" -gwf = flopy.mf6.ModflowGwf( - sim, modelname=nm, model_nam_file=model_nam_file, save_flows=True -) - -# Create the Flopy iterative model solver (ims) Package object -ims = flopy.mf6.modflow.mfims.ModflowIms( - sim, - pname="ims", - complexity="SIMPLE", - outer_dvclose=1e-6, - inner_dvclose=1e-6, - rcloserecord=1e-6, -) - -# create gwf file -dis = flopy.mf6.modflow.mfgwfdis.ModflowGwfdis( - gwf, - pname="dis", - nlay=nlay, - nrow=nrow, - ncol=ncol, - length_units="FEET", - delr=delr, - delc=delc, - top=top, - botm=botm, -) -# Create the initial conditions package -ic = flopy.mf6.modflow.mfgwfic.ModflowGwfic(gwf, pname="ic", strt=top) - -# Create the node property flow package -npf = flopy.mf6.modflow.mfgwfnpf.ModflowGwfnpf( - gwf, pname="npf", icelltype=laytyp, k=kh, k33=kv -) - - -# recharge -flopy.mf6.modflow.mfgwfrcha.ModflowGwfrcha(gwf, recharge=rch) -# wel -wd = [(wel_loc, wel_q)] -flopy.mf6.modflow.mfgwfwel.ModflowGwfwel( - gwf, maxbound=1, stress_period_data={0: wd} -) -# river -rd = [] -for i in range(nrow): - rd.append([(0, i, ncol - 1), riv_h, riv_c, riv_z]) -flopy.mf6.modflow.mfgwfriv.ModflowGwfriv(gwf, stress_period_data={0: rd}) -# Create the output control package -headfile = f"{nm}.hds" -head_record = [headfile] -budgetfile = f"{nm}.cbb" -budget_record = [budgetfile] -saverecord = [("HEAD", "ALL"), ("BUDGET", "ALL")] -oc = flopy.mf6.modflow.mfgwfoc.ModflowGwfoc( - gwf, - pname="oc", - saverecord=saverecord, - head_filerecord=head_record, - budget_filerecord=budget_record, -) - -# Write the datasets -sim.write_simulation() -# Run the simulation -success, buff = sim.run_simulation(silent=True, report=True) -assert success, "mf6 model did not run" -for line in buff: - print(line) -# - - -# #### Create and run MODPATH 7 - -# + -# create modpath files -exe_name = "mp7" -mp = flopy.modpath.Modpath7( - modelname=f"{nm}_mp", flowmodel=gwf, exe_name=exe_name, model_ws=ws -) -mpbas = flopy.modpath.Modpath7Bas(mp, porosity=0.1, defaultiface=defaultiface6) -mpsim = flopy.modpath.Modpath7Sim( - mp, - simulationtype="combined", - trackingdirection="forward", - weaksinkoption="pass_through", - weaksourceoption="pass_through", - budgetoutputoption="summary", - budgetcellnumbers=[1049, 1259], - traceparticledata=[1, 1000], - referencetime=[0, 0, 0.0], - stoptimeoption="extend", - timepointdata=[500, 1000.0], - zonedataoption="on", - zones=zones, - particlegroups=particlegroups, -) - -# write modpath datasets -mp.write_input() - -# run modpath -success, buff = mp.run_model(silent=True, report=True) -assert success, "mp7 failed to run" -for line 
in buff: - print(line) -# - - -# #### Load MODPATH 7 output -# -# Pathline data - -fpth = os.path.join(ws, f"{nm}_mp.mppth") -p = flopy.utils.PathlineFile(fpth) -pw1 = p.get_destination_pathline_data(nodew, to_recarray=True) -pr1 = p.get_destination_pathline_data(nodesr, to_recarray=True) - -# Endpoint data -# -# Get particles that terminate in the well - -fpth = os.path.join(ws, f"{nm}_mp.mpend") -e = flopy.utils.EndpointFile(fpth) -well_epd = e.get_destination_endpoint_data(dest_cells=nodew) - -# Get particles that terminate in the river boundaries - -riv_epd = e.get_destination_endpoint_data(dest_cells=nodesr) - -# Merge the particles that end in the well and the river boundaries. - -epd1 = np.concatenate((well_epd, riv_epd)) - -# ### Plot MODPATH 7 output -# - -mm = flopy.plot.PlotMapView(model=gwf) -mm.plot_grid(lw=0.5) -mm.plot_pathline(pw1, layer="all", colors="blue", label="captured by wells") -mm.plot_pathline(pr1, layer="all", colors="green", label="captured by rivers") -mm.plot_endpoint(epd1, direction="starting", colorbar=True) -mm.ax.legend() - -# ### Compare MODPATH results -# -# Compare MODPATH results for MODFLOW-2005 and MODFLOW 6. Also show pathline points every 5th point. - -# + -f, axes = plt.subplots(ncols=3, nrows=1, sharey=True, figsize=(15, 10)) -axes = axes.flatten() -ax = axes[0] -ax.set_aspect("equal") -mm = flopy.plot.PlotMapView(model=m, ax=ax) -mm.plot_grid(lw=0.5) -mm.plot_pathline( - pw0, - layer="all", - colors="blue", - lw=1, - marker="o", - markercolor="black", - markersize=3, - markerevery=5, -) -mm.plot_pathline( - pr0, - layer="all", - colors="green", - lw=1, - marker="o", - markercolor="black", - markersize=3, - markerevery=5, -) -ax.set_title("MODFLOW-2005") - -ax = axes[1] -ax.set_aspect("equal") -mm = flopy.plot.PlotMapView(model=gwf, ax=ax) -mm.plot_grid(lw=0.5) -mm.plot_pathline( - pw1, - layer="all", - colors="blue", - lw=1, - marker="o", - markercolor="black", - markersize=3, - markerevery=5, -) -mm.plot_pathline( - pr1, - layer="all", - colors="green", - lw=1, - marker="o", - markercolor="black", - markersize=3, - markerevery=5, -) -ax.set_title("MODFLOW 6") - - -ax = axes[2] -ax.set_aspect("equal") -mm = flopy.plot.PlotMapView(model=m, ax=ax) -mm.plot_grid(lw=0.5) -mm.plot_pathline(pw1, layer="all", colors="blue", lw=1, label="MODFLOW 6") -mm.plot_pathline( - pw0, layer="all", colors="blue", lw=1, linestyle=":", label="MODFLOW-2005" -) -mm.plot_pathline(pr1, layer="all", colors="green", lw=1, label="_none") -mm.plot_pathline( - pr0, layer="all", colors="green", lw=1, linestyle=":", label="_none" -) -ax.legend() -ax.set_title("MODFLOW 2005 and MODFLOW 6") diff --git a/.docs/Notebooks/modpath7_structured_transient_example.py b/.docs/Notebooks/modpath7_structured_transient_example.py deleted file mode 100644 index c72280b3c..000000000 --- a/.docs/Notebooks/modpath7_structured_transient_example.py +++ /dev/null @@ -1,406 +0,0 @@ -# --- -# jupyter: -# jupytext: -# notebook_metadata_filter: all -# text_representation: -# extension: .py -# format_name: light -# format_version: '1.5' -# jupytext_version: 1.14.4 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# language_info: -# codemirror_mode: -# name: ipython -# version: 3 -# file_extension: .py -# mimetype: text/x-python -# name: python -# nbconvert_exporter: python -# pygments_lexer: ipython3 -# version: 3.9.12 -# metadata: -# authors: -# - name: Wes Bonelli -# section: modpath -# --- - -# # Using MODPATH 7 with structured grids (transient example) -# -# 
This notebook reproduces example 3a from the MODPATH 7 documentation, demonstrating a transient MODFLOW 6 simulation based on the same flow system as the basic structured and unstructured examples. Particles are released at 10 20-day intervals for the first 200 days of the simulation. 2 discharge wells are added 100,000 days into the simulation and pump at a constant rate for the remainder. There are three stress periods: -# -# | Stress period | Type | Time steps | Length (days) | -# |:--------------|:-------------|:-----------|:--------------| -# | 1 | steady-state | 1 | 100000 | -# | 2 | transient | 10 | 36500 | -# | 3 | steady-state | 1 | 100000 | -# -# ## Setting up the simulation -# -# First import FloPy and set up a temporary workspace. - -# + -import sys -from pathlib import Path -from tempfile import TemporaryDirectory - -import matplotlib as mpl -import matplotlib.pyplot as plt -import numpy as np - -proj_root = Path.cwd().parent.parent - -import flopy - -print(sys.version) -print(f"numpy version: {np.__version__}") -print(f"matplotlib version: {mpl.__version__}") -print(f"flopy version: {flopy.__version__}") - -temp_dir = TemporaryDirectory() -sim_name = "mp7_ex03a_mf6" -workspace = Path(temp_dir.name) / sim_name -# - - -# Define flow model data. - -nlay, nrow, ncol = 3, 21, 20 -delr = delc = 500.0 -top = 400.0 -botm = [220.0, 200.0, 0.0] -laytyp = [1, 0, 0] -kh = [50.0, 0.01, 200.0] -kv = [10.0, 0.01, 20.0] -rch = 0.005 -riv_h = 320.0 -riv_z = 317.0 -riv_c = 1.0e5 - -# Define well data. Although this notebook will refer to layer/row/column indices starting at 1, indices in FloPy (and more generally in Python) are zero-based. A negative discharge indicates pumping, while a positive value indicates injection. - -wells = [ - # layer, row, col, discharge - (0, 10, 9, -75000), - (2, 12, 4, -100000), -] - -# Define the drain location. - -drain = (0, 14, (9, 20)) - -# Configure locations for particle tracking to terminate. We have three explicitly defined termination zones: -# -# - `2`: the well in layer 1, at row 11, column 10 -# - `3`: the well in layer 3, at row 13, column 5 -# - `4`: the drain in layer 1, running through row 15 from column 10-20 -# -# MODFLOW 6 reserves zone number `1` to indicate that particles may move freely within the zone. -# -# The river running through column 20 is also a termination zone, but it doesn't need to be defined separately since we are using the RIV package. - -# + -zone_maps = [] - - -# zone 1 is the default (non-terminating regions) -def fill_zone_1(): - return np.ones((nrow, ncol), dtype=np.int32) - - -# zone map for layer 1 -za = fill_zone_1() -za[wells[0][1:3]] = 2 -za[drain[1], drain[2][0] : drain[2][1]] = 4 -zone_maps.append(za) - -# constant layer 2 (zone 1) -zone_maps.append(1) - -# zone map for layer 3 -za = fill_zone_1() -za[wells[1][1:3]] = 3 -zone_maps.append(za) -# - - -# Define particles to track. We release particles from the top of a 2x2 square of cells in the upper left of the model grid's top layer. - -rel_minl = rel_maxl = 1 -rel_minr = 2 -rel_maxr = 3 -rel_minc = 2 -rel_maxc = 3 -sd = flopy.modpath.CellDataType( - drape=0 -) # particles added at top of cell (no drape) -pd = flopy.modpath.LRCParticleData( - subdivisiondata=[sd], - lrcregions=[ - [[rel_minl, rel_minr, rel_minc, rel_maxl, rel_maxr, rel_maxc]] - ], -) -pg = flopy.modpath.ParticleGroupLRCTemplate( - particlegroupname="PG1", particledata=pd, filename=f"{sim_name}.pg1.sloc" -) -pgs = [pg] -defaultiface = {"RECHARGE": 6, "ET": 6} - -# Create the MODFLOW 6 simulation. 
- -# + -# simulation -sim = flopy.mf6.MFSimulation( - sim_name=sim_name, exe_name="mf6", version="mf6", sim_ws=workspace -) - -# temporal discretization -nper = 3 -pd = [ - # perlen, nstp, tsmult - (100000, 1, 1), - (36500, 10, 1), - (100000, 1, 1), -] -tdis = flopy.mf6.modflow.mftdis.ModflowTdis( - sim, pname="tdis", time_units="DAYS", nper=nper, perioddata=pd -) - -# groundwater flow (gwf) model -model_nam_file = f"{sim_name}.nam" -gwf = flopy.mf6.ModflowGwf( - sim, modelname=sim_name, model_nam_file=model_nam_file, save_flows=True -) - -# iterative model solver (ims) package -ims = flopy.mf6.modflow.mfims.ModflowIms( - sim, - pname="ims", - complexity="SIMPLE", - outer_dvclose=1e-6, - inner_dvclose=1e-6, - rcloserecord=1e-6, -) - -# grid discretization -dis = flopy.mf6.modflow.mfgwfdis.ModflowGwfdis( - gwf, - pname="dis", - nlay=nlay, - nrow=nrow, - ncol=ncol, - length_units="FEET", - delr=delr, - delc=delc, - top=top, - botm=botm, -) - -# initial conditions -ic = flopy.mf6.modflow.mfgwfic.ModflowGwfic(gwf, pname="ic", strt=top) - -# node property flow -npf = flopy.mf6.modflow.mfgwfnpf.ModflowGwfnpf( - gwf, pname="npf", icelltype=laytyp, k=kh, k33=kv -) - -# recharge -rch = flopy.mf6.modflow.mfgwfrcha.ModflowGwfrcha(gwf, recharge=rch) - - -# wells -def no_flow(w): - return w[0], w[1], w[2], 0 - - -wel = flopy.mf6.modflow.mfgwfwel.ModflowGwfwel( - gwf, - maxbound=1, - stress_period_data={0: [no_flow(w) for w in wells], 1: wells, 2: wells}, -) - -# river -rd = [[(0, i, ncol - 1), riv_h, riv_c, riv_z] for i in range(nrow)] -flopy.mf6.modflow.mfgwfriv.ModflowGwfriv( - gwf, stress_period_data={0: rd, 1: rd, 2: rd} -) - -# drain (set auxiliary IFACE var to 6 for top of cell) -dd = [ - [drain[0], drain[1], i + drain[2][0], 322.5, 100000.0, 6] - for i in range(drain[2][1] - drain[2][0]) -] -drn = flopy.mf6.modflow.mfgwfdrn.ModflowGwfdrn( - gwf, auxiliary=["IFACE"], stress_period_data={0: dd} -) - -# output control -headfile = f"{sim_name}.hds" -head_record = [headfile] -budgetfile = f"{sim_name}.cbb" -budget_record = [budgetfile] -saverecord = [("HEAD", "ALL"), ("BUDGET", "ALL")] -oc = flopy.mf6.modflow.mfgwfoc.ModflowGwfoc( - gwf, - pname="oc", - saverecord=saverecord, - head_filerecord=head_record, - budget_filerecord=budget_record, -) - - -# - - -# Take a look at the model grid before running the simulation. - - -# + -def add_release(ax): - ax.add_patch( - mpl.patches.Rectangle( - (2 * delc, (nrow - 2) * delr), - 1000, - -1000, - facecolor="green", - ) - ) - - -def add_legend(ax): - ax.legend( - handles=[ - mpl.patches.Patch(color="teal", label="river"), - mpl.patches.Patch(color="red", label="wells "), - mpl.patches.Patch(color="yellow", label="drain"), - mpl.patches.Patch(color="green", label="release"), - ] - ) - - -fig = plt.figure(figsize=(8, 8)) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -mv = flopy.plot.PlotMapView(model=gwf) -mv.plot_grid() -mv.plot_bc("DRN") -mv.plot_bc("RIV") -mv.plot_bc("WEL", plotAll=True) # include both wells (1st and 3rd layer) -add_release(ax) -add_legend(ax) -plt.show() -# - - -# ## Running the simulation -# -# Run the MODFLOW 6 flow simulation. - -sim.write_simulation() -success, buff = sim.run_simulation(silent=True, report=True) -assert success, "Failed to run simulation." -for line in buff: - print(line) - -# Create and run MODPATH 7 particle tracking model in `combined` mode, which includes both pathline and timeseries. 
- -# + -# create modpath files -mp = flopy.modpath.Modpath7( - modelname=f"{sim_name}_mp", - flowmodel=gwf, - exe_name="mp7", - model_ws=workspace, -) -mpbas = flopy.modpath.Modpath7Bas(mp, porosity=0.1, defaultiface=defaultiface) -mpsim = flopy.modpath.Modpath7Sim( - mp, - simulationtype="combined", - trackingdirection="forward", - weaksinkoption="pass_through", - weaksourceoption="pass_through", - budgetoutputoption="summary", - referencetime=[0, 0, 0.9], - timepointdata=[10, 20.0], # release every 20 days, for 200 days - zonedataoption="on", - zones=zone_maps, - particlegroups=pgs, -) - -mp.write_input() -success, buff = mp.run_model(silent=True, report=True) -assert success -for line in buff: - print(line) -# - - -# ## Inspecting results -# -# First we need the particle termination locations. - -wel_locs = [w[0:3] for w in wells] -riv_locs = [(0, i, 19) for i in range(20)] -drn_locs = [(drain[0], drain[1], d) for d in range(drain[2][0], drain[2][1])] -wel_nids = gwf.modelgrid.get_node(wel_locs) -riv_nids = gwf.modelgrid.get_node(riv_locs) -drn_nids = gwf.modelgrid.get_node(drn_locs) - -# Next, load pathline data from the MODPATH 7 pathline output file, filtering by termination location. - -# + -fpth = workspace / f"{sim_name}_mp.mppth" -p = flopy.utils.PathlineFile(fpth) - -pl1 = p.get_destination_pathline_data(wel_nids, to_recarray=True) -pl2 = p.get_destination_pathline_data(riv_nids + drn_nids, to_recarray=True) -# - - -# Load endpoint data from the MODPATH 7 endpoint output file. - -# + -fpth = workspace / f"{sim_name}_mp.mpend" -e = flopy.utils.EndpointFile(fpth) - -ep1 = e.get_destination_endpoint_data(dest_cells=wel_nids) -ep2 = e.get_destination_endpoint_data(dest_cells=riv_nids + drn_nids) -# - - -# Extract head data from the GWF model's output files. - -hf = flopy.utils.HeadFile(workspace / f"{sim_name}.hds") -head = hf.get_data() - -# Plot heads over a map view of the model, then add particle starting points and pathlines. The apparent number of particle starting locations is less than the total number of particles because a separate particle begins at each location every 20 days during the release period at the beginning of the simulation. - -# + -fig = plt.figure(figsize=(10, 10)) -ax = fig.add_subplot(1, 1, 1, aspect="equal") - -mv = flopy.plot.PlotMapView(model=gwf) -mv.plot_grid(lw=0.5) -mv.plot_bc("DRN") -mv.plot_bc("RIV") -mv.plot_bc("WEL", plotAll=True) -hd = mv.plot_array(head, alpha=0.1) -cb = plt.colorbar(hd, shrink=0.5) -cb.set_label("Head") -mv.plot_pathline( - pl1, layer="all", alpha=0.1, colors=["red"], lw=2, label="captured by well" -) -mv.plot_pathline( - pl2, - layer="all", - alpha=0.1, - colors=["blue"], - lw=2, - label="captured by drain/river", -) -add_release(ax) -mv.ax.legend() -plt.show() -# - - -# Clean up the temporary directory. 
- -try: - # ignore PermissionError on Windows - temp_dir.cleanup() -except: - pass diff --git a/.docs/Notebooks/modpath7_unstructured_example.py b/.docs/Notebooks/modpath7_unstructured_example.py deleted file mode 100644 index 54e4ee3c0..000000000 --- a/.docs/Notebooks/modpath7_unstructured_example.py +++ /dev/null @@ -1,561 +0,0 @@ -# --- -# jupyter: -# jupytext: -# notebook_metadata_filter: all -# text_representation: -# extension: .py -# format_name: light -# format_version: '1.5' -# jupytext_version: 1.14.5 -# kernelspec: -# display_name: Python 3 (ipykernel) -# language: python -# name: python3 -# metadata: -# section: modpath -# authors: -# - name: Joseph Hughes -# --- - -# # Using MODPATH 7 with a DISV unstructured model -# -# This is a replication of the MODPATH Problem 2 example that is described on page 12 of the modpath_7_examples.pdf file. The results shown here should be the same as the results in the MODPATH example, however, the vertex and node numbering used here may be different from the numbering used in MODPATH, so head values may not be compared directly without some additional mapping. - -# ## Part I. Setup Notebook - -import os - -# + -import sys -from pathlib import Path -from tempfile import TemporaryDirectory - -import matplotlib as mpl -import matplotlib.pyplot as plt -import numpy as np - -proj_root = Path.cwd().parent.parent - -import flopy - -print(sys.version) -print(f"numpy version: {np.__version__}") -print(f"matplotlib version: {mpl.__version__}") -print(f"flopy version: {flopy.__version__}") - -# temporary directory -temp_dir = TemporaryDirectory() -workspace = Path(temp_dir.name) -# - - -# ## Part II. Gridgen Creation of Model Grid -# -# Create the base model grid. - -Lx = 10000.0 -Ly = 10500.0 -nlay = 3 -nrow = 21 -ncol = 20 -delr = Lx / ncol -delc = Ly / nrow -top = 400 -botm = [220, 200, 0] - -ms = flopy.modflow.Modflow() -dis5 = flopy.modflow.ModflowDis( - ms, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - top=top, - botm=botm, -) - -# Create the `Gridgen` object. - -# + -from flopy.utils.gridgen import Gridgen - -model_name = "mp7p2_u" -model_ws = workspace / "mp7_ex2" / "mf6" -gridgen_ws = model_ws / "gridgen" -g = Gridgen(ms.modelgrid, model_ws=gridgen_ws) -# - - -# Refine the grid. - -# + -rf0shp = gridgen_ws / "rf0" -xmin = 7 * delr -xmax = 12 * delr -ymin = 8 * delc -ymax = 13 * delc -rfpoly = [ - [ - list( - reversed( - [ - (xmin, ymin), - (xmax, ymin), - (xmax, ymax), - (xmin, ymax), - (xmin, ymin), - ] - ) - ) - ] -] -g.add_refinement_features(rfpoly, "polygon", 1, range(nlay)) - -rf1shp = gridgen_ws / "rf1" -xmin = 8 * delr -xmax = 11 * delr -ymin = 9 * delc -ymax = 12 * delc -rfpoly = [ - [ - list( - reversed( - [ - (xmin, ymin), - (xmax, ymin), - (xmax, ymax), - (xmin, ymax), - (xmin, ymin), - ] - ) - ) - ] -] -g.add_refinement_features(rfpoly, "polygon", 2, range(nlay)) - -rf2shp = gridgen_ws / "rf2" -xmin = 9 * delr -xmax = 10 * delr -ymin = 10 * delc -ymax = 11 * delc -rfpoly = [ - [ - list( - reversed( - [ - (xmin, ymin), - (xmax, ymin), - (xmax, ymax), - (xmin, ymax), - (xmin, ymin), - ] - ) - ) - ] -] -g.add_refinement_features(rfpoly, "polygon", 3, range(nlay)) -# - - -# Show the model grid with refinement levels superimposed. 
- -fig = plt.figure(figsize=(5, 5), constrained_layout=True) -ax = fig.add_subplot(1, 1, 1) -mm = flopy.plot.PlotMapView(model=ms) -mm.plot_grid() -flopy.plot.plot_shapefile(rf0shp, ax=ax, facecolor="yellow", edgecolor="none") -flopy.plot.plot_shapefile(rf1shp, ax=ax, facecolor="pink", edgecolor="none") -flopy.plot.plot_shapefile(rf2shp, ax=ax, facecolor="red", edgecolor="none") - -# Build the refined grid. - -g.build(verbose=False) - -# Show the refined grid. - -fig = plt.figure(figsize=(5, 5), constrained_layout=True) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -g.plot(ax, linewidth=0.5) - -# Extract the refined grid's properties. - -gridprops = g.get_gridprops_disv() -ncpl = gridprops["ncpl"] -top = gridprops["top"] -botm = gridprops["botm"] -nvert = gridprops["nvert"] -vertices = gridprops["vertices"] -cell2d = gridprops["cell2d"] - -# ## Part III. Create the Flopy Model - -# + -# create simulation -sim = flopy.mf6.MFSimulation( - sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=model_ws -) - -# create tdis package -tdis_rc = [(1000.0, 1, 1.0)] -tdis = flopy.mf6.ModflowTdis( - sim, pname="tdis", time_units="DAYS", perioddata=tdis_rc -) - -# create gwf model -gwf = flopy.mf6.ModflowGwf( - sim, modelname=model_name, model_nam_file=f"{model_name}.nam" -) -gwf.name_file.save_flows = True - -# create iterative model solution and register the gwf model with it -ims = flopy.mf6.ModflowIms( - sim, - pname="ims", - print_option="SUMMARY", - complexity="SIMPLE", - outer_dvclose=1.0e-5, - outer_maximum=100, - under_relaxation="NONE", - inner_maximum=100, - inner_dvclose=1.0e-6, - rcloserecord=0.1, - linear_acceleration="BICGSTAB", - scaling_method="NONE", - reordering_method="NONE", - relaxation_factor=0.99, -) -sim.register_ims_package(ims, [gwf.name]) - -# disv -disv = flopy.mf6.ModflowGwfdisv( - gwf, - nlay=nlay, - ncpl=ncpl, - top=top, - botm=botm, - nvert=nvert, - vertices=vertices, - cell2d=cell2d, -) - -# initial conditions -ic = flopy.mf6.ModflowGwfic(gwf, pname="ic", strt=320.0) - -# node property flow -npf = flopy.mf6.ModflowGwfnpf( - gwf, - xt3doptions=[("xt3d")], - icelltype=[1, 0, 0], - k=[50.0, 0.01, 200.0], - k33=[10.0, 0.01, 20.0], -) - -# wel -wellpoints = [(4750.0, 5250.0)] -welcells = g.intersect(wellpoints, "point", 0) -# welspd = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=1, aux_vars=['iface']) -welspd = [[(2, icpl), -150000, 0] for icpl in welcells["nodenumber"]] -wel = flopy.mf6.ModflowGwfwel( - gwf, print_input=True, auxiliary=[("iface",)], stress_period_data=welspd -) - -# rch -aux = [np.ones(ncpl, dtype=int) * 6] -rch = flopy.mf6.ModflowGwfrcha( - gwf, recharge=0.005, auxiliary=[("iface",)], aux={0: [6]} -) -# riv -riverline = [[(Lx - 1.0, Ly), (Lx - 1.0, 0.0)]] -rivcells = g.intersect(riverline, "line", 0) -rivspd = [[(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"]] -riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd) - -# output control -oc = flopy.mf6.ModflowGwfoc( - gwf, - pname="oc", - budget_filerecord=f"{model_name}.cbb", - head_filerecord=f"{model_name}.hds", - headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], - saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], -) -# - - -# Now write the simulation input files. - -sim.write_simulation() - -# ## Part IV. Run the MODFLOW 6 Model - -success, buff = sim.run_simulation(silent=True, report=True) -assert success, "mf6 failed to run" -for line in buff: - print(line) - -# ## Part V. 
Import and Plot the Results - -# Plot the boundary conditions on the grid. - -fname = os.path.join(model_ws, f"{model_name}.disv.grb") -grd = flopy.mf6.utils.MfGrdFile(fname, verbose=False) -mg = grd.modelgrid -ibd = np.zeros((ncpl), dtype=int) -ibd[welcells["nodenumber"]] = 1 -ibd[rivcells["nodenumber"]] = 2 -ibd = np.ma.masked_equal(ibd, 0) -fig = plt.figure(figsize=(8, 8), constrained_layout=True) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -pmv = flopy.plot.PlotMapView(modelgrid=mg, ax=ax) -ax.set_xlim(0, Lx) -ax.set_ylim(0, Ly) -cmap = mpl.colors.ListedColormap( - [ - "r", - "g", - ] -) -pc = pmv.plot_array(ibd, cmap=cmap, edgecolor="gray") -t = ax.set_title("Boundary Conditions\n") - -fname = os.path.join(model_ws, f"{model_name}.hds") -hdobj = flopy.utils.HeadFile(fname) -head = hdobj.get_data() -head.shape - -ilay = 2 -cint = 0.25 -fig = plt.figure(figsize=(8, 8), constrained_layout=True) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -mm = flopy.plot.PlotMapView(modelgrid=mg, ax=ax, layer=ilay) -ax.set_xlim(0, Lx) -ax.set_ylim(0, Ly) -pc = mm.plot_array(head[:, 0, :], cmap="jet", edgecolor="black") -hmin = head[ilay, 0, :].min() -hmax = head[ilay, 0, :].max() -levels = np.arange(np.floor(hmin), np.ceil(hmax) + cint, cint) -cs = mm.contour_array(head[:, 0, :], colors="white", levels=levels) -plt.clabel(cs, fmt="%.1f", colors="white", fontsize=11) -cb = plt.colorbar(pc, shrink=0.5) -t = ax.set_title(f"Model Layer {ilay + 1}; hmin={hmin:6.2f}, hmax={hmax:6.2f}") - -# Inspect model cells and vertices. - -# + -# zoom area -xmin, xmax = 2000, 4500 -ymin, ymax = 5400, 7500 - -mg.get_cell_vertices -fig = plt.figure(figsize=(8, 8), constrained_layout=True) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -mm = flopy.plot.PlotMapView(modelgrid=mg, ax=ax) -v = mm.plot_grid(edgecolor="black") -t = ax.set_title("Model Cells and Vertices (one-based)\n") -ax.set_xlim(xmin, xmax) -ax.set_ylim(ymin, ymax) - -verts = mg.verts -ax.plot(verts[:, 0], verts[:, 1], "bo") -for i in range(ncpl): - x, y = verts[i, 0], verts[i, 1] - if xmin <= x <= xmax and ymin <= y <= ymax: - ax.annotate(str(i + 1), verts[i, :], color="b") - -xc, yc = mg.get_xcellcenters_for_layer(0), mg.get_ycellcenters_for_layer(0) -for i in range(ncpl): - x, y = xc[i], yc[i] - ax.plot(x, y, "ro") - if xmin <= x <= xmax and ymin <= y <= ymax: - ax.annotate(str(i + 1), (x, y), color="r") -# - - -# ## Part VI. Create the Flopy MODPATH7 Models -# -# Define names for the MODPATH 7 simulations. - -mp_namea = f"{model_name}a_mp" -mp_nameb = f"{model_name}b_mp" - -# Create particles for the pathline and timeseries analysis. - -# + -pcoord = np.array( - [ - [0.000, 0.125, 0.500], - [0.000, 0.375, 0.500], - [0.000, 0.625, 0.500], - [0.000, 0.875, 0.500], - [1.000, 0.125, 0.500], - [1.000, 0.375, 0.500], - [1.000, 0.625, 0.500], - [1.000, 0.875, 0.500], - [0.125, 0.000, 0.500], - [0.375, 0.000, 0.500], - [0.625, 0.000, 0.500], - [0.875, 0.000, 0.500], - [0.125, 1.000, 0.500], - [0.375, 1.000, 0.500], - [0.625, 1.000, 0.500], - [0.875, 1.000, 0.500], - ] -) -nodew = gwf.disv.ncpl.array * 2 + welcells["nodenumber"][0] -plocs = [nodew for i in range(pcoord.shape[0])] - -# create particle data -pa = flopy.modpath.ParticleData( - plocs, - structured=False, - localx=pcoord[:, 0], - localy=pcoord[:, 1], - localz=pcoord[:, 2], - drape=0, -) - -# create backward particle group -fpth = f"{mp_namea}.sloc" -pga = flopy.modpath.ParticleGroup( - particlegroupname="BACKWARD1", particledata=pa, filename=fpth -) -# - - -# Create particles for endpoint analysis. 
- -facedata = flopy.modpath.FaceDataType( - drape=0, - verticaldivisions1=10, - horizontaldivisions1=10, - verticaldivisions2=10, - horizontaldivisions2=10, - verticaldivisions3=10, - horizontaldivisions3=10, - verticaldivisions4=10, - horizontaldivisions4=10, - rowdivisions5=0, - columndivisions5=0, - rowdivisions6=4, - columndivisions6=4, -) -pb = flopy.modpath.NodeParticleData(subdivisiondata=facedata, nodes=nodew) -# create forward particle group -fpth = f"{mp_nameb}.sloc" -pgb = flopy.modpath.ParticleGroupNodeTemplate( - particlegroupname="BACKWARD2", particledata=pb, filename=fpth -) - -# Create and run the pathline and timeseries analysis model. - -# + -# create modpath files -mp = flopy.modpath.Modpath7( - modelname=mp_namea, flowmodel=gwf, exe_name="mp7", model_ws=model_ws -) -flopy.modpath.Modpath7Bas(mp, porosity=0.1) -flopy.modpath.Modpath7Sim( - mp, - simulationtype="combined", - trackingdirection="backward", - weaksinkoption="pass_through", - weaksourceoption="pass_through", - referencetime=0.0, - stoptimeoption="extend", - timepointdata=[500, 1000.0], - particlegroups=pga, -) - -# write modpath datasets -mp.write_input() - -# run modpath -success, buff = mp.run_model(silent=True, report=True) -assert success, "mp7 failed to run" -for line in buff: - print(line) -# - - -# Load the pathline and timeseries data. - -fpth = model_ws / f"{mp_namea}.mppth" -p = flopy.utils.PathlineFile(fpth) -p0 = p.get_alldata() - -fpth = model_ws / f"{mp_namea}.timeseries" -ts = flopy.utils.TimeseriesFile(fpth) -ts0 = ts.get_alldata() - -# Plot the pathline and timeseries data. - -fig = plt.figure(figsize=(8, 8), constrained_layout=True) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -mm = flopy.plot.PlotMapView(modelgrid=mg, ax=ax) -ax.set_xlim(0, Lx) -ax.set_ylim(0, Ly) -cmap = mpl.colors.ListedColormap( - [ - "r", - "g", - ] -) -v = mm.plot_array(ibd, cmap=cmap, edgecolor="gray") -mm.plot_pathline(p0, layer="all", colors="blue", lw=0.75) -colors = ["green", "orange", "red"] -for k in range(nlay): - mm.plot_timeseries(ts0, layer=k, marker="o", lw=0, color=colors[k]) - -# Create and run the endpoint analysis model. - -# + -# create modpath files -mp = flopy.modpath.Modpath7( - modelname=mp_nameb, flowmodel=gwf, exe_name="mp7", model_ws=model_ws -) -flopy.modpath.Modpath7Bas(mp, porosity=0.1) -flopy.modpath.Modpath7Sim( - mp, - simulationtype="endpoint", - trackingdirection="backward", - weaksinkoption="pass_through", - weaksourceoption="pass_through", - referencetime=0.0, - stoptimeoption="extend", - particlegroups=pgb, -) - -# write modpath datasets -mp.write_input() - -# run modpath -success, buff = mp.run_model(silent=True, report=True) -assert success, "mp7 failed to run" -for line in buff: - print(line) -# - - -# Load the endpoint data. - -fpth = model_ws / f"{mp_nameb}.mpend" -e = flopy.utils.EndpointFile(fpth) -e0 = e.get_alldata() - -# Plot the endpoint data. - -fig = plt.figure(figsize=(8, 8), constrained_layout=True) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -mm = flopy.plot.PlotMapView(modelgrid=mg, ax=ax) -ax.set_xlim(0, Lx) -ax.set_ylim(0, Ly) -cmap = mpl.colors.ListedColormap( - [ - "r", - "g", - ] -) -v = mm.plot_array(ibd, cmap=cmap, edgecolor="gray") -mm.plot_endpoint(e0, direction="ending", colorbar=True, shrink=0.5) - -# Clean up the temporary workspace. 
- -try: - # ignore PermissionError on Windows - temp_dir.cleanup() -except: - pass diff --git a/.docs/Notebooks/modpath7_unstructured_lateral_example.py b/.docs/Notebooks/modpath7_unstructured_lateral_example.py deleted file mode 100644 index 9b3f85c3b..000000000 --- a/.docs/Notebooks/modpath7_unstructured_lateral_example.py +++ /dev/null @@ -1,576 +0,0 @@ -# --- -# jupyter: -# jupytext: -# notebook_metadata_filter: all -# text_representation: -# extension: .py -# format_name: light -# format_version: '1.5' -# jupytext_version: 1.14.5 -# kernelspec: -# display_name: Python 3 -# language: python -# name: python3 -# metadata: -# section: modpath -# authors: -# - name: Wes Bonelli -# --- - -# # Using MODPATH 7: DISV quadpatch example -# -# This notebook demonstrates example 4 from the MODPATH 7 documentation, a steady-state MODFLOW 6 simulation using a quadpatch DISV grid with an irregular domain and a large number of inactive cells. Particles are tracked backwards from terminating locations, including a pair of wells in a locally-refined region of the grid and constant-head cells along the grid's right side, to release locations along the left border of the grid's active region. Injection wells along the left-hand border are used to generate boundary flows. -# -# First import FloPy and set up a temporary workspace. - -# + -import sys -from pathlib import Path -from tempfile import TemporaryDirectory - -import matplotlib as mpl -import matplotlib.pyplot as plt -import numpy as np - -proj_root = Path.cwd().parent.parent - -import flopy - -temp_dir = TemporaryDirectory() -workspace = Path(temp_dir.name) -sim_name = "ex04_mf6" - -print("Python version:", sys.version) -print("NumPy version:", np.__version__) -print("Matplotlib version:", mpl.__version__) -print("FloPy version:", flopy.__version__) -# - - -# ## Grid creation/refinement -# -# In this example we use GRIDGEN to create a quadpatch grid with a refined region in the upper left quadrant. -# -# The grid has 3 nested refinement levels, all nearly but not perfectly rectangular (a 500x500 area is carved out of each corner of each). Outer levels of refinement have a width of 500. To produce this pattern we use 5 rectangular polygons for each level. -# -# First, create the coarse-grained grid discretization. - -nlay, nrow, ncol = 1, 21, 26 # coarsest-grained grid is 21x26 -delr = delc = 500.0 -top = 100.0 -botm = np.zeros((nlay, nrow, ncol), dtype=np.float32) -ms = flopy.modflow.Modflow() -dis = flopy.modflow.ModflowDis( - ms, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - top=top, - botm=botm, -) - -# Next, refine the grid. Create a `Gridgen` object from the base grid, then add refinement features (3 groups of polygons). 
- -# + -from flopy.utils.gridgen import Gridgen - -# create Gridgen workspace -gridgen_ws = workspace / "gridgen" -gridgen_ws.mkdir() - -# create Gridgen object -g = Gridgen(ms.modelgrid, model_ws=gridgen_ws) - -# add polygon for each refinement level -outer_polygon = [ - [ - (2500, 6000), - (2500, 9500), - (3000, 9500), - (3000, 10000), - (6000, 10000), - (6000, 9500), - (6500, 9500), - (6500, 6000), - (6000, 6000), - (6000, 5500), - (3000, 5500), - (3000, 6000), - (2500, 6000), - ] -] -g.add_refinement_features([outer_polygon], "polygon", 1, range(nlay)) -refshp0 = gridgen_ws / "rf0" - -middle_polygon = [ - [ - (3000, 6500), - (3000, 9000), - (3500, 9000), - (3500, 9500), - (5500, 9500), - (5500, 9000), - (6000, 9000), - (6000, 6500), - (5500, 6500), - (5500, 6000), - (3500, 6000), - (3500, 6500), - (3000, 6500), - ] -] -g.add_refinement_features([middle_polygon], "polygon", 2, range(nlay)) -refshp1 = gridgen_ws / "rf1" - -inner_polygon = [ - [ - (3500, 7000), - (3500, 8500), - (4000, 8500), - (4000, 9000), - (5000, 9000), - (5000, 8500), - (5500, 8500), - (5500, 7000), - (5000, 7000), - (5000, 6500), - (4000, 6500), - (4000, 7000), - (3500, 7000), - ] -] -g.add_refinement_features([inner_polygon], "polygon", 3, range(nlay)) -refshp2 = gridgen_ws / "rf2" -# - - -# Create and plot the refined grid with refinement levels superimposed. - -# + -g.build(verbose=False) -grid = flopy.discretization.VertexGrid(**g.get_gridprops_vertexgrid()) - -fig = plt.figure(figsize=(15, 15)) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -mm = flopy.plot.PlotMapView(model=ms) -grid.plot(ax=ax) -flopy.plot.plot_shapefile(refshp0, ax=ax, facecolor="green", alpha=0.3) -flopy.plot.plot_shapefile(refshp1, ax=ax, facecolor="green", alpha=0.5) -flopy.plot.plot_shapefile(str(refshp2), ax=ax, facecolor="green", alpha=0.7) -# - - -# ## Groundwater flow model -# -# Next, create a GWF model. The particle-tracking model will consume its output. 
- -# + -# simulation -sim = flopy.mf6.MFSimulation( - sim_name=sim_name, sim_ws=workspace, exe_name="mf6", version="mf6" -) - -# temporal discretization -tdis = flopy.mf6.ModflowTdis( - sim, time_units="days", nper=1, perioddata=[(10000, 1, 1.0)] -) - -# iterative model solver -ims = flopy.mf6.ModflowIms( - sim, - pname="ims", - complexity="SIMPLE", - outer_dvclose=1e-4, - outer_maximum=100, - inner_dvclose=1e-5, - under_relaxation_theta=0, - under_relaxation_kappa=0, - under_relaxation_gamma=0, - under_relaxation_momentum=0, - linear_acceleration="BICGSTAB", - relaxation_factor=0.99, - number_orthogonalizations=2, -) - -# groundwater flow model -gwf = flopy.mf6.ModflowGwf( - sim, modelname=sim_name, model_nam_file=f"{sim_name}.nam", save_flows=True -) - -# grid discretization -# fmt: off -idomain = [ - 0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1,0, - 0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,1,1, - 1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, - 0,0,1,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0, - 0,0,0,0,0,0,0,0,1,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,1,1,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,1,1,1,1,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,1, - 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0,0, - 0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, - 0,0,0,0,0,1,1,1,1,1,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0, - 0,0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,0,0,0,0,0,0,0,0 -] -# fmt: on -disv_props = g.get_gridprops_disv() -disv = flopy.mf6.ModflowGwfdisv( - gwf, length_units="feet", idomain=idomain, **disv_props -) - -# initial conditions -ic = flopy.mf6.ModflowGwfic(gwf, strt=150.0) - -# wells are tuples (layer, node number, q, iface) -wells = [ - # negative q: discharge - (0, 861, -30000.0, 0), - (0, 891, -30000.0, 0), - # positive q: injection - (0, 1959, 10000.0, 1), - (0, 1932, 10000.0, 3), - (0, 1931, 10000.0, 3), - (0, 1930, 5000.0, 1), - (0, 1930, 5000.0, 3), - (0, 1903, 5000.0, 1), - (0, 1903, 5000.0, 3), - (0, 1876, 10000.0, 3), - (0, 1875, 10000.0, 3), - (0, 1874, 5000.0, 1), - (0, 1874, 5000.0, 3), - (0, 1847, 10000.0, 3), - (0, 1846, 5000.0, 3), - (0, 1845, 5000.0, 1), - (0, 1845, 5000.0, 3), - (0, 1818, 5000.0, 1), - (0, 1818, 5000.0, 3), - (0, 1792, 10000.0, 1), - (0, 1766, 10000.0, 1), - (0, 1740, 5000.0, 1), - (0, 1740, 5000.0, 4), - (0, 1715, 5000.0, 1), - (0, 1715, 5000.0, 4), - (0, 1690, 10000.0, 1), - (0, 1646, 5000.0, 1), - (0, 1646, 5000.0, 4), - (0, 1549, 5000.0, 1), - (0, 1549, 5000.0, 4), - (0, 1332, 5000.0, 4), - (0, 1332, 5000.0, 1), - (0, 1021, 2500.0, 1), - (0, 1021, 2500.0, 4), - (0, 1020, 5000.0, 1), - (0, 708, 2500.0, 1), - (0, 708, 2500.0, 4), - (0, 711, 625.0, 1), - (0, 711, 625.0, 4), - (0, 710, 625.0, 1), - (0, 710, 625.0, 4), - (0, 409, 1250.0, 1), - (0, 407, 625.0, 1), - (0, 407, 625.0, 4), - (0, 402, 625.0, 1), - (0, 402, 625.0, 4), - (0, 413, 1250.0, 1), - (0, 411, 1250.0, 1), - (0, 203, 1250.0, 1), - (0, 202, 1250.0, 1), - (0, 202, 1250.0, 4), - (0, 199, 2500.0, 1), - (0, 197, 1250.0, 1), - (0, 197, 1250.0, 4), - (0, 96, 2500.0, 1), - (0, 97, 1250.0, 1), - (0, 97, 1250.0, 4), - (0, 103, 1250.0, 1), - (0, 103, 1250.0, 4), - (0, 102, 1250.0, 1), - (0, 102, 1250.0, 4), - (0, 43, 2500.0, 1), - (0, 43, 2500.0, 4), - (0, 44, 2500.0, 1), - (0, 44, 2500.0, 4), - (0, 45, 5000.0, 4), - (0, 10, 10000.0, 1), -] -flopy.mf6.modflow.mfgwfwel.ModflowGwfwel( - gwf, - maxbound=68, - auxiliary="IFACE", - save_flows=True, - stress_period_data={0: wells}, -) - -# node property flow -npf 
= flopy.mf6.ModflowGwfnpf( - gwf, - xt3doptions=True, - save_flows=True, - save_specific_discharge=True, - icelltype=[0], - k=[50], -) - -# constant head boundary (period, node number, head) -chd_bound = [ - (0, 1327, 150.0), - (0, 1545, 150.0), - (0, 1643, 150.0), - (0, 1687, 150.0), - (0, 1713, 150.0), -] -chd = flopy.mf6.ModflowGwfchd( - gwf, pname="chd", save_flows=True, stress_period_data=chd_bound -) - -# output control -budget_file = f"{sim_name}.bud" -head_file = f"{sim_name}.hds" -oc = flopy.mf6.ModflowGwfoc( - gwf, - pname="oc", - budget_filerecord=[budget_file], - head_filerecord=[head_file], - saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], -) -# - - -# Before running the simulation, view the model's boundary conditions. - -# + -fig = plt.figure(figsize=(13, 13)) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -mv = flopy.plot.PlotMapView(model=gwf, ax=ax) -mv.plot_grid(alpha=0.3) -mv.plot_ibound() -mv.plot_bc("WEL") -ax.add_patch( - mpl.patches.Rectangle( - ((ncol - 1) * delc, (nrow - 6) * delr), - 1000, - -2500, - linewidth=5, - facecolor="blue", - alpha=0.5, - ) -) -ax.legend( - handles=[ - mpl.patches.Patch(color="red", label="WEL"), - mpl.patches.Patch(color="blue", label="CHB"), - ] -) - -plt.show() -# - - -# Run the simulation. - -sim.set_sim_path(workspace) -sim.write_simulation() -success, buff = sim.run_simulation(silent=True, report=True) -assert success, "Failed to run MF6 simulation." -for line in buff: - print(line) - -# ## Particle tracking - -# -# This example is a reverse-tracking model, with termination and release zones inverted: we "release" particles from the constant head boundary on the grid's right edge and from the two pumping wells, and track the particles backwards to release locations at the wells along the left boundary of the active domain. 
- -# + -particles = [ - # node number, localx, localy, localz - (1327, 0.000, 0.125, 0.500), - (1327, 0.000, 0.375, 0.500), - (1327, 0.000, 0.625, 0.500), - (1327, 0.000, 0.875, 0.500), - (1545, 0.000, 0.125, 0.500), - (1545, 0.000, 0.375, 0.500), - (1545, 0.000, 0.625, 0.500), - (1545, 0.000, 0.875, 0.500), - (1643, 0.000, 0.125, 0.500), - (1643, 0.000, 0.375, 0.500), - (1643, 0.000, 0.625, 0.500), - (1643, 0.000, 0.875, 0.500), - (1687, 0.000, 0.125, 0.500), - (1687, 0.000, 0.375, 0.500), - (1687, 0.000, 0.625, 0.500), - (1687, 0.000, 0.875, 0.500), - (1713, 0.000, 0.125, 0.500), - (1713, 0.000, 0.375, 0.500), - (1713, 0.000, 0.625, 0.500), - (1713, 0.000, 0.875, 0.500), - (861, 0.000, 0.125, 0.500), - (861, 0.000, 0.375, 0.500), - (861, 0.000, 0.625, 0.500), - (861, 0.000, 0.875, 0.500), - (861, 1.000, 0.125, 0.500), - (861, 1.000, 0.375, 0.500), - (861, 1.000, 0.625, 0.500), - (861, 1.000, 0.875, 0.500), - (861, 0.125, 0.000, 0.500), - (861, 0.375, 0.000, 0.500), - (861, 0.625, 0.000, 0.500), - (861, 0.875, 0.000, 0.500), - (861, 0.125, 1.000, 0.500), - (861, 0.375, 1.000, 0.500), - (861, 0.625, 1.000, 0.500), - (861, 0.875, 1.000, 0.500), - (891, 0.000, 0.125, 0.500), - (891, 0.000, 0.375, 0.500), - (891, 0.000, 0.625, 0.500), - (891, 0.000, 0.875, 0.500), - (891, 1.000, 0.125, 0.500), - (891, 1.000, 0.375, 0.500), - (891, 1.000, 0.625, 0.500), - (891, 1.000, 0.875, 0.500), - (891, 0.125, 0.000, 0.500), - (891, 0.375, 0.000, 0.500), - (891, 0.625, 0.000, 0.500), - (891, 0.875, 0.000, 0.500), - (891, 0.125, 1.000, 0.500), - (891, 0.375, 1.000, 0.500), - (891, 0.625, 1.000, 0.500), - (891, 0.875, 1.000, 0.500), -] - -pd = flopy.modpath.ParticleData( - partlocs=[p[0] for p in particles], - localx=[p[1] for p in particles], - localy=[p[2] for p in particles], - localz=[p[3] for p in particles], - timeoffset=0, - drape=0, -) -pg = flopy.modpath.ParticleGroup( - particlegroupname="G1", particledata=pd, filename=f"{sim_name}.sloc" -) -# - - -# Create and run the backwards particle tracking model in `pathline` mode. - -# + -mp = flopy.modpath.Modpath7( - modelname=f"{sim_name}_mp", - flowmodel=gwf, - exe_name="mp7", - model_ws=workspace, -) -mpbas = flopy.modpath.Modpath7Bas( - mp, - porosity=0.1, -) -mpsim = flopy.modpath.Modpath7Sim( - mp, - simulationtype="pathline", - trackingdirection="backward", - budgetoutputoption="summary", - particlegroups=[pg], -) - -mp.write_input() -success, buff = mp.run_model(silent=True, report=True) -assert success, "Failed to run particle-tracking model." -for line in buff: - print(line) -# - - -# Load pathline data from the model's pathline output file. - -fpth = workspace / f"{sim_name}_mp.mppth" -p = flopy.utils.PathlineFile(fpth) -pl = p.get_destination_pathline_data( - range(gwf.modelgrid.nnodes), to_recarray=True -) - -# Load head data. - -hf = flopy.utils.HeadFile(workspace / f"{sim_name}.hds") -hd = hf.get_data() - -# Plot heads and particle paths over the grid. - -fig = plt.figure(figsize=(11, 11)) -ax = fig.add_subplot(1, 1, 1, aspect="equal") -mm = flopy.plot.PlotMapView(model=gwf) -mm.plot_grid(lw=0.5, alpha=0.5) -mm.plot_ibound() -mm.plot_array(hd, alpha=0.5) -mm.plot_pathline(pl, layer="all", lw=0.3, colors=["black"]) -plt.show() - -# Clean up the temporary workspace. 
- -try: - # ignore PermissionError on Windows - temp_dir.cleanup() -except: - pass From 3c6496b9578a1cb82c6165f50dd6d31100854905 Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Wed, 3 Jul 2024 17:32:39 -0400 Subject: [PATCH 43/57] ci(rtd): add ref input for rtd build (#2257) Followup to #2254 --- .github/workflows/rtd.yml | 37 ++++++++++++++++++++++++++++++++++--- 1 file changed, 34 insertions(+), 3 deletions(-) diff --git a/.github/workflows/rtd.yml b/.github/workflows/rtd.yml index 6c876a9bd..a644e7f33 100644 --- a/.github/workflows/rtd.yml +++ b/.github/workflows/rtd.yml @@ -6,12 +6,39 @@ on: - master - develop workflow_dispatch: + inputs: + ref: + description: 'The tag, branch or commit hash to trigger an RTD build for. Branches and tags must be fully formed, e.g. refs/heads/ or refs/tags/ respectively.' + required: false + type: string + default: 'refs/heads/develop' concurrency: group: ${{ github.workflow }}-${{ github.ref }} cancel-in-progress: true jobs: + set_options: + name: Set release options + if: github.ref_name != 'master' + runs-on: ubuntu-22.04 + outputs: + ref: ${{ steps.set_ref.outputs.ref }} + steps: + - name: Set ref + id: set_ref + run: | + # if ref was provided explicitly via workflow_dispatch, use it + if [[ ("${{ github.event_name }}" == "workflow_dispatch") && (-n "${{ inputs.ref }}") ]]; then + ref="${{ inputs.ref }}" + echo "using ref $ref from workflow_dispatch" + else + # otherwise use the current branch + ref="${{ github.ref }}" + echo "using current ref $ref" + fi + echo "ref=$ref" >> $GITHUB_OUTPUT rtd_build: name: Prepare and test notebooks + needs: set_options runs-on: ${{ matrix.os }} strategy: fail-fast: false @@ -25,6 +52,8 @@ jobs: steps: - name: Checkout flopy repo uses: actions/checkout@v4 + with: + ref: ${{ needs.set_options.outputs.ref }} - name: Output repo information run: | @@ -109,7 +138,9 @@ jobs: # trigger rtd if previous job was successful rtd: name: Read the Docs trigger - needs: rtd_build + needs: + - rtd_build + - set_options runs-on: ubuntu-latest if: github.repository_owner == 'modflowpy' && @@ -118,9 +149,9 @@ jobs: github.event_name == 'workflow_dispatch' ) steps: - - name: Trigger RTDs build on master and develop branches + - name: Trigger RTDs build uses: dfm/rtds-action@v1 with: webhook_url: ${{ secrets.RTDS_WEBHOOK_URL }} webhook_token: ${{ secrets.RTDS_WEBHOOK_TOKEN }} - commit_ref: ${{ github.ref }} + commit_ref: ${{ needs.set_options.outputs.ref }} From 278efbf6f253be7f1e4b317c7b068206da91e8fb Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Wed, 3 Jul 2024 22:33:57 -0400 Subject: [PATCH 44/57] ci(rtd): fix artifact upload sha, update concurrency key, cleanup (#2258) The RTD build integration expects artifacts named according to the pattern prefix-sha. Previously the workflow used the current ref's hash, not the hash of the selected ref. Fix it so the RTD build can find the proper artifacts. Also add the workflow trigger to the concurrency group key. 
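For illustration, a minimal sketch of the mismatch, assuming the
`notebooks-for-` artifact prefix this workflow uploads with (hashes below
are hypothetical):

```python
# Hypothetical hashes for illustration: a run dispatched for a selected
# ref whose head commit differs from the commit that triggered the run.
selected_sha = "aaa111"  # $(git rev-parse <selected ref>)
trigger_sha = "bbb222"   # ${{ github.sha }}

# Before this fix the artifact was uploaded under the triggering
# commit's hash...
uploaded = f"notebooks-for-{trigger_sha}"

# ...but the RTD build looks it up under the selected ref's hash, so
# the lookup misses whenever the two hashes differ.
expected = f"notebooks-for-{selected_sha}"
assert uploaded != expected
```

The set_sha step added below derives the hash from the selected ref with
git rev-parse, so the upload name and the RTD lookup agree.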
--- .github/workflows/rtd.yml | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/.github/workflows/rtd.yml b/.github/workflows/rtd.yml index a644e7f33..eb197d160 100644 --- a/.github/workflows/rtd.yml +++ b/.github/workflows/rtd.yml @@ -13,15 +13,15 @@ on: type: string default: 'refs/heads/develop' concurrency: - group: ${{ github.workflow }}-${{ github.ref }} + group: ${{ github.workflow }}-${{ github.ref }}-${{ github.event_name }} cancel-in-progress: true jobs: set_options: name: Set release options - if: github.ref_name != 'master' runs-on: ubuntu-22.04 outputs: ref: ${{ steps.set_ref.outputs.ref }} + sha: ${{ steps.set_sha.outputs.sha }} steps: - name: Set ref id: set_ref @@ -36,6 +36,15 @@ jobs: echo "using current ref $ref" fi echo "ref=$ref" >> $GITHUB_OUTPUT + - name: Set sha + id: set_sha + run: | + if [[ ("${{ github.event_name }}" == "workflow_dispatch") && (-n "${{ inputs.ref }}") ]]; then + sha=$(git rev-parse ${{ steps.set_ref.outputs.ref }}) + else + sha="${{ github.sha }}" + fi + echo "sha=$sha" >> $GITHUB_OUTPUT rtd_build: name: Prepare and test notebooks needs: set_options @@ -132,7 +141,7 @@ jobs: ) uses: actions/upload-artifact@v4 with: - name: notebooks-for-${{ github.sha }} + name: notebooks-for-${{ needs.set_options.outputs.sha }} path: .docs/Notebooks/*.ipynb # trigger rtd if previous job was successful From 19ddf6bf3331ab022b55788aca5a7e8471051b41 Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Wed, 3 Jul 2024 23:17:46 -0400 Subject: [PATCH 45/57] ci(rtd): fix sha detection for manual runs (#2259) Followup to #2258 --- .github/workflows/rtd.yml | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/.github/workflows/rtd.yml b/.github/workflows/rtd.yml index eb197d160..b12988912 100644 --- a/.github/workflows/rtd.yml +++ b/.github/workflows/rtd.yml @@ -23,6 +23,7 @@ jobs: ref: ${{ steps.set_ref.outputs.ref }} sha: ${{ steps.set_sha.outputs.sha }} steps: + - name: Set ref id: set_ref run: | @@ -36,6 +37,12 @@ jobs: echo "using current ref $ref" fi echo "ref=$ref" >> $GITHUB_OUTPUT + + - name: Checkout flopy repo + uses: actions/checkout@v4 + with: + ref: ${{ steps.set_ref.outputs.ref }} + - name: Set sha id: set_sha run: | @@ -45,6 +52,7 @@ jobs: sha="${{ github.sha }}" fi echo "sha=$sha" >> $GITHUB_OUTPUT + rtd_build: name: Prepare and test notebooks needs: set_options From 9400d42ccbdb0e2b215e3a63ded8847f7112879b Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Wed, 10 Jul 2024 11:46:59 -0400 Subject: [PATCH 46/57] docs(examples): move geometry data to yml file, inline utilities (#2264) Small step towards #1872. Move geometry info to a YAML file under examples/data/ and move utils into scripts to remove common/ module import and sys.path manipulation. 
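For reference, a minimal sketch of the new pattern (paths as in the diffs
below, resolved relative to a notebook under .docs/Notebooks/), replacing
the old `sys.path.append("../common")` import of the shared module:

```python
import pathlib as pl

import yaml

# load the watershed geometries (domain boundary and stream segments)
# from the shared data file instead of importing them from .docs/common/
geometries = yaml.safe_load(
    open(pl.Path("../../examples/data/groundwater2023/geometries.yml"))
)

# each value is a whitespace-delimited string of "x y" pairs, which the
# inlined string2geom() utility (see the diffs below) converts to a list
# of (x, y) tuples
print(geometries.keys())  # boundary, streamseg1, ..., streamseg4
```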
A later PR may introduce pooch as we have done for the mf6 examples: - MODFLOW-USGS/modflow6-examples#137 - MODFLOW-USGS/modflow6-examples#153 --- .../groundwater2023_watershed_example.py | 76 +- .../mf6_parallel_model_splitting_example.py | 25 +- .docs/common/groundwater2023_utils.py | 198 ----- .docs/common/notebook_utils.py | 776 ------------------ examples/data/groundwater2023/geometries.yml | 137 ++++ 5 files changed, 228 insertions(+), 984 deletions(-) delete mode 100644 .docs/common/groundwater2023_utils.py delete mode 100644 .docs/common/notebook_utils.py create mode 100644 examples/data/groundwater2023/geometries.yml diff --git a/.docs/Notebooks/groundwater2023_watershed_example.py b/.docs/Notebooks/groundwater2023_watershed_example.py index 686520926..8621505dd 100644 --- a/.docs/Notebooks/groundwater2023_watershed_example.py +++ b/.docs/Notebooks/groundwater2023_watershed_example.py @@ -29,12 +29,15 @@ import matplotlib.gridspec as gridspec import matplotlib.pyplot as plt import numpy as np -from shapely.geometry import LineString +import shapely +import yaml +from shapely.geometry import LineString, Polygon import flopy import flopy.plot.styles as styles from flopy.discretization import StructuredGrid, VertexGrid from flopy.utils.gridgen import Gridgen +from flopy.utils.gridintersect import GridIntersect from flopy.utils.triangle import Triangle from flopy.utils.voronoi import VoronoiGrid @@ -43,13 +46,70 @@ print(f"matplotlib version: {mpl.__version__}") print(f"flopy version: {flopy.__version__}") -# import all plot style information from defaults.py -sys.path.append("../common") -from groundwater2023_utils import ( - densify_geometry, - geometries, - set_idomain, - string2geom, + +# define a few utility functions +def string2geom(geostring, conversion=None): + if conversion is None: + multiplier = 1.0 + else: + multiplier = float(conversion) + res = [] + for line in geostring.split("\n"): + if not any(line): + continue + line = line.strip() + line = line.split(" ") + x = float(line[0]) * multiplier + y = float(line[1]) * multiplier + res.append((x, y)) + return res + + +def densify_geometry(line, step, keep_internal_nodes=True): + xy = [] # list of tuple of coordinates + lines_strings = [] + if keep_internal_nodes: + for idx in range(1, len(line)): + lines_strings.append( + shapely.geometry.LineString(line[idx - 1 : idx + 1]) + ) + else: + lines_strings = [shapely.geometry.LineString(line)] + + for line_string in lines_strings: + length_m = line_string.length # get the length + for distance in np.arange(0, length_m + step, step): + point = line_string.interpolate(distance) + xy_tuple = (point.x, point.y) + if xy_tuple not in xy: + xy.append(xy_tuple) + # make sure the end point is in xy + if keep_internal_nodes: + xy_tuple = line_string.coords[-1] + if xy_tuple not in xy: + xy.append(xy_tuple) + + return xy + + +# function to set the active and inactive model area +def set_idomain(grid, boundary): + ix = GridIntersect(grid, method="vertex", rtree=True) + result = ix.intersect(Polygon(boundary)) + idx = [coords for coords in result.cellids] + idx = np.array(idx, dtype=int) + nr = idx.shape[0] + if idx.ndim == 1: + idx = idx.reshape((nr, 1)) + idx = tuple([idx[:, i] for i in range(idx.shape[1])]) + idomain = np.zeros(grid.shape[1:], dtype=int) + idomain[idx] = 1 + idomain = idomain.reshape(grid.shape) + grid.idomain = idomain + + +geometries = yaml.safe_load( + open(pl.Path("../../examples/data/groundwater2023/geometries.yml")) ) # basic figure size diff --git 
a/.docs/Notebooks/mf6_parallel_model_splitting_example.py b/.docs/Notebooks/mf6_parallel_model_splitting_example.py index 77c4019be..a753f1fcb 100644 --- a/.docs/Notebooks/mf6_parallel_model_splitting_example.py +++ b/.docs/Notebooks/mf6_parallel_model_splitting_example.py @@ -26,14 +26,35 @@ import matplotlib.pyplot as plt import numpy as np +import yaml import flopy from flopy.mf6.utils import Mf6Splitter from flopy.plot import styles from flopy.utils.geometry import LineString, Polygon -sys.path.append("../common") -from notebook_utils import geometries, string2geom +geometries = yaml.safe_load( + open(Path("../../examples/data/groundwater2023/geometries.yml")) +) + + +# define a few utility functions +def string2geom(geostring, conversion=None): + if conversion is None: + multiplier = 1.0 + else: + multiplier = float(conversion) + res = [] + for line in geostring.split("\n"): + if not any(line): + continue + line = line.strip() + line = line.split(" ") + x = float(line[0]) * multiplier + y = float(line[1]) * multiplier + res.append((x, y)) + return res + # ## Example 1: splitting a simple structured grid model # diff --git a/.docs/common/groundwater2023_utils.py b/.docs/common/groundwater2023_utils.py deleted file mode 100644 index 7574611d9..000000000 --- a/.docs/common/groundwater2023_utils.py +++ /dev/null @@ -1,198 +0,0 @@ -import numpy as np -import shapely -from shapely.geometry import Polygon - -from flopy.utils.gridintersect import GridIntersect - -geometries = { - "boundary": """1.868012422360248456e+05 4.695652173913043953e+04 - 1.790372670807453396e+05 5.204968944099379587e+04 - 1.729813664596273447e+05 5.590062111801243009e+04 - 1.672360248447204940e+05 5.987577639751553215e+04 - 1.631987577639751253e+05 6.335403726708075556e+04 - 1.563664596273291972e+05 6.819875776397516893e+04 - 1.509316770186335489e+05 7.229813664596274612e+04 - 1.453416149068323139e+05 7.527950310559007630e+04 - 1.395962732919254631e+05 7.627329192546584818e+04 - 1.357142857142857101e+05 7.664596273291927355e+04 - 1.329192546583850926e+05 7.751552795031057030e+04 - 1.268633540372670832e+05 8.062111801242237561e+04 - 1.218944099378881947e+05 8.285714285714286962e+04 - 1.145962732919254486e+05 8.571428571428572468e+04 - 1.069875776397515583e+05 8.869565217391305487e+04 - 1.023291925465838431e+05 8.931677018633540138e+04 - 9.456521739130433707e+04 9.068322981366459862e+04 - 8.804347826086955320e+04 9.080745341614908830e+04 - 7.950310559006211406e+04 9.267080745341615693e+04 - 7.562111801242236106e+04 9.391304347826087906e+04 - 6.692546583850930620e+04 9.602484472049689793e+04 - 5.667701863354037778e+04 9.763975155279504543e+04 - 4.906832298136646568e+04 9.689440993788820924e+04 - 3.897515527950309479e+04 9.540372670807455142e+04 - 3.167701863354036323e+04 9.304347826086958230e+04 - 2.375776397515527788e+04 8.757763975155279331e+04 - 1.847826086956521613e+04 8.161490683229814749e+04 - 1.164596273291925172e+04 7.739130434782608063e+04 - 6.211180124223596977e+03 7.055900621118013805e+04 - 4.347826086956512881e+03 6.422360248447205959e+04 - 1.863354037267072272e+03 6.037267080745341809e+04 - 2.639751552795024509e+03 5.602484472049689793e+04 - 1.552795031055893560e+03 5.279503105590062478e+04 - 7.763975155279410956e+02 4.186335403726709046e+04 - 2.018633540372667312e+03 3.813664596273292409e+04 - 6.055900621118013078e+03 3.341614906832297856e+04 - 1.335403726708074100e+04 2.782608695652173992e+04 - 2.577639751552794405e+04 2.086956521739130767e+04 - 3.416149068322980747e+04 1.763975155279503815e+04 - 
4.642857142857142753e+04 1.440993788819875044e+04 - 5.636645962732918997e+04 1.130434782608694877e+04 - 6.459627329192546313e+04 9.813664596273290954e+03 - 8.555900621118012350e+04 6.832298136645956220e+03 - 9.829192546583850344e+04 5.093167701863346338e+03 - 1.085403726708074391e+05 4.347826086956525614e+03 - 1.200310559006211115e+05 4.223602484472040487e+03 - 1.296583850931677007e+05 4.347826086956525614e+03 - 1.354037267080745369e+05 5.590062111801232277e+03 - 1.467391304347825935e+05 1.267080745341615875e+04 - 1.563664596273291972e+05 1.937888198757762802e+04 - 1.630434782608695677e+05 2.198757763975155467e+04 - 1.694099378881987650e+05 2.434782608695652743e+04 - 1.782608695652173774e+05 2.981366459627329095e+04 - 1.833850931677018234e+05 3.180124223602484562e+04 - 1.868012422360248456e+05 3.577639751552795497e+04""", - "streamseg1": """1.868012422360248456e+05 4.086956521739130403e+04 - 1.824534161490683327e+05 4.086956521739130403e+04 - 1.770186335403726553e+05 4.124223602484472940e+04 - 1.737577639751552779e+05 4.186335403726709046e+04 - 1.703416149068323139e+05 4.310559006211180531e+04 - 1.670807453416148783e+05 4.397515527950310934e+04 - 1.636645962732919143e+05 4.484472049689441337e+04 - 1.590062111801242281e+05 4.559006211180124228e+04 - 1.555900621118012350e+05 4.559006211180124228e+04 - 1.510869565217391064e+05 4.546583850931677443e+04 - 1.479813664596273156e+05 4.534161490683229931e+04 - 1.453416149068323139e+05 4.496894409937888850e+04 - 1.377329192546583654e+05 4.447204968944099528e+04 - 1.326086956521739194e+05 4.447204968944099528e+04 - 1.285714285714285652e+05 4.434782608695652743e+04 - 1.245341614906832110e+05 4.472049689440993825e+04 - 1.215838509316770069e+05 4.509316770186335634e+04 - 1.161490683229813585e+05 4.509316770186335634e+04 - 1.125776397515527933e+05 4.459627329192547040e+04 - 1.074534161490683036e+05 4.385093167701864149e+04 - 1.018633540372670686e+05 4.347826086956522340e+04 - 9.798136645962731563e+04 4.360248447204969125e+04 - 9.223602484472049400e+04 4.310559006211180531e+04 - 8.602484472049689793e+04 4.198757763975155831e+04 - 7.981366459627327276e+04 4.173913043478261534e+04 - 7.468944099378881219e+04 4.248447204968944425e+04 - 7.034161490683228476e+04 4.385093167701864149e+04 - 6.785714285714285506e+04 4.621118012422360334e+04 - 6.583850931677018525e+04 4.919254658385094081e+04 - 6.319875776397513982e+04 5.192546583850932075e+04 - 6.009316770186335634e+04 5.677018633540373412e+04 - 5.605590062111800216e+04 5.950310559006211406e+04 - 5.279503105590060295e+04 6.124223602484472940e+04 - 4.751552795031056303e+04 6.211180124223603343e+04 - 3.990683229813664366e+04 6.335403726708075556e+04 - 3.276397515527949508e+04 6.409937888198757719e+04 - 2.934782608695651652e+04 6.509316770186336362e+04 - 2.546583850931676716e+04 6.832298136645962950e+04""", - "streamseg2": """7.025161490683228476e+04 4.375093167701864149e+04 - 6.816770186335404287e+04 4.273291925465839449e+04 - 6.490683229813665093e+04 4.211180124223603343e+04 - 6.164596273291925900e+04 4.173913043478262261e+04 - 5.776397515527951327e+04 4.124223602484472940e+04 - 5.450310559006211406e+04 4.049689440993789322e+04 - 4.984472049689442065e+04 3.937888198757764621e+04 - 4.534161490683231386e+04 3.801242236024845624e+04 - 4.114906832298137306e+04 3.664596273291926627e+04 - 3.913043478260868869e+04 3.565217391304348712e+04 - 3.649068322981366509e+04 3.416149068322981475e+04 - 3.322981366459628043e+04 3.242236024844721760e+04 - 3.012422360248447148e+04 3.105590062111801672e+04 - 2.608695652173913550e+04 
2.957521739130435890e+04""", - "streamseg3": """1.059006211180124228e+05 4.335403726708074828e+04 - 1.029503105590062187e+05 4.223602484472050128e+04 - 1.004658385093167890e+05 4.024844720496894297e+04 - 9.937888198757765349e+04 3.788819875776398112e+04 - 9.627329192546584818e+04 3.490683229813664366e+04 - 9.285714285714286962e+04 3.316770186335403559e+04 - 8.897515527950311662e+04 3.093167701863354159e+04 - 8.338509316770188161e+04 2.795031055900621504e+04 - 7.872670807453416637e+04 2.670807453416148928e+04 - 7.329192546583851799e+04 2.385093167701863058e+04 - 6.863354037267081731e+04 2.111801242236025064e+04 - 6.304347826086958230e+04 1.863354037267081003e+04""", - "streamseg4": """1.371118012422360480e+05 4.472049689440994553e+04 - 1.321428571428571595e+05 4.720496894409938250e+04 - 1.285714285714285652e+05 4.981366459627330187e+04 - 1.243788819875776535e+05 5.341614906832298584e+04 - 1.189440993788819906e+05 5.540372670807454415e+04 - 1.125776397515527933e+05 5.627329192546584818e+04 - 1.065217391304347839e+05 5.726708074534162733e+04 - 1.020186335403726698e+05 5.913043478260870324e+04 - 9.409937888198759174e+04 6.273291925465840177e+04 - 9.192546583850932075e+04 6.633540372670808574e+04 - 8.881987577639751544e+04 7.242236024844722124e+04 - 8.586956521739131131e+04 7.552795031055902655e+04 - 8.369565217391305487e+04 7.962732919254660374e+04""", -} - - -def string2geom(geostring, conversion=None): - if conversion is None: - multiplier = 1.0 - else: - multiplier = float(conversion) - res = [] - for line in geostring.split("\n"): - line = line.strip() - line = line.split(" ") - x = float(line[0]) * multiplier - y = float(line[1]) * multiplier - res.append((x, y)) - return res - - -def densify_geometry(line, step, keep_internal_nodes=True): - xy = [] # list of tuple of coordinates - lines_strings = [] - if keep_internal_nodes: - for idx in range(1, len(line)): - lines_strings.append( - shapely.geometry.LineString(line[idx - 1 : idx + 1]) - ) - else: - lines_strings = [shapely.geometry.LineString(line)] - - for line_string in lines_strings: - length_m = line_string.length # get the length - for distance in np.arange(0, length_m + step, step): - point = line_string.interpolate(distance) - xy_tuple = (point.x, point.y) - if xy_tuple not in xy: - xy.append(xy_tuple) - # make sure the end point is in xy - if keep_internal_nodes: - xy_tuple = line_string.coords[-1] - if xy_tuple not in xy: - xy.append(xy_tuple) - - return xy - - -# function to set the active and inactive model area -def set_idomain(grid, boundary): - ix = GridIntersect(grid, method="vertex", rtree=True) - result = ix.intersect(Polygon(boundary)) - idx = [coords for coords in result.cellids] - idx = np.array(idx, dtype=int) - nr = idx.shape[0] - if idx.ndim == 1: - idx = idx.reshape((nr, 1)) - idx = tuple([idx[:, i] for i in range(idx.shape[1])]) - idomain = np.zeros(grid.shape[1:], dtype=int) - idomain[idx] = 1 - idomain = idomain.reshape(grid.shape) - grid.idomain = idomain diff --git a/.docs/common/notebook_utils.py b/.docs/common/notebook_utils.py deleted file mode 100644 index b805d4c92..000000000 --- a/.docs/common/notebook_utils.py +++ /dev/null @@ -1,776 +0,0 @@ -import os -import sys -from pathlib import Path - -import numpy as np - -try: - import flopy -except ImportError: - fpth = os.path.abspath(os.path.join("..", "..", "..")) - sys.path.append(fpth) - import flopy - - -def get_project_root_path() -> Path: - return Path.cwd().parent.parent - - -geometries = { - "boundary": """1.868012422360248456e+05 
4.695652173913043953e+04 - 1.790372670807453396e+05 5.204968944099379587e+04 - 1.729813664596273447e+05 5.590062111801243009e+04 - 1.672360248447204940e+05 5.987577639751553215e+04 - 1.631987577639751253e+05 6.335403726708075556e+04 - 1.563664596273291972e+05 6.819875776397516893e+04 - 1.509316770186335489e+05 7.229813664596274612e+04 - 1.453416149068323139e+05 7.527950310559007630e+04 - 1.395962732919254631e+05 7.627329192546584818e+04 - 1.357142857142857101e+05 7.664596273291927355e+04 - 1.329192546583850926e+05 7.751552795031057030e+04 - 1.268633540372670832e+05 8.062111801242237561e+04 - 1.218944099378881947e+05 8.285714285714286962e+04 - 1.145962732919254486e+05 8.571428571428572468e+04 - 1.069875776397515583e+05 8.869565217391305487e+04 - 1.023291925465838431e+05 8.931677018633540138e+04 - 9.456521739130433707e+04 9.068322981366459862e+04 - 8.804347826086955320e+04 9.080745341614908830e+04 - 7.950310559006211406e+04 9.267080745341615693e+04 - 7.562111801242236106e+04 9.391304347826087906e+04 - 6.692546583850930620e+04 9.602484472049689793e+04 - 5.667701863354037778e+04 9.763975155279504543e+04 - 4.906832298136646568e+04 9.689440993788820924e+04 - 3.897515527950309479e+04 9.540372670807455142e+04 - 3.167701863354036323e+04 9.304347826086958230e+04 - 2.375776397515527788e+04 8.757763975155279331e+04 - 1.847826086956521613e+04 8.161490683229814749e+04 - 1.164596273291925172e+04 7.739130434782608063e+04 - 6.211180124223596977e+03 7.055900621118013805e+04 - 4.347826086956512881e+03 6.422360248447205959e+04 - 1.863354037267072272e+03 6.037267080745341809e+04 - 2.639751552795024509e+03 5.602484472049689793e+04 - 1.552795031055893560e+03 5.279503105590062478e+04 - 7.763975155279410956e+02 4.186335403726709046e+04 - 2.018633540372667312e+03 3.813664596273292409e+04 - 6.055900621118013078e+03 3.341614906832297856e+04 - 1.335403726708074100e+04 2.782608695652173992e+04 - 2.577639751552794405e+04 2.086956521739130767e+04 - 3.416149068322980747e+04 1.763975155279503815e+04 - 4.642857142857142753e+04 1.440993788819875044e+04 - 5.636645962732918997e+04 1.130434782608694877e+04 - 6.459627329192546313e+04 9.813664596273290954e+03 - 8.555900621118012350e+04 6.832298136645956220e+03 - 9.829192546583850344e+04 5.093167701863346338e+03 - 1.085403726708074391e+05 4.347826086956525614e+03 - 1.200310559006211115e+05 4.223602484472040487e+03 - 1.296583850931677007e+05 4.347826086956525614e+03 - 1.354037267080745369e+05 5.590062111801232277e+03 - 1.467391304347825935e+05 1.267080745341615875e+04 - 1.563664596273291972e+05 1.937888198757762802e+04 - 1.630434782608695677e+05 2.198757763975155467e+04 - 1.694099378881987650e+05 2.434782608695652743e+04 - 1.782608695652173774e+05 2.981366459627329095e+04 - 1.833850931677018234e+05 3.180124223602484562e+04 - 1.868012422360248456e+05 3.577639751552795497e+04""", - "streamseg1": """1.868012422360248456e+05 4.086956521739130403e+04 - 1.824534161490683327e+05 4.086956521739130403e+04 - 1.770186335403726553e+05 4.124223602484472940e+04 - 1.737577639751552779e+05 4.186335403726709046e+04 - 1.703416149068323139e+05 4.310559006211180531e+04 - 1.670807453416148783e+05 4.397515527950310934e+04 - 1.636645962732919143e+05 4.484472049689441337e+04 - 1.590062111801242281e+05 4.559006211180124228e+04 - 1.555900621118012350e+05 4.559006211180124228e+04 - 1.510869565217391064e+05 4.546583850931677443e+04 - 1.479813664596273156e+05 4.534161490683229931e+04 - 1.453416149068323139e+05 4.496894409937888850e+04 - 1.377329192546583654e+05 4.447204968944099528e+04 - 
1.326086956521739194e+05 4.447204968944099528e+04 - 1.285714285714285652e+05 4.434782608695652743e+04 - 1.245341614906832110e+05 4.472049689440993825e+04 - 1.215838509316770069e+05 4.509316770186335634e+04 - 1.161490683229813585e+05 4.509316770186335634e+04 - 1.125776397515527933e+05 4.459627329192547040e+04 - 1.074534161490683036e+05 4.385093167701864149e+04 - 1.018633540372670686e+05 4.347826086956522340e+04 - 9.798136645962731563e+04 4.360248447204969125e+04 - 9.223602484472049400e+04 4.310559006211180531e+04 - 8.602484472049689793e+04 4.198757763975155831e+04 - 7.981366459627327276e+04 4.173913043478261534e+04 - 7.468944099378881219e+04 4.248447204968944425e+04 - 7.034161490683228476e+04 4.385093167701864149e+04 - 6.785714285714285506e+04 4.621118012422360334e+04 - 6.583850931677018525e+04 4.919254658385094081e+04 - 6.319875776397513982e+04 5.192546583850932075e+04 - 6.009316770186335634e+04 5.677018633540373412e+04 - 5.605590062111800216e+04 5.950310559006211406e+04 - 5.279503105590060295e+04 6.124223602484472940e+04 - 4.751552795031056303e+04 6.211180124223603343e+04 - 3.990683229813664366e+04 6.335403726708075556e+04 - 3.276397515527949508e+04 6.409937888198757719e+04 - 2.934782608695651652e+04 6.509316770186336362e+04 - 2.546583850931676716e+04 6.832298136645962950e+04""", - "streamseg2": """7.025161490683228476e+04 4.375093167701864149e+04 - 6.816770186335404287e+04 4.273291925465839449e+04 - 6.490683229813665093e+04 4.211180124223603343e+04 - 6.164596273291925900e+04 4.173913043478262261e+04 - 5.776397515527951327e+04 4.124223602484472940e+04 - 5.450310559006211406e+04 4.049689440993789322e+04 - 4.984472049689442065e+04 3.937888198757764621e+04 - 4.534161490683231386e+04 3.801242236024845624e+04 - 4.114906832298137306e+04 3.664596273291926627e+04 - 3.913043478260868869e+04 3.565217391304348712e+04 - 3.649068322981366509e+04 3.416149068322981475e+04 - 3.322981366459628043e+04 3.242236024844721760e+04 - 3.012422360248447148e+04 3.105590062111801672e+04 - 2.608695652173913550e+04 2.957521739130435890e+04""", - "streamseg3": """1.059006211180124228e+05 4.335403726708074828e+04 - 1.029503105590062187e+05 4.223602484472050128e+04 - 1.004658385093167890e+05 4.024844720496894297e+04 - 9.937888198757765349e+04 3.788819875776398112e+04 - 9.627329192546584818e+04 3.490683229813664366e+04 - 9.285714285714286962e+04 3.316770186335403559e+04 - 8.897515527950311662e+04 3.093167701863354159e+04 - 8.338509316770188161e+04 2.795031055900621504e+04 - 7.872670807453416637e+04 2.670807453416148928e+04 - 7.329192546583851799e+04 2.385093167701863058e+04 - 6.863354037267081731e+04 2.111801242236025064e+04 - 6.304347826086958230e+04 1.863354037267081003e+04""", - "streamseg4": """1.371118012422360480e+05 4.472049689440994553e+04 - 1.321428571428571595e+05 4.720496894409938250e+04 - 1.285714285714285652e+05 4.981366459627330187e+04 - 1.243788819875776535e+05 5.341614906832298584e+04 - 1.189440993788819906e+05 5.540372670807454415e+04 - 1.125776397515527933e+05 5.627329192546584818e+04 - 1.065217391304347839e+05 5.726708074534162733e+04 - 1.020186335403726698e+05 5.913043478260870324e+04 - 9.409937888198759174e+04 6.273291925465840177e+04 - 9.192546583850932075e+04 6.633540372670808574e+04 - 8.881987577639751544e+04 7.242236024844722124e+04 - 8.586956521739131131e+04 7.552795031055902655e+04 - 8.369565217391305487e+04 7.962732919254660374e+04""", -} - - -def string2geom(geostring, conversion=None): - if conversion is None: - multiplier = 1.0 - else: - multiplier = float(conversion) - res = [] - for line 
in geostring.split("\n"): - line = line.strip() - line = line.split(" ") - x = float(line[0]) * multiplier - y = float(line[1]) * multiplier - res.append((x, y)) - return res - - -def run(ws): - ## load and run vertex grid example - # run installed version of flopy or add local path - if not os.path.exists(ws): - os.mkdir(ws) - - from flopy.utils.gridgen import Gridgen - - Lx = 10000.0 - Ly = 10500.0 - nlay = 3 - nrow = 21 - ncol = 20 - delr = Lx / ncol - delc = Ly / nrow - top = 400 - botm = [220, 200, 0] - - ms = flopy.modflow.Modflow() - dis5 = flopy.modflow.ModflowDis( - ms, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - top=top, - botm=botm, - ) - - model_name = "mp7p2" - model_ws = os.path.join(ws, "mp7_ex2", "mf6") - gridgen_ws = os.path.join(model_ws, "gridgen") - g = Gridgen(ms.modelgrid, model_ws=gridgen_ws) - - rf0shp = os.path.join(gridgen_ws, "rf0") - xmin = 7 * delr - xmax = 12 * delr - ymin = 8 * delc - ymax = 13 * delc - rfpoly = [ - [ - [ - (xmin, ymin), - (xmax, ymin), - (xmax, ymax), - (xmin, ymax), - (xmin, ymin), - ] - ] - ] - g.add_refinement_features(rfpoly, "polygon", 1, range(nlay)) - - rf1shp = os.path.join(gridgen_ws, "rf1") - xmin = 8 * delr - xmax = 11 * delr - ymin = 9 * delc - ymax = 12 * delc - rfpoly = [ - [ - [ - (xmin, ymin), - (xmax, ymin), - (xmax, ymax), - (xmin, ymax), - (xmin, ymin), - ] - ] - ] - g.add_refinement_features(rfpoly, "polygon", 2, range(nlay)) - - rf2shp = os.path.join(gridgen_ws, "rf2") - xmin = 9 * delr - xmax = 10 * delr - ymin = 10 * delc - ymax = 11 * delc - rfpoly = [ - [ - [ - (xmin, ymin), - (xmax, ymin), - (xmax, ymax), - (xmin, ymax), - (xmin, ymin), - ] - ] - ] - g.add_refinement_features(rfpoly, "polygon", 3, range(nlay)) - - g.build(verbose=False) - - gridprops = g.get_gridprops_disv() - ncpl = gridprops["ncpl"] - top = gridprops["top"] - botm = gridprops["botm"] - nvert = gridprops["nvert"] - vertices = gridprops["vertices"] - cell2d = gridprops["cell2d"] - # cellxy = gridprops['cellxy'] - - # create simulation - sim = flopy.mf6.MFSimulation( - sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=model_ws - ) - - # create tdis package - tdis_rc = [(1000.0, 1, 1.0)] - tdis = flopy.mf6.ModflowTdis( - sim, pname="tdis", time_units="DAYS", perioddata=tdis_rc - ) - - # create gwf model - gwf = flopy.mf6.ModflowGwf( - sim, modelname=model_name, model_nam_file=f"{model_name}.nam" - ) - gwf.name_file.save_flows = True - - # create iterative model solution and register the gwf model with it - ims = flopy.mf6.ModflowIms( - sim, - pname="ims", - print_option="SUMMARY", - complexity="SIMPLE", - outer_hclose=1.0e-5, - outer_maximum=100, - under_relaxation="NONE", - inner_maximum=100, - inner_hclose=1.0e-6, - rcloserecord=0.1, - linear_acceleration="BICGSTAB", - scaling_method="NONE", - reordering_method="NONE", - relaxation_factor=0.99, - ) - sim.register_ims_package(ims, [gwf.name]) - - # disv - disv = flopy.mf6.ModflowGwfdisv( - gwf, - nlay=nlay, - ncpl=ncpl, - top=top, - botm=botm, - nvert=nvert, - vertices=vertices, - cell2d=cell2d, - ) - - # initial conditions - ic = flopy.mf6.ModflowGwfic(gwf, pname="ic", strt=320.0) - - # node property flow - npf = flopy.mf6.ModflowGwfnpf( - gwf, - xt3doptions=[("xt3d")], - save_specific_discharge=True, - icelltype=[1, 0, 0], - k=[50.0, 0.01, 200.0], - k33=[10.0, 0.01, 20.0], - ) - - # wel - wellpoints = [(4750.0, 5250.0)] - welcells = g.intersect(wellpoints, "point", 0) - # welspd = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=1, aux_vars=['iface']) - 
welspd = [[(2, icpl), -150000, 0] for icpl in welcells["nodenumber"]] - wel = flopy.mf6.ModflowGwfwel( - gwf, - print_input=True, - auxiliary=[("iface",)], - stress_period_data=welspd, - ) - - # rch - aux = [np.ones(ncpl, dtype=int) * 6] - rch = flopy.mf6.ModflowGwfrcha( - gwf, recharge=0.005, auxiliary=[("iface",)], aux={0: [6]} - ) - # riv - riverline = [[(Lx - 1.0, Ly), (Lx - 1.0, 0.0)]] - rivcells = g.intersect(riverline, "line", 0) - rivspd = [ - [(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"] - ] - riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd) - - # output control - oc = flopy.mf6.ModflowGwfoc( - gwf, - pname="oc", - budget_filerecord=f"{model_name}.cbb", - head_filerecord=f"{model_name}.hds", - headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], - saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - ) - - sim.write_simulation() - success, buff = sim.run_simulation(silent=True, report=True) - if success: - for line in buff: - print(line) - else: - raise ValueError("Failed to run.") - - mp_namea = f"{model_name}a_mp" - mp_nameb = f"{model_name}b_mp" - - pcoord = np.array( - [ - [0.000, 0.125, 0.500], - [0.000, 0.375, 0.500], - [0.000, 0.625, 0.500], - [0.000, 0.875, 0.500], - [1.000, 0.125, 0.500], - [1.000, 0.375, 0.500], - [1.000, 0.625, 0.500], - [1.000, 0.875, 0.500], - [0.125, 0.000, 0.500], - [0.375, 0.000, 0.500], - [0.625, 0.000, 0.500], - [0.875, 0.000, 0.500], - [0.125, 1.000, 0.500], - [0.375, 1.000, 0.500], - [0.625, 1.000, 0.500], - [0.875, 1.000, 0.500], - ] - ) - nodew = gwf.disv.ncpl.array * 2 + welcells["nodenumber"][0] - plocs = [nodew for i in range(pcoord.shape[0])] - - # create particle data - pa = flopy.modpath.ParticleData( - plocs, - structured=False, - localx=pcoord[:, 0], - localy=pcoord[:, 1], - localz=pcoord[:, 2], - drape=0, - ) - - # create backward particle group - fpth = f"{mp_namea}.sloc" - pga = flopy.modpath.ParticleGroup( - particlegroupname="BACKWARD1", particledata=pa, filename=fpth - ) - - facedata = flopy.modpath.FaceDataType( - drape=0, - verticaldivisions1=10, - horizontaldivisions1=10, - verticaldivisions2=10, - horizontaldivisions2=10, - verticaldivisions3=10, - horizontaldivisions3=10, - verticaldivisions4=10, - horizontaldivisions4=10, - rowdivisions5=0, - columndivisions5=0, - rowdivisions6=4, - columndivisions6=4, - ) - pb = flopy.modpath.NodeParticleData(subdivisiondata=facedata, nodes=nodew) - # create forward particle group - fpth = f"{mp_nameb}.sloc" - pgb = flopy.modpath.ParticleGroupNodeTemplate( - particlegroupname="BACKWARD2", particledata=pb, filename=fpth - ) - - # create modpath files - mp = flopy.modpath.Modpath7( - modelname=mp_namea, flowmodel=gwf, exe_name="mp7", model_ws=model_ws - ) - flopy.modpath.Modpath7Bas(mp, porosity=0.1) - flopy.modpath.Modpath7Sim( - mp, - simulationtype="combined", - trackingdirection="backward", - weaksinkoption="pass_through", - weaksourceoption="pass_through", - referencetime=0.0, - stoptimeoption="extend", - timepointdata=[500, 1000.0], - particlegroups=pga, - ) - - # write modpath datasets - mp.write_input() - - # run modpath - success, buff = mp.run_model(silent=True, report=True) - if success: - for line in buff: - print(line) - else: - raise ValueError("Failed to run.") - - # create modpath files - mp = flopy.modpath.Modpath7( - modelname=mp_nameb, flowmodel=gwf, exe_name="mp7", model_ws=model_ws - ) - flopy.modpath.Modpath7Bas(mp, porosity=0.1) - flopy.modpath.Modpath7Sim( - mp, - 
simulationtype="endpoint", - trackingdirection="backward", - weaksinkoption="pass_through", - weaksourceoption="pass_through", - referencetime=0.0, - stoptimeoption="extend", - particlegroups=pgb, - ) - - # write modpath datasets - mp.write_input() - - # run modpath - success, buff = mp.run_model(silent=True, report=True) - if success: - for line in buff: - print(line) - else: - raise ValueError("Failed to run.") - - -example_name = "ex-gwt-keating" - -# Model units - -length_units = "m" -time_units = "days" - -# Table of model parameters - -nlay = 80 # Number of layers -nrow = 1 # Number of rows -ncol = 400 # Number of columns -delr = 25.0 # Column width ($m$) -delc = 1.0 # Row width ($m$) -delz = 25.0 # Layer thickness ($m$) -top = 2000.0 # Top of model domain ($m$) -bottom = 0.0 # Bottom of model domain ($m$) -hka = 1.0e-12 # Permeability of aquifer ($m^2$) -hkc = 1.0e-18 # Permeability of aquitard ($m^2$) -h1 = 800.0 # Head on left side ($m$) -h2 = 100.0 # Head on right side ($m$) -recharge = 0.5 # Recharge ($kg/s$) -recharge_conc = 1.0 # Normalized recharge concentration (unitless) -alpha_l = 1.0 # Longitudinal dispersivity ($m$) -alpha_th = 1.0 # Transverse horizontal dispersivity ($m$) -alpha_tv = 1.0 # Transverse vertical dispersivity ($m$) -period1 = 730 # Length of first simulation period ($d$) -period2 = 29270.0 # Length of second simulation period ($d$) -porosity = 0.1 # Porosity of mobile domain (unitless) -obs1 = (49, 1, 119) # Layer, row, and column for observation 1 -obs2 = (77, 1, 359) # Layer, row, and column for observation 2 - -obs1 = tuple([i - 1 for i in obs1]) -obs2 = tuple([i - 1 for i in obs2]) -seconds_to_days = 24.0 * 60.0 * 60.0 -permeability_to_conductivity = 1000.0 * 9.81 / 1.0e-3 * seconds_to_days -hka = hka * permeability_to_conductivity -hkc = hkc * permeability_to_conductivity -botm = [top - (k + 1) * delz for k in range(nlay)] -x = np.arange(0, 10000.0, delr) + delr / 2.0 -plotaspect = 1.0 - -# Fill hydraulic conductivity array -hydraulic_conductivity = np.ones((nlay, nrow, ncol), dtype=float) * hka -for k in range(nlay): - if 1000.0 <= botm[k] < 1100.0: - for j in range(ncol): - if 3000.0 <= x[j] <= 6000.0: - hydraulic_conductivity[k, 0, j] = hkc - -# Calculate recharge by converting from kg/s to m/d -rcol = [] -for jcol in range(ncol): - if 4200.0 <= x[jcol] <= 4800.0: - rcol.append(jcol) -number_recharge_cells = len(rcol) -rrate = recharge * seconds_to_days / 1000.0 -cell_area = delr * delc -rrate = rrate / (float(number_recharge_cells) * cell_area) -rchspd = {} -rchspd[0] = [[(0, 0, j), rrate, recharge_conc] for j in rcol] -rchspd[1] = [[(0, 0, j), rrate, 0.0] for j in rcol] - - -def build_mf6gwf(sim_folder): - ws = os.path.join(sim_folder, "mf6-gwt-keating") - name = "flow" - sim_ws = os.path.join(ws, "mf6gwf") - sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=sim_ws, exe_name="mf6") - tdis_ds = ((period1, 1, 1.0), (period2, 1, 1.0)) - flopy.mf6.ModflowTdis( - sim, nper=len(tdis_ds), perioddata=tdis_ds, time_units=time_units - ) - flopy.mf6.ModflowIms( - sim, - print_option="summary", - complexity="complex", - no_ptcrecord="all", - outer_dvclose=1.0e-4, - outer_maximum=2000, - under_relaxation="dbd", - linear_acceleration="BICGSTAB", - under_relaxation_theta=0.7, - under_relaxation_kappa=0.08, - under_relaxation_gamma=0.05, - under_relaxation_momentum=0.0, - backtracking_number=20, - backtracking_tolerance=2.0, - backtracking_reduction_factor=0.2, - backtracking_residual_limit=5.0e-4, - inner_dvclose=1.0e-5, - rcloserecord=[0.0001, 
"relative_rclose"], - inner_maximum=100, - relaxation_factor=0.0, - number_orthogonalizations=2, - preconditioner_levels=8, - preconditioner_drop_tolerance=0.001, - ) - gwf = flopy.mf6.ModflowGwf( - sim, modelname=name, save_flows=True, newtonoptions=["newton"] - ) - flopy.mf6.ModflowGwfdis( - gwf, - length_units=length_units, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - top=top, - botm=botm, - ) - flopy.mf6.ModflowGwfnpf( - gwf, - save_specific_discharge=True, - save_saturation=True, - icelltype=1, - k=hydraulic_conductivity, - ) - flopy.mf6.ModflowGwfic(gwf, strt=600.0) - chdspd = [[(k, 0, 0), h1] for k in range(nlay) if botm[k] < h1] - chdspd += [[(k, 0, ncol - 1), h2] for k in range(nlay) if botm[k] < h2] - flopy.mf6.ModflowGwfchd( - gwf, - stress_period_data=chdspd, - print_input=True, - print_flows=True, - save_flows=False, - pname="CHD-1", - ) - flopy.mf6.ModflowGwfrch( - gwf, - stress_period_data=rchspd, - auxiliary=["concentration"], - pname="RCH-1", - ) - - head_filerecord = f"{name}.hds" - budget_filerecord = f"{name}.bud" - flopy.mf6.ModflowGwfoc( - gwf, - head_filerecord=head_filerecord, - budget_filerecord=budget_filerecord, - saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], - ) - return sim - - -def build_mf6gwt(sim_folder): - ws = os.path.join(sim_folder, "mf6-gwt-keating") - name = "trans" - sim_ws = os.path.join(ws, "mf6gwt") - sim = flopy.mf6.MFSimulation( - sim_name=name, - sim_ws=sim_ws, - exe_name="mf6", - continue_=True, - ) - tdis_ds = ((period1, 73, 1.0), (period2, 2927, 1.0)) - flopy.mf6.ModflowTdis( - sim, nper=len(tdis_ds), perioddata=tdis_ds, time_units=time_units - ) - flopy.mf6.ModflowIms( - sim, - print_option="summary", - outer_dvclose=1.0e-4, - outer_maximum=100, - under_relaxation="none", - linear_acceleration="BICGSTAB", - rcloserecord=[1000.0, "strict"], - inner_maximum=20, - inner_dvclose=1.0e-4, - relaxation_factor=0.0, - number_orthogonalizations=2, - preconditioner_levels=8, - preconditioner_drop_tolerance=0.001, - ) - gwt = flopy.mf6.ModflowGwt(sim, modelname=name, save_flows=True) - flopy.mf6.ModflowGwtdis( - gwt, - length_units=length_units, - nlay=nlay, - nrow=nrow, - ncol=ncol, - delr=delr, - delc=delc, - top=top, - botm=botm, - ) - flopy.mf6.ModflowGwtic(gwt, strt=0) - flopy.mf6.ModflowGwtmst(gwt, porosity=porosity) - flopy.mf6.ModflowGwtadv(gwt, scheme="upstream") - flopy.mf6.ModflowGwtdsp( - gwt, xt3d_off=True, alh=alpha_l, ath1=alpha_th, atv=alpha_tv - ) - pd = [ - ("GWFHEAD", "../mf6gwf/flow.hds"), - ("GWFBUDGET", "../mf6gwf/flow.bud"), - ] - flopy.mf6.ModflowGwtfmi( - gwt, flow_imbalance_correction=True, packagedata=pd - ) - sourcerecarray = [ - ("RCH-1", "AUX", "CONCENTRATION"), - ] - flopy.mf6.ModflowGwtssm(gwt, sources=sourcerecarray) - saverecord = { - 0: [ - ("CONCENTRATION", "STEPS", 10), - ("CONCENTRATION", "LAST"), - ("CONCENTRATION", "FREQUENCY", 10), - ], - 1: [ - ("CONCENTRATION", "STEPS", 27, 227), - ("CONCENTRATION", "LAST"), - ("CONCENTRATION", "FREQUENCY", 10), - ], - } - flopy.mf6.ModflowGwtoc( - gwt, - budget_filerecord=f"{name}.cbc", - concentration_filerecord=f"{name}.ucn", - concentrationprintrecord=[ - ("COLUMNS", ncol, "WIDTH", 15, "DIGITS", 6, "GENERAL") - ], - saverecord=saverecord, - printrecord=[ - ("CONCENTRATION", "LAST"), - ( - "BUDGET", - "ALL", - ), - ], - ) - obs_data = { - f"{name}.obs.csv": [ - ("obs1", "CONCENTRATION", obs1), - ("obs2", "CONCENTRATION", obs2), - ], - } - flopy.mf6.ModflowUtlobs( - gwt, digits=10, print_input=True, continuous=obs_data - ) - return sim - - -def 
build_model(ws): - sim_mf6gwf = build_mf6gwf(ws) - sim_mf6gwt = build_mf6gwt(ws) - sim_mf2005 = None # build_mf2005(sim_name) - sim_mt3dms = None # build_mt3dms(sim_name, sim_mf2005) - sims = (sim_mf6gwf, sim_mf6gwt, sim_mf2005, sim_mt3dms) - return sims - - -def write_model(sims, silent=True): - sim_mf6gwf, sim_mf6gwt, sim_mf2005, sim_mt3dms = sims - sim_mf6gwf.write_simulation(silent=silent) - sim_mf6gwt.write_simulation(silent=silent) - - -def run_keating_model(ws=example_name, silent=True): - sim = build_model(ws) - write_model(sim, silent=silent) - sim_mf6gwf, sim_mf6gwt, sim_mf2005, sim_mt3dms = sim - - print("Running mf6gwf model...") - success, buff = sim_mf6gwf.run_simulation(silent=silent) - if not success: - print(buff) - - print("Running mf6gwt model...") - success, buff = sim_mf6gwt.run_simulation(silent=silent) - if not success: - print(buff) - - return success - - -if __name__ == "__main__": - run() - run_keating_model() diff --git a/examples/data/groundwater2023/geometries.yml b/examples/data/groundwater2023/geometries.yml new file mode 100644 index 000000000..c45d7d580 --- /dev/null +++ b/examples/data/groundwater2023/geometries.yml @@ -0,0 +1,137 @@ +boundary: |+ + 1.868012422360248456e+05 4.695652173913043953e+04 + 1.790372670807453396e+05 5.204968944099379587e+04 + 1.729813664596273447e+05 5.590062111801243009e+04 + 1.672360248447204940e+05 5.987577639751553215e+04 + 1.631987577639751253e+05 6.335403726708075556e+04 + 1.563664596273291972e+05 6.819875776397516893e+04 + 1.509316770186335489e+05 7.229813664596274612e+04 + 1.453416149068323139e+05 7.527950310559007630e+04 + 1.395962732919254631e+05 7.627329192546584818e+04 + 1.357142857142857101e+05 7.664596273291927355e+04 + 1.329192546583850926e+05 7.751552795031057030e+04 + 1.268633540372670832e+05 8.062111801242237561e+04 + 1.218944099378881947e+05 8.285714285714286962e+04 + 1.145962732919254486e+05 8.571428571428572468e+04 + 1.069875776397515583e+05 8.869565217391305487e+04 + 1.023291925465838431e+05 8.931677018633540138e+04 + 9.456521739130433707e+04 9.068322981366459862e+04 + 8.804347826086955320e+04 9.080745341614908830e+04 + 7.950310559006211406e+04 9.267080745341615693e+04 + 7.562111801242236106e+04 9.391304347826087906e+04 + 6.692546583850930620e+04 9.602484472049689793e+04 + 5.667701863354037778e+04 9.763975155279504543e+04 + 4.906832298136646568e+04 9.689440993788820924e+04 + 3.897515527950309479e+04 9.540372670807455142e+04 + 3.167701863354036323e+04 9.304347826086958230e+04 + 2.375776397515527788e+04 8.757763975155279331e+04 + 1.847826086956521613e+04 8.161490683229814749e+04 + 1.164596273291925172e+04 7.739130434782608063e+04 + 6.211180124223596977e+03 7.055900621118013805e+04 + 4.347826086956512881e+03 6.422360248447205959e+04 + 1.863354037267072272e+03 6.037267080745341809e+04 + 2.639751552795024509e+03 5.602484472049689793e+04 + 1.552795031055893560e+03 5.279503105590062478e+04 + 7.763975155279410956e+02 4.186335403726709046e+04 + 2.018633540372667312e+03 3.813664596273292409e+04 + 6.055900621118013078e+03 3.341614906832297856e+04 + 1.335403726708074100e+04 2.782608695652173992e+04 + 2.577639751552794405e+04 2.086956521739130767e+04 + 3.416149068322980747e+04 1.763975155279503815e+04 + 4.642857142857142753e+04 1.440993788819875044e+04 + 5.636645962732918997e+04 1.130434782608694877e+04 + 6.459627329192546313e+04 9.813664596273290954e+03 + 8.555900621118012350e+04 6.832298136645956220e+03 + 9.829192546583850344e+04 5.093167701863346338e+03 + 1.085403726708074391e+05 4.347826086956525614e+03 + 
1.200310559006211115e+05 4.223602484472040487e+03 + 1.296583850931677007e+05 4.347826086956525614e+03 + 1.354037267080745369e+05 5.590062111801232277e+03 + 1.467391304347825935e+05 1.267080745341615875e+04 + 1.563664596273291972e+05 1.937888198757762802e+04 + 1.630434782608695677e+05 2.198757763975155467e+04 + 1.694099378881987650e+05 2.434782608695652743e+04 + 1.782608695652173774e+05 2.981366459627329095e+04 + 1.833850931677018234e+05 3.180124223602484562e+04 + 1.868012422360248456e+05 3.577639751552795497e+04 +streamseg1: |+ + 1.868012422360248456e+05 4.086956521739130403e+04 + 1.824534161490683327e+05 4.086956521739130403e+04 + 1.770186335403726553e+05 4.124223602484472940e+04 + 1.737577639751552779e+05 4.186335403726709046e+04 + 1.703416149068323139e+05 4.310559006211180531e+04 + 1.670807453416148783e+05 4.397515527950310934e+04 + 1.636645962732919143e+05 4.484472049689441337e+04 + 1.590062111801242281e+05 4.559006211180124228e+04 + 1.555900621118012350e+05 4.559006211180124228e+04 + 1.510869565217391064e+05 4.546583850931677443e+04 + 1.479813664596273156e+05 4.534161490683229931e+04 + 1.453416149068323139e+05 4.496894409937888850e+04 + 1.377329192546583654e+05 4.447204968944099528e+04 + 1.326086956521739194e+05 4.447204968944099528e+04 + 1.285714285714285652e+05 4.434782608695652743e+04 + 1.245341614906832110e+05 4.472049689440993825e+04 + 1.215838509316770069e+05 4.509316770186335634e+04 + 1.161490683229813585e+05 4.509316770186335634e+04 + 1.125776397515527933e+05 4.459627329192547040e+04 + 1.074534161490683036e+05 4.385093167701864149e+04 + 1.018633540372670686e+05 4.347826086956522340e+04 + 9.798136645962731563e+04 4.360248447204969125e+04 + 9.223602484472049400e+04 4.310559006211180531e+04 + 8.602484472049689793e+04 4.198757763975155831e+04 + 7.981366459627327276e+04 4.173913043478261534e+04 + 7.468944099378881219e+04 4.248447204968944425e+04 + 7.034161490683228476e+04 4.385093167701864149e+04 + 6.785714285714285506e+04 4.621118012422360334e+04 + 6.583850931677018525e+04 4.919254658385094081e+04 + 6.319875776397513982e+04 5.192546583850932075e+04 + 6.009316770186335634e+04 5.677018633540373412e+04 + 5.605590062111800216e+04 5.950310559006211406e+04 + 5.279503105590060295e+04 6.124223602484472940e+04 + 4.751552795031056303e+04 6.211180124223603343e+04 + 3.990683229813664366e+04 6.335403726708075556e+04 + 3.276397515527949508e+04 6.409937888198757719e+04 + 2.934782608695651652e+04 6.509316770186336362e+04 + 2.546583850931676716e+04 6.832298136645962950e+04 +streamseg2: |+ + 7.025161490683228476e+04 4.375093167701864149e+04 + 6.816770186335404287e+04 4.273291925465839449e+04 + 6.490683229813665093e+04 4.211180124223603343e+04 + 6.164596273291925900e+04 4.173913043478262261e+04 + 5.776397515527951327e+04 4.124223602484472940e+04 + 5.450310559006211406e+04 4.049689440993789322e+04 + 4.984472049689442065e+04 3.937888198757764621e+04 + 4.534161490683231386e+04 3.801242236024845624e+04 + 4.114906832298137306e+04 3.664596273291926627e+04 + 3.913043478260868869e+04 3.565217391304348712e+04 + 3.649068322981366509e+04 3.416149068322981475e+04 + 3.322981366459628043e+04 3.242236024844721760e+04 + 3.012422360248447148e+04 3.105590062111801672e+04 + 2.608695652173913550e+04 2.957521739130435890e+04 +streamseg3: |+ + 1.059006211180124228e+05 4.335403726708074828e+04 + 1.029503105590062187e+05 4.223602484472050128e+04 + 1.004658385093167890e+05 4.024844720496894297e+04 + 9.937888198757765349e+04 3.788819875776398112e+04 + 9.627329192546584818e+04 3.490683229813664366e+04 + 
9.285714285714286962e+04 3.316770186335403559e+04
+  8.897515527950311662e+04 3.093167701863354159e+04
+  8.338509316770188161e+04 2.795031055900621504e+04
+  7.872670807453416637e+04 2.670807453416148928e+04
+  7.329192546583851799e+04 2.385093167701863058e+04
+  6.863354037267081731e+04 2.111801242236025064e+04
+  6.304347826086958230e+04 1.863354037267081003e+04
+streamseg4: |+
+  1.371118012422360480e+05 4.472049689440994553e+04
+  1.321428571428571595e+05 4.720496894409938250e+04
+  1.285714285714285652e+05 4.981366459627330187e+04
+  1.243788819875776535e+05 5.341614906832298584e+04
+  1.189440993788819906e+05 5.540372670807454415e+04
+  1.125776397515527933e+05 5.627329192546584818e+04
+  1.065217391304347839e+05 5.726708074534162733e+04
+  1.020186335403726698e+05 5.913043478260870324e+04
+  9.409937888198759174e+04 6.273291925465840177e+04
+  9.192546583850932075e+04 6.633540372670808574e+04
+  8.881987577639751544e+04 7.242236024844722124e+04
+  8.586956521739131131e+04 7.552795031055902655e+04
+  8.369565217391305487e+04 7.962732919254660374e+04

From bad483b3910218dc828c993863d540793111090d Mon Sep 17 00:00:00 2001
From: Joshua Larsen
Date: Wed, 17 Jul 2024 09:44:13 -0700
Subject: [PATCH 47/57] update(Raster): add new methods and checks (#2267)

* update(Raster): add new methods and checks

* add feature `to_crs()` for re-projecting rasters

* add static method `raster_from_array()` to allow users to make rasters from data

* update `resample_to_grid()`
  - removed multithread and thread_pool kwargs
  - added initial check for raster/modelgrid intersection

* add testing for raster improvements

* Updates for raster_intersection_example.py

* Linting

* linting part 2

* Catch point with GeoSpatialUtil() in add_region()
---
 .../Notebooks/raster_intersection_example.py |  27 +-
 autotest/test_gridintersect.py               | 108 ++++++++
 flopy/utils/rasters.py                       | 256 ++++++++++++++++--
 flopy/utils/triangle.py                      |   1 +
 4 files changed, 374 insertions(+), 18 deletions(-)

diff --git a/.docs/Notebooks/raster_intersection_example.py b/.docs/Notebooks/raster_intersection_example.py
index 4c5eecbcd..447581faa 100644
--- a/.docs/Notebooks/raster_intersection_example.py
+++ b/.docs/Notebooks/raster_intersection_example.py
@@ -1,11 +1,12 @@
 # ---
 # jupyter:
 #   jupytext:
+#     notebook_metadata_filter: metadata
 #     text_representation:
 #       extension: .py
 #       format_name: light
 #       format_version: '1.5'
-#     jupytext_version: 1.14.5
+#     jupytext_version: 1.14.4
 #   kernelspec:
 #     display_name: Python 3 (ipykernel)
 #     language: python
@@ -570,6 +571,30 @@
 # The `ibound` array and the `top` array can be used to build or edit the BAS and DIS file objects in FloPy
 
+# ## Raster re-projection
+#
+# The `Raster` class has a built-in `to_crs()` method that allows for raster reprojection. The `to_crs()` method has two possible parameters that can be used to define reprojection and one additional parameter for in-place reprojection:
+#
+# - `crs`: the crs parameter can take many different formats of coordinate reference systems (WKT string, epsg code, pyproj.CRS, rasterio.CRS, proj4 string, epsg string, etc.)
+# - `epsg`: integer epsg number
+# - `inplace`: bool, default False creates a new raster object, True modifies the existing Raster object
+#
+# Here's example usage:
+
+cur_crs = rio.crs
+print(cur_crs)
+print(rio.transform)
+
+rio_reproj = rio.to_crs(crs="EPSG:4326")  # WGS84 dec. lat/lon
+print(rio_reproj.crs)
+print(rio_reproj.transform)
+
+# Reproject as an in-place operation
+
+rio.to_crs(epsg=4326, inplace=True)
+print(rio.crs)
+print(rio.transform)
+
 # ## Future development
 #
 # Potential features that draw on this functionality could include:
diff --git a/autotest/test_gridintersect.py b/autotest/test_gridintersect.py
index 4497cc3a2..268de09f9 100644
--- a/autotest/test_gridintersect.py
+++ b/autotest/test_gridintersect.py
@@ -1449,6 +1449,114 @@ def test_raster_sampling_methods(example_data_path):
     )
 
 
+@requires_pkg("rasterio")
+def test_raster_reprojection(example_data_path):
+    ws = example_data_path / "options" / "dem"
+    raster_name = "dem.img"
+
+    wgs_epsg = 4326
+    wgs_xmin = -120.32116799649168
+    wgs_ymax = 39.46620605907534
+
+    raster = Raster.load(ws / raster_name)
+
+    print(raster.crs.to_epsg())
+    wgs_raster = raster.to_crs(crs=f"EPSG:{wgs_epsg}")
+
+    if not wgs_raster.crs.to_epsg() == wgs_epsg:
+        raise AssertionError(f"Raster not converted to EPSG {wgs_epsg}")
+
+    transform = wgs_raster._meta["transform"]
+    if not np.isclose(transform.c, wgs_xmin) or not np.isclose(
+        transform.f, wgs_ymax
+    ):
+        raise AssertionError(f"Raster not reprojected to EPSG {wgs_epsg}")
+
+    raster.to_crs(epsg=wgs_epsg, inplace=True)
+    transform2 = raster._meta["transform"]
+    for ix, val in enumerate(transform):
+        if not np.isclose(val, transform2[ix]):
+            raise AssertionError("In place reprojection not working")
+
+
+@requires_pkg("rasterio")
+def test_create_raster_from_array_modelgrid(example_data_path):
+    ws = example_data_path / "options" / "dem"
+    raster_name = "dem.img"
+
+    raster = Raster.load(ws / raster_name)
+
+    xsize = 200
+    ysize = 100
+    xmin, xmax, ymin, ymax = raster.bounds
+
+    nbands = 5
+    nlay = 1
+    nrow = int(np.floor((ymax - ymin) / ysize))
+    ncol = int(np.floor((xmax - xmin) / xsize))
+
+    delc = np.full((nrow,), ysize)
+    delr = np.full((ncol,), xsize)
+
+    grid = flopy.discretization.StructuredGrid(
+        delc=delc,
+        delr=delr,
+        top=np.ones((nrow, ncol)),
+        botm=np.zeros((nlay, nrow, ncol)),
+        idomain=np.ones((nlay, nrow, ncol), dtype=int),
+        xoff=xmin,
+        yoff=ymin,
+        crs=raster.crs,
+    )
+
+    array = np.random.random((grid.ncpl * nbands,)) * 100
+    robj = Raster.raster_from_array(array, grid)
+
+    if nbands != len(robj.bands):
+        raise AssertionError("Number of raster bands is incorrect")
+
+    array = array.reshape((nbands, nrow, ncol))
+    for band in robj.bands:
+        ra = robj.get_array(band)
+        np.testing.assert_allclose(
+            array[band - 1],
+            ra,
+            err_msg="Array not properly reshaped or converted to raster",
+        )
+
+
+@requires_pkg("rasterio", "affine")
+def test_create_raster_from_array_transform(example_data_path):
+    import affine
+
+    ws = example_data_path / "options" / "dem"
+    raster_name = "dem.img"
+
+    raster = Raster.load(ws / raster_name)
+
+    transform = raster._meta["transform"]
+    array = raster.get_array(band=raster.bands[0])
+
+    array = np.expand_dims(array, axis=0)
+    # same location but shrink raster by factor 2
+    new_transform = affine.Affine(
+        transform.a / 2, 0, transform.c, 0, transform.e / 2, transform.f
+    )
+
+    robj = Raster.raster_from_array(
+        array, crs=raster.crs, transform=new_transform
+    )
+
+    rxmin, rxmax, rymin, rymax = robj.bounds
+    xmin, xmax, ymin, ymax = raster.bounds
+
+    if (
+        not ((xmax - xmin) / (rxmax - rxmin)) == 2
+        or not ((ymax - ymin) / (rymax - rymin)) == 2
+    ):
+        raise AssertionError("Transform based raster not working properly")
+
+
 if __name__ == "__main__":
     sgr = get_rect_grid(angrot=45.0, xyoffset=10.0)
     ls = LineString([(5,
10.0 + np.sqrt(200.0)), (15, 10.0 + np.sqrt(200.0))]) diff --git a/flopy/utils/rasters.py b/flopy/utils/rasters.py index e56fee2cd..fe9155121 100644 --- a/flopy/utils/rasters.py +++ b/flopy/utils/rasters.py @@ -95,10 +95,8 @@ def __init__( if isinstance(crs, CRS): pass - elif isinstance(crs, int): - crs = CRS.from_epsg(crs) - elif isinstance(crs, str): - crs = CRS.from_string(crs) + elif crs is not None: + crs = CRS.from_user_input(crs) else: TypeError("crs type not understood, provide an epsg or proj4") @@ -126,6 +124,13 @@ def __init__( if isinstance(rio_ds, rasterio.io.DatasetReader): self._dataset = rio_ds + @property + def crs(self): + """ + Returns a rasterio CRS object + """ + return self._meta["crs"] + @property def bounds(self): """ @@ -140,6 +145,13 @@ def bounds(self): return xmin, xmax, ymin, ymax + @property + def transform(self): + """ + Returns the affine transform for the raster + """ + return self._meta["transform"] + @property def bands(self): """ @@ -184,6 +196,126 @@ def ycenters(self): self.__xycenters() return self.__ycenters + def to_crs(self, crs=None, epsg=None, inplace=False): + """ + Method for re-projecting rasters from an existing CRS to a + new CRS + + Parameters + ---------- + crs : CRS user input of many different kinds + epsg : int + epsg code input that defines the coordinate system + inplace : bool + Boolean flag to indicate if the operation takes place "in place" + which reprojects the raster within the current object or the + default (False) to_crs() returns a reprojected Raster object + + Returns + ------- + Raster or None: returns a reprojected raster object if + inplace=False, otherwise the reprojected information + overwrites the current Raster object + + """ + from rasterio.crs import CRS + + if self.crs is None: + raise ValueError( + "Cannot transform naive geometries. " + "Please set a crs on the object first." 
+ ) + if crs is not None: + dst_crs = CRS.from_user_input(crs) + elif epsg is not None: + dst_crs = CRS.from_epsg(epsg) + else: + raise ValueError("Must pass either crs or epsg.") + + # skip if the input CRS and output CRS are the exact same + if self.crs.to_epsg() == dst_crs.to_epsg(): + return self + + return self.__transform(dst_crs=dst_crs, inplace=inplace) + + def __transform(self, dst_crs, inplace): + """ + + Parameters + ---------- + dst_crs : rasterio.CRS object + inplace : bool + + Returns + ------- + Raster or None: returns a reprojected raster object if + inplace=False, otherwise the reprojected information + overwrites the current Raster object + + """ + import rasterio + from rasterio.io import MemoryFile + from rasterio.warp import ( + Resampling, + calculate_default_transform, + reproject, + ) + + height = self._meta["height"] + width = self._meta["width"] + xmin, xmax, ymin, ymax = self.bounds + + transform, width, height = calculate_default_transform( + self.crs, dst_crs, width, height, xmin, ymin, xmax, ymax + ) + + kwargs = { + "transform": transform, + "width": width, + "height": height, + "crs": dst_crs, + "nodata": self.nodatavals[0], + "driver": self._meta["driver"], + "count": self._meta["count"], + "dtype": self._meta["dtype"], + } + + with MemoryFile() as memfile: + with memfile.open(**kwargs) as dst: + for band in self.bands: + reproject( + source=self.get_array(band), + destination=rasterio.band(dst, band), + src_transform=self.transform, + src_crs=self.crs, + dst_transform=transform, + dst_crs=dst_crs, + resampling=Resampling.nearest, + ) + with memfile.open() as dataset: + array = dataset.read() + bands = dataset.indexes + meta = dataset.meta + + if inplace: + for ix, band in enumerate(bands): + self.__arr_dict[band] = array[ix] + + self.__xcenters = None + self.__ycenters = None + self._meta.update({k: v for k, v in kwargs.items()}) + self._dataset = None + + else: + return Raster( + array, + bands, + meta["crs"], + meta["transform"], + meta["nodata"], + meta["driver"], + ) + def __xycenters(self): """ Method to create np.arrays of the xy-cell centers @@ -327,8 +459,6 @@ def resample_to_grid( modelgrid, band, method="nearest", - multithread=False, - thread_pool=2, extrapolate_edges=False, ): """ @@ -363,11 +493,6 @@ def resample_to_grid( `'mode'` for majority sampling - multithread : bool - DEPRECATED boolean flag indicating if multithreading should be - used with the ``mean`` and ``median`` sampling methods - thread_pool : int - DEPRECATED number of threads to use for mean and median sampling extrapolate_edges : bool boolean flag indicating if areas without data should be filled using the ``nearest`` interpolation method. This option @@ -377,16 +502,18 @@ def resample_to_grid( ------- np.array """ - if multithread: - warnings.warn( - "multithread option has been deprecated and will be removed " - "in flopy version 3.3.8" - ) - import_optional_dependency("scipy") rasterstats = import_optional_dependency("rasterstats") from scipy.interpolate import griddata + xmin, xmax, ymin, ymax = modelgrid.extent + rxmin, rxmax, rymin, rymax = self.bounds + if any([rxmax < xmin, rxmin > xmax, rymax < ymin, rymin > ymax]): + raise AssertionError( + "Raster and model grid do not intersect. 
Check that the grid " + "and raster are in the same coordinate reference system" + ) + method = method.lower() if method in ("linear", "nearest", "cubic"): xc = modelgrid.xcellcenters @@ -770,6 +897,101 @@ def load(raster: Union[str, os.PathLike]): meta["driver"], ) + @staticmethod + def raster_from_array( + array, + modelgrid=None, + nodataval=1e-10, + crs=None, + transform=None, + ): + """ + Method to create a raster from an array. When using a modelgrid to + define the transform, delc and delr must be uniform in each dimension. + Otherwise, the user can define their own transform using the affine + package. + + Parameters + ---------- + array : np.ndarray + array of (n-bands, nrows, ncols) for the raster + modelgrid : flopy.discretization.StructuredGrid + StructuredGrid object (optional), but transform must be defined + if a StructuredGrid is not supplied + nodataval : (int, float) + Null value + crs : coordinate reference system input of many types + transform : affine.Affine + optional affine transform that defines the spatial parameters + of the raster. This must be supplied if a modelgrid is not + used to define the transform + + Returns + ------- + Raster object + """ + from affine import Affine + + if not isinstance(array, np.ndarray): + array = np.array(array) + + if modelgrid is not None: + if crs is None: + if modelgrid.crs is None: + raise ValueError( + "Cannot create a raster from a grid without a " + "coordinate reference system, please provide a crs " + "using crs=" + ) + crs = modelgrid.crs + + if modelgrid.grid_type != "structured": + raise TypeError( + f"{type(modelgrid)} discretizations are not supported" + ) + + if not np.all(modelgrid.delc == modelgrid.delc[0]): + raise AssertionError("DELC must have a uniform spacing") + + if not np.all(modelgrid.delr == modelgrid.delr[0]): + raise AssertionError("DELR must have a uniform spacing") + + yul = modelgrid.yvertices[0, 0] + xul = modelgrid.xvertices[0, 0] + angrot = modelgrid.angrot + transform = Affine( + modelgrid.delr[0], 0, xul, 0, -modelgrid.delc[0], yul + ) + + if angrot != 0: + transform *= Affine.rotation(angrot) + + if array.size % modelgrid.ncpl != 0: + raise AssertionError( + f"Array size {array.size} is not a multiple of the " + f"number of cells per layer in the model grid " + f"{modelgrid.ncpl}" + ) + + array = array.reshape((-1, modelgrid.nrow, modelgrid.ncol)) + + if transform is not None: + if crs is None: + raise ValueError( + "Cannot create a raster without a coordinate reference " + "system, please use crs= to provide a coordinate reference" + ) + + bands, height, width = array.shape + + return Raster( + array, + bands=list(range(1, bands + 1)), + crs=crs, + transform=transform, + nodataval=nodataval, + ) + def plot(self, ax=None, contour=False, **kwargs): """ Method to plot raster layers or contours. 
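The new `Raster` methods in this commit compose end to end: build a raster from an array and a model grid, reproject it, and sample it back onto a grid. The sketch below is illustrative only (the grid dimensions, spacing, and EPSG codes are arbitrary assumptions, not part of the patch) and relies on the optional rasterio dependency, plus scipy and rasterstats for `resample_to_grid()`:

import numpy as np

import flopy
from flopy.utils import Raster

# a small structured grid with a known CRS (all values are arbitrary)
nrow, ncol = 10, 20
grid = flopy.discretization.StructuredGrid(
    delc=np.full((nrow,), 100.0),
    delr=np.full((ncol,), 100.0),
    xoff=500000.0,
    yoff=4200000.0,
    crs="EPSG:26916",  # assumed projected CRS for this example
)

# raster_from_array(): one band of data draped over the grid
array = np.random.random((nrow, ncol))
raster = Raster.raster_from_array(array, modelgrid=grid)

# resample_to_grid(): sample the first band back onto the model grid;
# the method now raises if the raster and grid extents do not intersect
values = raster.resample_to_grid(grid, band=raster.bands[0], method="nearest")

# to_crs(): reproject, either returning a new Raster object ...
wgs = raster.to_crs(epsg=4326)
print(wgs.crs, wgs.transform)

# ... or modifying the existing Raster in place
raster.to_crs(crs="EPSG:4326", inplace=True)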
diff --git a/flopy/utils/triangle.py b/flopy/utils/triangle.py index d001bd73c..e378fb318 100644 --- a/flopy/utils/triangle.py +++ b/flopy/utils/triangle.py @@ -131,6 +131,7 @@ def add_region(self, point, attribute=0, maximum_area=None): None """ + point = GeoSpatialUtil(point, shapetype="point").points self._regions.append([point, attribute, maximum_area]) def build(self, verbose=False): From 7dec7c52db7c7bf3f8bca61de4d4a953ac1317d2 Mon Sep 17 00:00:00 2001 From: langevin-usgs Date: Fri, 26 Jul 2024 14:50:26 -0500 Subject: [PATCH 48/57] feat(lgr-disv): add to_disv_gridprops() method to lgr object (#2271) * feat(lgr-disv): add new to_disv_gridprops to lgr object * ruff ruff * modflow6 lgr tutorial was mistakenly marked as modflow-lgr tutorial * add test * add layer information to gridprops and update test * ruff * minor tweaks to require idomain the same for all layers * add deprecation warnings for gridlist_to_disv_gridprops * ruff * ruff --- ...gr_tutorial01.py => mf6_lgr_tutorial01.py} | 0 .docs/tutorials.rst | 10 +- autotest/test_export.py | 89 ++-- autotest/test_grid.py | 40 +- autotest/test_lgrutil.py | 74 ++- flopy/utils/cvfdutil.py | 13 + flopy/utils/lgrutil.py | 425 +++++++++++++++++- 7 files changed, 577 insertions(+), 74 deletions(-) rename .docs/Notebooks/{lgr_tutorial01.py => mf6_lgr_tutorial01.py} (100%) diff --git a/.docs/Notebooks/lgr_tutorial01.py b/.docs/Notebooks/mf6_lgr_tutorial01.py similarity index 100% rename from .docs/Notebooks/lgr_tutorial01.py rename to .docs/Notebooks/mf6_lgr_tutorial01.py diff --git a/.docs/tutorials.rst b/.docs/tutorials.rst index b1aaf3c17..7b2525504 100644 --- a/.docs/tutorials.rst +++ b/.docs/tutorials.rst @@ -35,6 +35,7 @@ MODFLOW 6 Notebooks/mf6_output_tutorial01 Notebooks/mf6_sfr_tutorial01 Notebooks/mf6_tutorial01 + Notebooks/mf6_lgr_tutorial01 MODFLOW-2005 @@ -50,15 +51,6 @@ MODFLOW-2005 Notebooks/mf_tutorial02 -MODFLOW-LGR ------------ - -.. toctree:: - :maxdepth: 2 - - Notebooks/lgr_tutorial01 - - MODFLOW-NWT ----------- diff --git a/autotest/test_export.py b/autotest/test_export.py index 4d9b9f379..0028ada34 100644 --- a/autotest/test_export.py +++ b/autotest/test_export.py @@ -1718,50 +1718,51 @@ def test_vtk_export_disv1_model(function_tmpdir): idomain=np.ones((nlay, nrow, ncol)), ) - from flopy.utils.cvfdutil import gridlist_to_disv_gridprops - - gridprops = gridlist_to_disv_gridprops([mg]) - gridprops["top"] = 0 - gridprops["botm"] = np.zeros((nlay, nrow * ncol), dtype=float) - 1 - gridprops["nlay"] = nlay - - disv = ModflowGwfdisv(gwf, **gridprops) - ic = ModflowGwfic(gwf, strt=10) - npf = ModflowGwfnpf(gwf) - - # Export model without specifying packages_names parameter - # create the vtk output - gwf = sim.get_model() - vtkobj = Vtk(gwf, binary=False) - vtkobj.add_model(gwf) - f = function_tmpdir / "gwf.vtk" - vtkobj.write(f) - - # load the output using the vtk standard library - gridreader = vtkUnstructuredGridReader() - gridreader.SetFileName(str(f)) - gridreader.Update() - grid = gridreader.GetOutput() - - # get the points - vtk_points = grid.GetPoints() - vtk_points = vtk_points.GetData() - vtk_points = vtk_to_numpy(vtk_points) - - # get cell locations (ia format of point to cell relationship) - cell_locations = vtk_to_numpy(grid.GetCellLocationsArray()) - cell_locations_answer = np.array([0, 8, 16, 24, 32, 40, 48, 56, 64]) - print(f"Found cell locations {cell_locations} in vtk file.") - print(f"Expecting cell locations {cell_locations_answer}") - errmsg = "vtk cell locations do not match expected result." 
- assert np.allclose(cell_locations, cell_locations_answer), errmsg - - cell_types = vtk_to_numpy(grid.GetCellTypesArray()) - cell_types_answer = np.array(9 * [42]) - print(f"Found cell types {cell_types} in vtk file.") - print(f"Expecting cell types {cell_types_answer}") - errmsg = "vtk cell types do not match expected result." - assert np.allclose(cell_types, cell_types_answer), errmsg + with pytest.deprecated_call(): + from flopy.utils.cvfdutil import gridlist_to_disv_gridprops + + gridprops = gridlist_to_disv_gridprops([mg]) + gridprops["top"] = 0 + gridprops["botm"] = np.zeros((nlay, nrow * ncol), dtype=float) - 1 + gridprops["nlay"] = nlay + + disv = ModflowGwfdisv(gwf, **gridprops) + ic = ModflowGwfic(gwf, strt=10) + npf = ModflowGwfnpf(gwf) + + # Export model without specifying packages_names parameter + # create the vtk output + gwf = sim.get_model() + vtkobj = Vtk(gwf, binary=False) + vtkobj.add_model(gwf) + f = function_tmpdir / "gwf.vtk" + vtkobj.write(f) + + # load the output using the vtk standard library + gridreader = vtkUnstructuredGridReader() + gridreader.SetFileName(str(f)) + gridreader.Update() + grid = gridreader.GetOutput() + + # get the points + vtk_points = grid.GetPoints() + vtk_points = vtk_points.GetData() + vtk_points = vtk_to_numpy(vtk_points) + + # get cell locations (ia format of point to cell relationship) + cell_locations = vtk_to_numpy(grid.GetCellLocationsArray()) + cell_locations_answer = np.array([0, 8, 16, 24, 32, 40, 48, 56, 64]) + print(f"Found cell locations {cell_locations} in vtk file.") + print(f"Expecting cell locations {cell_locations_answer}") + errmsg = "vtk cell locations do not match expected result." + assert np.allclose(cell_locations, cell_locations_answer), errmsg + + cell_types = vtk_to_numpy(grid.GetCellTypesArray()) + cell_types_answer = np.array(9 * [42]) + print(f"Found cell types {cell_types} in vtk file.") + print(f"Expecting cell types {cell_types_answer}") + errmsg = "vtk cell types do not match expected result." 
+ assert np.allclose(cell_types, cell_types_answer), errmsg @pytest.mark.mf6 diff --git a/autotest/test_grid.py b/autotest/test_grid.py index f716d20bc..142fed140 100644 --- a/autotest/test_grid.py +++ b/autotest/test_grid.py @@ -943,25 +943,27 @@ def test_tocvfd3(): yoff=200, idomain=idomain, ) - gridprops = gridlist_to_disv_gridprops([sg1, sg2]) - assert "ncpl" in gridprops - assert "nvert" in gridprops - assert "vertices" in gridprops - assert "cell2d" in gridprops - - ncpl = gridprops["ncpl"] - nvert = gridprops["nvert"] - vertices = gridprops["vertices"] - cell2d = gridprops["cell2d"] - assert ncpl == 121 - assert nvert == 148 - assert len(vertices) == nvert - assert len(cell2d) == 121 - - # spot check information for cell 28 (zero based) - answer = [28, 250.0, 150.0, 7, 38, 142, 143, 45, 46, 44, 38] - for i, j in zip(cell2d[28], answer): - assert i == j, f"{i} not equal {j}" + + with pytest.deprecated_call(): + gridprops = gridlist_to_disv_gridprops([sg1, sg2]) + assert "ncpl" in gridprops + assert "nvert" in gridprops + assert "vertices" in gridprops + assert "cell2d" in gridprops + + ncpl = gridprops["ncpl"] + nvert = gridprops["nvert"] + vertices = gridprops["vertices"] + cell2d = gridprops["cell2d"] + assert ncpl == 121 + assert nvert == 148 + assert len(vertices) == nvert + assert len(cell2d) == 121 + + # spot check information for cell 28 (zero based) + answer = [28, 250.0, 150.0, 7, 38, 142, 143, 45, 46, 44, 38] + for i, j in zip(cell2d[28], answer): + assert i == j, f"{i} not equal {j}" @requires_pkg("shapely") diff --git a/autotest/test_lgrutil.py b/autotest/test_lgrutil.py index 439e4d5e6..67d7a5325 100644 --- a/autotest/test_lgrutil.py +++ b/autotest/test_lgrutil.py @@ -1,6 +1,6 @@ import numpy as np -from flopy.utils.lgrutil import Lgr +from flopy.utils.lgrutil import Lgr, LgrToDisv def test_lgrutil(): @@ -155,3 +155,75 @@ def test_lgrutil2(): ] assert np.allclose(lgr.delr, answer), f"{lgr.delr} /= {answer}" assert np.allclose(lgr.delc, answer), f"{lgr.delc} /= {answer}" + + +def test_lgrutil3(): + # Define parent grid information + xoffp = 0.0 + yoffp = 0.0 + nlayp = 3 + nrowp = 3 + ncolp = 3 + + dx = 100.0 + dy = 100.0 + dz = 10.0 + delrp = dx * np.ones(ncolp) + delcp = dy * np.ones(nrowp) + topp = dz * np.ones((nrowp, ncolp), dtype=float) + botmp = np.empty((nlayp, nrowp, ncolp), dtype=float) + for k in range(nlayp): + botmp[k] = -(k + 1) * dz + idomainp = np.ones((nlayp, nrowp, ncolp), dtype=int) + idomainp[:, nrowp // 2, ncolp // 2] = 0 + ncpp = 3 + ncppl = nlayp * [1] + lgr = Lgr( + nlayp, + nrowp, + ncolp, + delrp, + delcp, + topp, + botmp, + idomainp, + ncpp=ncpp, + ncppl=ncppl, + xllp=xoffp, + yllp=yoffp, + ) + + # check to make sure gridprops is accessible from lgr + gridprops = lgr.to_disv_gridprops() + assert "ncpl" in gridprops + assert "nvert" in gridprops + assert "vertices" in gridprops + assert "nlay" in gridprops + assert "top" in gridprops + assert "botm" in gridprops + assert gridprops["ncpl"] == 17 + assert gridprops["nvert"] == 32 + assert gridprops["nlay"] == 3 + + # test the lgr to disv class + lgrtodisv = LgrToDisv(lgr) + + # test guts of LgrToDisv to make sure hanging vertices added correctly + assert lgrtodisv.right_face_hanging[(1, 0)] == [0, 4, 8, 12] + assert lgrtodisv.left_face_hanging[(1, 2)] == [3, 7, 11, 15] + assert lgrtodisv.back_face_hanging[(2, 1)] == [12, 13, 14, 15] + assert lgrtodisv.front_face_hanging[(0, 1)] == [0, 1, 2, 3] + + assert lgrtodisv.iverts[1] == [1, 2, 6, 18, 17, 5] + assert lgrtodisv.iverts[3] == [4, 5, 20, 24, 9, 8] 
+    assert lgrtodisv.iverts[4] == [6, 7, 11, 10, 27, 23]
+    assert lgrtodisv.iverts[6] == [9, 29, 30, 10, 14, 13]
+
+    assert np.allclose(gridprops["top"], dz * np.ones((17,)))
+
+    assert gridprops["botm"].shape == (3, 17)
+    b = np.empty((3, 17))
+    b[0] = -dz
+    b[1] = -2 * dz
+    b[2] = -3 * dz
+    assert np.allclose(gridprops["botm"], b)
diff --git a/flopy/utils/cvfdutil.py b/flopy/utils/cvfdutil.py
index fea3b5602..3a59031d2 100644
--- a/flopy/utils/cvfdutil.py
+++ b/flopy/utils/cvfdutil.py
@@ -1,3 +1,5 @@
+import warnings
+
 import numpy as np
 import pandas as pd
 
@@ -390,6 +392,10 @@ def gridlist_to_disv_gridprops(gridlist):
     be numbered according to consecutive numbering of active cells in
     the grid list.
 
+    This function is deprecated in 3.8 and will be removed in 3.9. Use the
+    functionality in flopy.utils.lgrutil.Lgr() to create a DISV mesh for a
+    nested grid.
+
     Parameters
     ----------
     gridlist : list
@@ -403,6 +409,13 @@ def gridlist_to_disv_gridprops(gridlist):
         modflow6 disv package.
 
     """
+    warnings.warn(
+        "the gridlist_to_disv_gridprops function is deprecated and will be "
+        "removed in version 3.9. Use flopy.utils.lgrutil.Lgr() instead, which "
+        "allows a nested grid to be created and exported to a DISV mesh.",
+        PendingDeprecationWarning,
+    )
+
     verts, iverts = gridlist_to_verts(gridlist)
     gridprops = get_disv_gridprops(verts, iverts)
     return gridprops
diff --git a/flopy/utils/lgrutil.py b/flopy/utils/lgrutil.py
index 043b1a999..467f46a70 100644
--- a/flopy/utils/lgrutil.py
+++ b/flopy/utils/lgrutil.py
@@ -2,6 +2,7 @@
 
 from ..discretization import StructuredGrid
 from ..modflow import Modflow
+from .cvfdutil import get_disv_gridprops
 from .util_array import Util2d, Util3d
 
 
@@ -162,7 +163,7 @@ def __init__(
         assert idomainp.shape == (nlayp, nrowp, ncolp)
         self.idomain = idomainp
         idxl, idxr, idxc = np.asarray(idomainp == 0).nonzero()
-        assert idxl.shape[0] > 1, "no zero values found in idomain"
+        assert idxl.shape[0] > 0, "no zero values found in idomain"
 
         # child cells per parent and child cells per parent layer
         self.ncpp = ncpp
@@ -585,3 +586,425 @@ def child(self):
             yorigin,
         )
         return simple_regular_grid
+
+    def to_disv_gridprops(self):
+        """
+        Create and return a gridprops dictionary that can be
+        used to create a disv grid (instead of a separate parent
+        and child representation). The gridprops dictionary can
+        be unpacked into the flopy.mf6.Modflowdisv() constructor
+        and the flopy.discretization.VertexGrid() constructor.
+
+        Note that export capability will only work if the parent
+        and child models have corresponding layers.
+
+        Returns
+        -------
+        gridprops : dict
+            Dictionary containing ncpl, nvert, vertices, cell2d,
+            nlay, top, and botm
+
+        """
+        return LgrToDisv(self).get_disv_gridprops()
+
+
+class LgrToDisv:
+    def __init__(self, lgr):
+        """
+        Helper class used to convert an Lgr() object into
+        the grid properties needed to create a disv vertex
+        nested grid. After instantiation, self.verts and
+        self.iverts are available.
+
+        The primary work of this class is to identify hanging
+        vertices along the shared parent-child boundary and
+        include these hanging vertices in the vertex incidence
+        list for parent cells.
+
+        Parameters
+        ----------
+        lgr : Lgr instance
+            Lgr() object describing a parent-child relation
+
+        """
+
+        # store information
+        self.lgr = lgr
+        self.pgrid = lgr.parent.modelgrid
+        self.cgrid = lgr.child.modelgrid
+
+        # count active parent and child cells
+        self.ncpl_parent = np.count_nonzero(self.pgrid.idomain[0] > 0)
+        self.ncpl_child = np.count_nonzero(self.cgrid.idomain[0] > 0)
+        self.ncpl = self.ncpl_child + self.ncpl_parent
+
+        # find child vertices that act as hanging vertices on parent
+        # model cells
+        self.right_face_hanging = None
+        self.left_face_hanging = None
+        self.front_face_hanging = None
+        self.back_face_hanging = None
+        self.parent_ij_to_global = None
+        self.child_ij_to_global = None
+        self.find_hanging_vertices()
+
+        # build global verts and iverts keeping only idomain > 0
+        self.verts = None
+        self.iverts = None
+        self.build_verts_iverts()
+
+        # todo: remove unused vertices?
+
+    def find_hanging_vertices(self):
+        """
+        Hanging vertices are vertices that must be included
+        along the edge of parent cells. These hanging vertices
+        mark the locations of corners of adjacent child cells.
+        Hanging vertices are not strictly
+        necessary to define the shape of a parent cell, but they are
+        required by MODFLOW to describe connections between
+        parent and child cells.
+
+        This routine finds hanging vertices for parent cells along
+        a parent-child boundary. These hanging vertices are
+        stored in 4 member dictionaries, called right_face_hanging,
+        left_face_hanging, front_face_hanging, and back_face_hanging.
+        These dictionaries are used subsequently to insert
+        hanging vertices into the iverts array.
+
+        """
+
+        # create dictionaries for parent left, right, back, and front
+        # faces that have a key that is parent (row, col)
+        # and a value that is a list of child vertex numbers
+
+        # this list of child vertex numbers will be ordered from
+        # left to right (back/front) and from back to front (left/right)
+        # so when they are used later, two of them will need to be
+        # reversed so that clockwise ordering is maintained
+
+        nrowc = self.lgr.nrow
+        ncolc = self.lgr.ncol
+        iverts = self.cgrid.iverts
+        cidomain = self.lgr.get_idomain()
+
+        self.right_face_hanging = {}
+        self.left_face_hanging = {}
+        self.front_face_hanging = {}
+        self.back_face_hanging = {}
+
+        # map (i, j) to global cell number
+        self.parent_ij_to_global = {}
+        self.child_ij_to_global = {}
+
+        kc = 0
+        nodec = 0
+        for ic in range(nrowc):
+            for jc in range(ncolc):
+                plist = self.lgr.get_parent_connections(kc, ic, jc)
+                for (kp, ip, jp), idir in plist:
+                    if cidomain[kc, ic, jc] == 0:
+                        continue
+
+                    if (
+                        idir == -1
+                    ):  # left child face connected to right parent face
+                        # child vertices 0 and 3 added as hanging nodes
+                        if (ip, jp) in self.right_face_hanging:
+                            hlist = self.right_face_hanging.pop((ip, jp))
+                        else:
+                            hlist = []
+                        ivlist = iverts[nodec]
+                        for iv in (ivlist[0], ivlist[3]):
+                            if iv not in hlist:
+                                hlist.append(iv)
+                        self.right_face_hanging[(ip, jp)] = hlist
+
+                    elif idir == 1:
+                        # child vertices 1 and 2 added as hanging nodes
+                        if (ip, jp) in self.left_face_hanging:
+                            hlist = self.left_face_hanging.pop((ip, jp))
+                        else:
+                            hlist = []
+                        ivlist = iverts[nodec]
+                        for iv in (ivlist[1], ivlist[2]):
+                            if iv not in hlist:
+                                hlist.append(iv)
+                        self.left_face_hanging[(ip, jp)] = hlist
+
+                    elif idir == 2:
+                        # child vertices 0 and 1 added as hanging nodes
+                        if (ip, jp) in self.front_face_hanging:
+                            hlist = self.front_face_hanging.pop((ip, jp))
+                        else:
+                            hlist = []
+                        ivlist = iverts[nodec]
+                        for iv in (ivlist[0],
ivlist[1]): + if iv not in hlist: + hlist.append(iv) + self.front_face_hanging[(ip, jp)] = hlist + + elif idir == -2: + # child vertices 3 and 2 added as hanging nodes + if (ip, jp) in self.back_face_hanging: + hlist = self.back_face_hanging.pop((ip, jp)) + else: + hlist = [] + ivlist = iverts[nodec] + for iv in (ivlist[3], ivlist[2]): + if iv not in hlist: + hlist.append(iv) + self.back_face_hanging[(ip, jp)] = hlist + + nodec += 1 + + def build_verts_iverts(self): + """ + Build the verts and iverts members. self.verts is a 2d + numpy array of size (nvert, 2). Column 1 is x and column 2 + is y. self.iverts is a list of size ncpl (number of cells + per layer) with each entry being the list of vertex indices + that define the cell. + + """ + + # stack vertex arrays; these will have more points than necessary, + # because parent and child vertices will overlap at corners, but + # duplicate vertices will be filtered later + pverts = self.pgrid.verts + cverts = self.cgrid.verts + nverts_parent = pverts.shape[0] + nverts_child = cverts.shape[0] + verts = np.vstack((pverts, cverts)) + + # build iverts list first with active parent cells + iverts = [] + iglo = 0 + for i in range(self.pgrid.nrow): + for j in range(self.pgrid.ncol): + if self.pgrid.idomain[0, i, j] > 0: + ivlist = self.pgrid._build_structured_iverts(i, j) + + # merge hanging vertices if they exist + ivlist = self.merge_hanging_vertices(i, j, ivlist) + + iverts.append(ivlist) + self.parent_ij_to_global[(i, j)] = iglo + iglo += 1 + + # now add active child cells + for i in range(self.cgrid.nrow): + for j in range(self.cgrid.ncol): + if self.cgrid.idomain[0, i, j] > 0: + ivlist = [ + iv + nverts_parent + for iv in self.cgrid._build_structured_iverts(i, j) + ] + iverts.append(ivlist) + self.child_ij_to_global[(i, j)] = iglo + iglo += 1 + self.verts = verts + self.iverts = iverts + + def merge_hanging_vertices(self, ip, jp, ivlist): + """ + Given a list of vertices (ivlist) for parent row and column + (ip, jp) merge hanging vertices from adjacent child cells + into ivlist. 
+
+        Parameters
+        ----------
+        ip : int
+            parent cell row number
+
+        jp : int
+            parent cell column number
+
+        ivlist : list of ints
+            list of vertex indices that define the parent
+            cell (ip, jp)
+
+        Returns
+        -------
+        ivlist : list of ints
+            modified list of vertices that now also contains
+            any hanging vertices needed to properly define
+            a parent cell adjacent to child cells
+
+        """
+        assert len(ivlist) == 4
+        child_ivlist_offset = self.pgrid.verts.shape[0]
+
+        # construct back edge
+        idx = 0
+        reverse = False
+        face_hanging = self.back_face_hanging
+        back_edge = [ivlist[idx]]
+        if (ip, jp) in face_hanging:
+            hlist = face_hanging[(ip, jp)]
+            if len(hlist) > 2:
+                hlist = hlist[1:-1]  # do not include two ends
+            hlist = [h + child_ivlist_offset for h in hlist]
+            if reverse:
+                hlist = hlist[::-1]
+        else:
+            hlist = []
+        back_edge = [ivlist[idx]] + hlist
+
+        # construct right edge
+        idx = 1
+        reverse = False
+        face_hanging = self.right_face_hanging
+        right_edge = [ivlist[idx]]
+        if (ip, jp) in face_hanging:
+            hlist = face_hanging[(ip, jp)]
+            if len(hlist) > 2:
+                hlist = hlist[1:-1]  # do not include two ends
+            hlist = [h + child_ivlist_offset for h in hlist]
+            if reverse:
+                hlist = hlist[::-1]
+        else:
+            hlist = []
+        right_edge = [ivlist[idx]] + hlist
+
+        # construct front edge
+        idx = 2
+        reverse = True
+        face_hanging = self.front_face_hanging
+        front_edge = [ivlist[idx]]
+        if (ip, jp) in face_hanging:
+            hlist = face_hanging[(ip, jp)]
+            if len(hlist) > 2:
+                hlist = hlist[1:-1]  # do not include two ends
+            hlist = [h + child_ivlist_offset for h in hlist]
+            if reverse:
+                hlist = hlist[::-1]
+        else:
+            hlist = []
+        front_edge = [ivlist[idx]] + hlist
+
+        # construct left edge
+        idx = 3
+        reverse = True
+        face_hanging = self.left_face_hanging
+        left_edge = [ivlist[idx]]
+        if (ip, jp) in face_hanging:
+            hlist = face_hanging[(ip, jp)]
+            if len(hlist) > 2:
+                hlist = hlist[1:-1]  # do not include two ends
+            hlist = [h + child_ivlist_offset for h in hlist]
+            if reverse:
+                hlist = hlist[::-1]
+        else:
+            hlist = []
+        left_edge = [ivlist[idx]] + hlist
+
+        ivlist = back_edge + right_edge + front_edge + left_edge
+
+        return ivlist
+
+    def get_xcyc(self):
+        """
+        Construct a 2d array of size (ncpl, 2) that
+        contains the cell centers.
+
+        Returns
+        -------
+        xcyc : ndarray
+            2d array of x, y positions for cell centers
+
+        """
+        xcyc = np.empty((self.ncpl, 2))
+        pidx = self.pgrid.idomain[0] > 0
+        cidx = self.cgrid.idomain[0] > 0
+        px = self.pgrid.xcellcenters[pidx].flatten()
+        cx = self.cgrid.xcellcenters[cidx].flatten()
+        xcyc[:, 0] = np.vstack(
+            (np.atleast_2d(px).T, np.atleast_2d(cx).T)
+        ).flatten()
+        py = self.pgrid.ycellcenters[pidx].flatten()
+        cy = self.cgrid.ycellcenters[cidx].flatten()
+        xcyc[:, 1] = np.vstack(
+            (np.atleast_2d(py).T, np.atleast_2d(cy).T)
+        ).flatten()
+        return xcyc
+
+    def get_top(self):
+        """
+        Construct a 1d array of size (ncpl) that
+        contains the cell tops.
+
+        Returns
+        -------
+        top : ndarray
+            1d array of top elevations
+
+        """
+        top = np.empty((self.ncpl,))
+        pidx = self.pgrid.idomain[0] > 0
+        cidx = self.cgrid.idomain[0] > 0
+        pa = self.pgrid.top[pidx].flatten()
+        ca = self.cgrid.top[cidx].flatten()
+        top[:] = np.hstack((pa, ca))
+        return top
+
+    def get_botm(self):
+        """
+        Construct a 2d array of size (nlay, ncpl) that
+        contains the cell bottoms.
+
+        Returns
+        -------
+        botm : ndarray
+            2d array of bottom elevations
+
+        """
+        botm = np.empty((self.lgr.nlay, self.ncpl))
+        pidx = self.pgrid.idomain[0] > 0
+        cidx = self.cgrid.idomain[0] > 0
+        for k in range(self.lgr.nlay):
+            pa = self.pgrid.botm[k, pidx].flatten()
+            ca = self.cgrid.botm[k, cidx].flatten()
+            botm[k, :] = np.hstack((pa, ca))
+        return botm
+
+    def get_disv_gridprops(self):
+        """
+        Create and return a gridprops dictionary that can be
+        used to create a disv grid (instead of a separate parent
+        and child representation). The gridprops dictionary can
+        be unpacked into the flopy.mf6.Modflowdisv() constructor
+        and the flopy.discretization.VertexGrid() constructor.
+
+        Note that export capability will only work if the parent
+        and child models have corresponding layers.
+
+        Returns
+        -------
+        gridprops : dict
+            Dictionary containing ncpl, nvert, vertices, cell2d,
+            nlay, top, and botm
+
+        """
+
+        # check
+        assert (
+            self.lgr.ncppl.min() == self.lgr.ncppl.max()
+        ), "Exporting disv grid properties requires ncppl to be 1."
+        assert (
+            self.lgr.nlayp == self.lgr.nlay
+        ), "Exporting disv grid properties requires parent and child models to have the same number of layers."
+        for k in range(self.lgr.nlayp - 1):
+            assert np.allclose(
+                self.lgr.idomain[k], self.lgr.idomain[k + 1]
+            ), "Exporting disv grid properties requires the parent idomain to be the same for all layers."
+
+        # get information and build gridprops
+        xcyc = self.get_xcyc()
+        top = self.get_top()
+        botm = self.get_botm()
+        gridprops = get_disv_gridprops(self.verts, self.iverts, xcyc=xcyc)
+        gridprops["nlay"] = self.lgr.nlay
+        gridprops["top"] = top
+        gridprops["botm"] = botm
+        return gridprops

From 00de95e334af450a5bd024801c143f72242fc62a Mon Sep 17 00:00:00 2001
From: wpbonelli
Date: Mon, 29 Jul 2024 13:28:24 -0400
Subject: [PATCH 49/57] docs(get-modflow): add note on mac/venv bindir discovery (#2275)

Warn about #2274
---
 .docs/md/get_modflow.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.docs/md/get_modflow.md b/.docs/md/get_modflow.md
index 5e9cd3bcf..f1581376a 100644
--- a/.docs/md/get_modflow.md
+++ b/.docs/md/get_modflow.md
@@ -71,6 +71,8 @@ Other auto-select options are only available if the current user can write files
 - `:system` - use `/usr/local/bin`
 - `:windowsapps` - use `%LOCALAPPDATA%\Microsoft\WindowsApps`
 
+**Note:** on macOS, the Python bin directory for a freshly created `venv` environment may not be detected until the environment is deactivated and reactivated.
+
 ## Selecting a distribution
 
 By default the distribution from the [`MODFLOW-USGS/executables` repository](https://github.com/MODFLOW-USGS/executables) is installed. This includes the MODFLOW 6 binary `mf6` and over 20 other related programs.
The utility can also install from the main [MODFLOW 6 repo](https://github.com/MODFLOW-USGS/modflow6) or the [nightly build](https://github.com/MODFLOW-USGS/modflow6-nightly-build) distributions, which contain only:

From 4aab493853d381d7bb2c2aef49df231345f39d87 Mon Sep 17 00:00:00 2001
From: wpbonelli
Date: Mon, 29 Jul 2024 14:18:17 -0400
Subject: [PATCH 50/57] ci: add shapely marker to binaryfile test (#2276)

Fix the failing optional dependency CI tests:
https://github.com/modflowpy/flopy/actions/runs/10140767481/job/28036619744#step:7:3899
---
 autotest/test_binaryfile.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/autotest/test_binaryfile.py b/autotest/test_binaryfile.py
index e56e20d9c..09351f4bf 100644
--- a/autotest/test_binaryfile.py
+++ b/autotest/test_binaryfile.py
@@ -11,7 +11,7 @@
 import pytest
 from matplotlib import pyplot as plt
 from matplotlib.axes import Axes
-from modflow_devtools.markers import requires_exe
+from modflow_devtools.markers import requires_exe, requires_pkg
 
 import flopy
 from flopy.modflow import Modflow
@@ -541,6 +541,7 @@ def test_binaryfile_reverse_mf6_dis(function_tmpdir):
     assert np.allclose(budget, -budget_rev)
 
 
+@requires_pkg("shapely")
 def test_binaryfile_reverse_mf6_disv(function_tmpdir):
     name = "reverse_disv"
     sim = flopy.mf6.MFSimulation(

From 576cefe5e9826a53a5085d7e3aee9ce7765be22f Mon Sep 17 00:00:00 2001
From: martclanor
Date: Tue, 6 Aug 2024 21:44:16 +0200
Subject: [PATCH 51/57] fix(mfmodel): fix get_ims_package (#2272)

This fixes MFModel.get_ims_package which is a method that returns the
relevant IMS through the simulation name file's solution group block.
---
 autotest/test_mf6.py | 3 +++
 flopy/mf6/mfmodel.py | 12 ++++++++----
 2 files changed, 11 insertions(+), 4 deletions(-)

diff --git a/autotest/test_mf6.py b/autotest/test_mf6.py
index 59ae0336f..2e1d93161 100644
--- a/autotest/test_mf6.py
+++ b/autotest/test_mf6.py
@@ -2245,6 +2245,9 @@ def test_multi_model(function_tmpdir):
         assert rec_array[0][3] == model_names[1]
         assert rec_array[1][1] == "transport.ims"
         assert rec_array[1][2] == model_names[2]
+        assert gwf1.get_ims_package() is gwf2.get_ims_package()
+        assert gwf1.get_ims_package().filename == "flow.ims"
+        assert gwt.get_ims_package().filename == "transport.ims"
         # test ssm fileinput
         gwt2 = sim2.get_model("gwt_model_1")
         ssm2 = gwt2.get_package("ssm")
diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py
index 62945eec4..70c62e28e 100644
--- a/flopy/mf6/mfmodel.py
+++ b/flopy/mf6/mfmodel.py
@@ -1275,11 +1275,15 @@ def get_ims_package(self):
         -------
         IMS package : ModflowIms
         """
-        solution_group = self.simulation.name_file.solutiongroup.get_data()
+        solution_group = self.simulation.name_file.solutiongroup.get_data(0)
         for record in solution_group:
-            for model_name in record[2:]:
-                if model_name == self.name:
-                    return self.simulation.get_solution_package(record[1])
+            for name in record.dtype.names:
+                if name == "slntype" or name == "slnfname":
+                    continue
+                if record[name] == self.name:
+                    return self.simulation.get_solution_package(
+                        record.slnfname
+                    )
         return None
 
     def get_steadystate_list(self):

From 559ae74508de1b81a4ceae01b58090b8c4715510 Mon Sep 17 00:00:00 2001
From: wpbonelli
Date: Tue, 6 Aug 2024 15:49:40 -0400
Subject: [PATCH 52/57] ci(release): remove --approve from update_version.py call (#2281)

This is no longer needed/supported as of #2240
---
 .github/workflows/release.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index
92c65dee9..f3a5717b0 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -56,7 +56,7 @@ jobs: if [[ "$ver" == *"rc"* ]]; then python scripts/update_version.py -v "${ver%"rc"}" else - python scripts/update_version.py -v "$ver" --approve + python scripts/update_version.py -v "$ver" fi # show version and set output From 4321b0b986659b367aa8b572086b7e4e0835ab6a Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Tue, 6 Aug 2024 16:51:14 -0400 Subject: [PATCH 53/57] docs(mfbcf): 'storage coefficient' -> 'specific yield' (#2282) Resolves #2268 --- flopy/mfusg/mfusgbcf.py | 2 +- flopy/modflow/mfbcf.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/flopy/mfusg/mfusgbcf.py b/flopy/mfusg/mfusgbcf.py index ea85670e3..290b8f37b 100644 --- a/flopy/mfusg/mfusgbcf.py +++ b/flopy/mfusg/mfusgbcf.py @@ -74,7 +74,7 @@ class MfUsgBcf(ModflowBcf): is the vertical hydraulic conductivity of the cell and the leakance is computed for each vertical connection. sf1 : float or array of floats (nlay, nrow, ncol) - specific storage (confined) or storage coefficient (unconfined), + specific storage (confined) or specific yield (unconfined), read when there is at least one transient stress period. (default is 1e-5) sf2 : float or array of floats (nlay, nrow, ncol) diff --git a/flopy/modflow/mfbcf.py b/flopy/modflow/mfbcf.py index f52b738bf..83d676448 100644 --- a/flopy/modflow/mfbcf.py +++ b/flopy/modflow/mfbcf.py @@ -45,7 +45,7 @@ class ModflowBcf(Package): vcont : float or array of floats (nlay-1, nrow, ncol) vertical leakance between layers (default is 1.0) sf1 : float or array of floats (nlay, nrow, ncol) - specific storage (confined) or storage coefficient (unconfined), + specific storage (confined) or specific yield (unconfined), read when there is at least one transient stress period. 
        (default is 1e-5)
    sf2 : float or array of floats (nrow, ncol)

From b64f2bdae803830936da89cf1c8e97ab4f660981 Mon Sep 17 00:00:00 2001
From: martclanor
Date: Wed, 7 Aug 2024 22:21:37 +0200
Subject: [PATCH 54/57] fix(modelgrid): fix missing coord info if disv (#2284)

Closes #2283

---------

Co-authored-by: wpbonelli
---
 autotest/test_mf6.py | 52 ++++++++++++++++++++++++++++++++++++++++++++
 flopy/mf6/mfmodel.py | 13 ++++-------
 2 files changed, 56 insertions(+), 9 deletions(-)

diff --git a/autotest/test_mf6.py b/autotest/test_mf6.py
index 2e1d93161..6401fa8fb 100644
--- a/autotest/test_mf6.py
+++ b/autotest/test_mf6.py
@@ -2420,3 +2420,55 @@ def test_remove_model(function_tmpdir, example_data_path):
         elif exg_index > 0:
             assert "end exchanges" in l
             break
+
+
+def test_flopy_2283(function_tmpdir):
+    # create triangular grid
+    triangle_ws = function_tmpdir / "triangle"
+    triangle_ws.mkdir()
+
+    active_area = [(0, 0), (0, 1000), (1000, 1000), (1000, 0)]
+    tri = Triangle(model_ws=triangle_ws, angle=30)
+    tri.add_polygon(active_area)
+    tri.add_region((1, 1), maximum_area=50**2)
+
+    tri.build()
+
+    # build vertex grid object
+    vgrid = flopy.discretization.VertexGrid(
+        vertices=tri.get_vertices(),
+        cell2d=tri.get_cell2d(),
+        xoff=199000,
+        yoff=215500,
+        crs=31370,
+        angrot=30,
+    )
+
+    # coord info is set (also correct when using vgrid.set_coord_info())
+    print(vgrid)
+
+    # create MODFLOW 6 model
+    ws = function_tmpdir / "model"
+    ws.mkdir()
+    sim = flopy.mf6.MFSimulation(sim_name="prj-test", sim_ws=ws)
+    tdis = flopy.mf6.ModflowTdis(sim)
+    ims = flopy.mf6.ModflowIms(sim)
+
+    gwf = flopy.mf6.ModflowGwf(sim, modelname="gwf")
+    disv = flopy.mf6.ModflowGwfdisv(
+        gwf,
+        xorigin=vgrid.xoffset,
+        yorigin=vgrid.yoffset,
+        angrot=vgrid.angrot,  # no CRS info can be set in DISV
+        nlay=1,
+        top=0.0,
+        botm=-10.0,
+        ncpl=vgrid.ncpl,
+        nvert=vgrid.nvert,
+        cell2d=vgrid.cell2d,
+        vertices=tri.get_vertices(),  # this is not stored in the Vertex grid object?
+ ) + + assert gwf.modelgrid.xoffset == disv.xorigin.get_data() + assert gwf.modelgrid.yoffset == disv.yorigin.get_data() + assert gwf.modelgrid.angrot == disv.angrot.get_data() diff --git a/flopy/mf6/mfmodel.py b/flopy/mf6/mfmodel.py index 70c62e28e..281173583 100644 --- a/flopy/mf6/mfmodel.py +++ b/flopy/mf6/mfmodel.py @@ -572,15 +572,10 @@ def modelgrid(self): else: return self._modelgrid - if self.get_grid_type() != DiscretizationType.DISV: - # get coordinate data from dis file - xorig = dis.xorigin.get_data() - yorig = dis.yorigin.get_data() - angrot = dis.angrot.get_data() - else: - xorig = self._modelgrid.xoffset - yorig = self._modelgrid.yoffset - angrot = self._modelgrid.angrot + # get coordinate data from dis file + xorig = dis.xorigin.get_data() + yorig = dis.yorigin.get_data() + angrot = dis.angrot.get_data() # resolve offsets if xorig is None: From bd7f0a578b9093697948255eb9ecc164d5574f6e Mon Sep 17 00:00:00 2001 From: Joshua Larsen Date: Thu, 8 Aug 2024 04:43:21 -0700 Subject: [PATCH 55/57] update(resample_to_grid): filter raster nan values from scipy resampling routines (#2285) Address #2273 (comment) --- flopy/utils/rasters.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/flopy/utils/rasters.py b/flopy/utils/rasters.py index fe9155121..4fabf0482 100644 --- a/flopy/utils/rasters.py +++ b/flopy/utils/rasters.py @@ -537,6 +537,13 @@ def resample_to_grid( arr = self.get_array(band, masked=True) arr = arr.flatten() + # filter out nan values from the original dataset + if np.isnan(np.sum(arr)): + idx = np.isfinite(arr) + rxc = rxc[idx] + ryc = ryc[idx] + arr = arr[idx] + # step 3: use griddata interpolation to snap to grid data = griddata( (rxc, ryc), From 2eace7843409b78497bc941d49eab68394833bfb Mon Sep 17 00:00:00 2001 From: wpbonelli Date: Thu, 8 Aug 2024 09:24:20 -0400 Subject: [PATCH 56/57] fix(examples): restore example notebooks skipped after #2264 (#2286) #2264 removed the notebook_utils.py module but neglected to remove this dependency from 3 examples: * export_vtk_tutorial.py * plot_cross_section_example.py * plot_map_view_example.py This is why these are missing from the develop version of the RTD site. This PR inlines the shared models which previously lived in notebook_utils.py. This duplication will go away once we have a models API as proposed in #1872. --- .docs/Notebooks/export_vtk_tutorial.py | 334 +++++++++- .docs/Notebooks/plot_cross_section_example.py | 622 +++++++++++++++++- .docs/Notebooks/plot_map_view_example.py | 334 +++++++++- 3 files changed, 1262 insertions(+), 28 deletions(-) diff --git a/.docs/Notebooks/export_vtk_tutorial.py b/.docs/Notebooks/export_vtk_tutorial.py index cdaaaac92..c21d40919 100644 --- a/.docs/Notebooks/export_vtk_tutorial.py +++ b/.docs/Notebooks/export_vtk_tutorial.py @@ -30,6 +30,7 @@ import os import sys from pathlib import Path +from pprint import pformat from tempfile import TemporaryDirectory import numpy as np @@ -37,17 +38,17 @@ import flopy from flopy.export import vtk -sys.path.append(os.path.join("..", "common")) -import notebook_utils - print(sys.version) print(f"flopy version: {flopy.__version__}") # - # load model for examples nam_file = "freyberg.nam" -prj_root = notebook_utils.get_project_root_path() -model_ws = prj_root / "examples" / "data" / "freyberg_multilayer_transient" +model_ws = Path( + os.path.join( + "..", "..", "examples", "data", "freyberg_multilayer_transient" + ) +) ml = flopy.modflow.Modflow.load(nam_file, model_ws=model_ws, check=False) # Create a temporary workspace. 
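With the model loaded, the basic export pattern is to wrap the model in a `Vtk` container, add the model's packages, and write the files. Here is a minimal sketch, assuming the `ml`, `vtk`, and `workspace` names from the surrounding cells (the output filename is arbitrary):

```python
# collect the model grid and package arrays into a Vtk container,
# then write VTK files to the temporary workspace
vtkobj = vtk.Vtk(model=ml, binary=True)
vtkobj.add_model(ml)
vtkobj.write(workspace / "freyberg.vtk")
```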
@@ -375,9 +376,330 @@ # # The `Vtk` class supports writing MODPATH pathline/timeseries data to a VTK file. To start the example, let's first load and run a MODPATH simulation (see flopy3_modpath7_unstructured_example for details) and then add the output to a `Vtk` object. + # + # load and run the vertex grid model and modpath7 -notebook_utils.run(workspace) +def run_vertex_grid_example(ws): + """load and run vertex grid example""" + if not os.path.exists(ws): + os.mkdir(ws) + + from flopy.utils.gridgen import Gridgen + + Lx = 10000.0 + Ly = 10500.0 + nlay = 3 + nrow = 21 + ncol = 20 + delr = Lx / ncol + delc = Ly / nrow + top = 400 + botm = [220, 200, 0] + + ms = flopy.modflow.Modflow() + dis5 = flopy.modflow.ModflowDis( + ms, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=delr, + delc=delc, + top=top, + botm=botm, + ) + + model_name = "mp7p2" + model_ws = os.path.join(ws, "mp7_ex2", "mf6") + gridgen_ws = os.path.join(model_ws, "gridgen") + g = Gridgen(ms.modelgrid, model_ws=gridgen_ws) + + rf0shp = os.path.join(gridgen_ws, "rf0") + xmin = 7 * delr + xmax = 12 * delr + ymin = 8 * delc + ymax = 13 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 1, range(nlay)) + + rf1shp = os.path.join(gridgen_ws, "rf1") + xmin = 8 * delr + xmax = 11 * delr + ymin = 9 * delc + ymax = 12 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 2, range(nlay)) + + rf2shp = os.path.join(gridgen_ws, "rf2") + xmin = 9 * delr + xmax = 10 * delr + ymin = 10 * delc + ymax = 11 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 3, range(nlay)) + + g.build(verbose=False) + + gridprops = g.get_gridprops_disv() + ncpl = gridprops["ncpl"] + top = gridprops["top"] + botm = gridprops["botm"] + nvert = gridprops["nvert"] + vertices = gridprops["vertices"] + cell2d = gridprops["cell2d"] + # cellxy = gridprops['cellxy'] + + # create simulation + sim = flopy.mf6.MFSimulation( + sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=model_ws + ) + + # create tdis package + tdis_rc = [(1000.0, 1, 1.0)] + tdis = flopy.mf6.ModflowTdis( + sim, pname="tdis", time_units="DAYS", perioddata=tdis_rc + ) + + # create gwf model + gwf = flopy.mf6.ModflowGwf( + sim, modelname=model_name, model_nam_file=f"{model_name}.nam" + ) + gwf.name_file.save_flows = True + + # create iterative model solution and register the gwf model with it + ims = flopy.mf6.ModflowIms( + sim, + pname="ims", + print_option="SUMMARY", + complexity="SIMPLE", + outer_hclose=1.0e-5, + outer_maximum=100, + under_relaxation="NONE", + inner_maximum=100, + inner_hclose=1.0e-6, + rcloserecord=0.1, + linear_acceleration="BICGSTAB", + scaling_method="NONE", + reordering_method="NONE", + relaxation_factor=0.99, + ) + sim.register_ims_package(ims, [gwf.name]) + + # disv + disv = flopy.mf6.ModflowGwfdisv( + gwf, + nlay=nlay, + ncpl=ncpl, + top=top, + botm=botm, + nvert=nvert, + vertices=vertices, + cell2d=cell2d, + ) + + # initial conditions + ic = flopy.mf6.ModflowGwfic(gwf, pname="ic", strt=320.0) + + # node property flow + npf = flopy.mf6.ModflowGwfnpf( + gwf, + xt3doptions=[("xt3d")], + save_specific_discharge=True, + icelltype=[1, 0, 0], + k=[50.0, 0.01, 200.0], + k33=[10.0, 0.01, 20.0], + ) + + # wel + wellpoints = [(4750.0, 
5250.0)] + welcells = g.intersect(wellpoints, "point", 0) + # welspd = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=1, aux_vars=['iface']) + welspd = [[(2, icpl), -150000, 0] for icpl in welcells["nodenumber"]] + wel = flopy.mf6.ModflowGwfwel( + gwf, + print_input=True, + auxiliary=[("iface",)], + stress_period_data=welspd, + ) + + # rch + aux = [np.ones(ncpl, dtype=int) * 6] + rch = flopy.mf6.ModflowGwfrcha( + gwf, recharge=0.005, auxiliary=[("iface",)], aux={0: [6]} + ) + # riv + riverline = [[(Lx - 1.0, Ly), (Lx - 1.0, 0.0)]] + rivcells = g.intersect(riverline, "line", 0) + rivspd = [ + [(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"] + ] + riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd) + + # output control + oc = flopy.mf6.ModflowGwfoc( + gwf, + pname="oc", + budget_filerecord=f"{model_name}.cbb", + head_filerecord=f"{model_name}.hds", + headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + sim.write_simulation() + success, buff = sim.run_simulation(silent=True, report=True) + if success: + for line in buff: + print(line) + else: + raise ValueError("Failed to run.") + + mp_namea = f"{model_name}a_mp" + mp_nameb = f"{model_name}b_mp" + + pcoord = np.array( + [ + [0.000, 0.125, 0.500], + [0.000, 0.375, 0.500], + [0.000, 0.625, 0.500], + [0.000, 0.875, 0.500], + [1.000, 0.125, 0.500], + [1.000, 0.375, 0.500], + [1.000, 0.625, 0.500], + [1.000, 0.875, 0.500], + [0.125, 0.000, 0.500], + [0.375, 0.000, 0.500], + [0.625, 0.000, 0.500], + [0.875, 0.000, 0.500], + [0.125, 1.000, 0.500], + [0.375, 1.000, 0.500], + [0.625, 1.000, 0.500], + [0.875, 1.000, 0.500], + ] + ) + nodew = gwf.disv.ncpl.array * 2 + welcells["nodenumber"][0] + plocs = [nodew for i in range(pcoord.shape[0])] + + # create particle data + pa = flopy.modpath.ParticleData( + plocs, + structured=False, + localx=pcoord[:, 0], + localy=pcoord[:, 1], + localz=pcoord[:, 2], + drape=0, + ) + + # create backward particle group + fpth = f"{mp_namea}.sloc" + pga = flopy.modpath.ParticleGroup( + particlegroupname="BACKWARD1", particledata=pa, filename=fpth + ) + + facedata = flopy.modpath.FaceDataType( + drape=0, + verticaldivisions1=10, + horizontaldivisions1=10, + verticaldivisions2=10, + horizontaldivisions2=10, + verticaldivisions3=10, + horizontaldivisions3=10, + verticaldivisions4=10, + horizontaldivisions4=10, + rowdivisions5=0, + columndivisions5=0, + rowdivisions6=4, + columndivisions6=4, + ) + pb = flopy.modpath.NodeParticleData(subdivisiondata=facedata, nodes=nodew) + # create forward particle group + fpth = f"{mp_nameb}.sloc" + pgb = flopy.modpath.ParticleGroupNodeTemplate( + particlegroupname="BACKWARD2", particledata=pb, filename=fpth + ) + + # create modpath files + mp = flopy.modpath.Modpath7( + modelname=mp_namea, flowmodel=gwf, exe_name="mp7", model_ws=model_ws + ) + flopy.modpath.Modpath7Bas(mp, porosity=0.1) + flopy.modpath.Modpath7Sim( + mp, + simulationtype="combined", + trackingdirection="backward", + weaksinkoption="pass_through", + weaksourceoption="pass_through", + referencetime=0.0, + stoptimeoption="extend", + timepointdata=[500, 1000.0], + particlegroups=pga, + ) + + # write modpath datasets + mp.write_input() + + # run modpath + success, buff = mp.run_model(silent=True, report=True) + if success: + for line in buff: + print(line) + else: + raise ValueError("Failed to run.") + + # create modpath files + mp = flopy.modpath.Modpath7( 
+ modelname=mp_nameb, flowmodel=gwf, exe_name="mp7", model_ws=model_ws + ) + flopy.modpath.Modpath7Bas(mp, porosity=0.1) + flopy.modpath.Modpath7Sim( + mp, + simulationtype="endpoint", + trackingdirection="backward", + weaksinkoption="pass_through", + weaksourceoption="pass_through", + referencetime=0.0, + stoptimeoption="extend", + particlegroups=pgb, + ) + + # write modpath datasets + mp.write_input() + + # run modpath + success, buff = mp.run_model(silent=True, report=True) + assert success, pformat(buff) + + +run_vertex_grid_example(workspace) # check if model ran properly modelpth = workspace / "mp7_ex2" / "mf6" diff --git a/.docs/Notebooks/plot_cross_section_example.py b/.docs/Notebooks/plot_cross_section_example.py index e25337940..d5d2ef381 100644 --- a/.docs/Notebooks/plot_cross_section_example.py +++ b/.docs/Notebooks/plot_cross_section_example.py @@ -34,9 +34,6 @@ import matplotlib.pyplot as plt import numpy as np -sys.path.append(os.path.join("..", "common")) -import notebook_utils - import flopy print(sys.version) @@ -53,8 +50,7 @@ exe_name_mf6 = "mf6" # Set the paths -prj_root = notebook_utils.get_project_root_path() -loadpth = str(prj_root / "examples" / "data" / "freyberg") +loadpth = os.path.join("..", "..", "examples", "data", "freyberg") tempdir = TemporaryDirectory() modelpth = tempdir.name @@ -416,7 +412,7 @@ # + # load the Freyberg model into mf6-flopy and run the simulation sim_name = "mfsim.nam" -sim_path = str(prj_root / "examples" / "data" / "mf6-freyberg") +sim_path = os.path.join("..", "..", "examples", "data", "mf6-freyberg") sim = flopy.mf6.MFSimulation.load( sim_name=sim_name, version=vmf6, exe_name=exe_name_mf6, sim_ws=sim_path ) @@ -424,11 +420,8 @@ sim.set_sim_path(modelpth) sim.write_simulation() success, buff = sim.run_simulation(silent=True, report=True) -if success: - for line in buff: - print(line) -else: - raise ValueError("Something bad happened.") +assert success, pformat(buff) + files = ["freyberg.hds", "freyberg.cbc"] for f in files: if os.path.isfile(os.path.join(str(modelpth), f)): @@ -520,9 +513,330 @@ # # FloPy fully supports vertex discretization (DISV) plotting through the `PlotCrossSection` class. The method calls are identical to the ones presented previously for Structured discretization (DIS) and the same matplotlib keyword arguments are supported. Let's run through an example using a vertex model grid. 
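Because a DISV grid has no rows or columns to follow, the cross-section line must be passed explicitly through the `line` keyword. Below is a minimal sketch of the call pattern, assuming a vertex-grid flow model `gwf` and a `head` array like those produced in the cells that follow (the line endpoints are arbitrary):

```python
import matplotlib.pyplot as plt

# an arbitrary section line across the model domain, in model coordinates
line = {"line": [(0.0, 5250.0), (10000.0, 5250.0)]}

fig, ax = plt.subplots(figsize=(12, 3))
xsect = flopy.plot.PlotCrossSection(model=gwf, line=line, ax=ax)
pc = xsect.plot_array(head, head=head, alpha=0.5)  # shade cells by head
xsect.plot_grid(color="black", lw=0.3)
plt.colorbar(pc, ax=ax, label="head")
```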
+ # + pycharm={"name": "#%%\n"} # build and run vertex model grid demo problem -notebook_utils.run(modelpth) +def run_vertex_grid_example(ws): + """load and run vertex grid example""" + if not os.path.exists(ws): + os.mkdir(ws) + + from flopy.utils.gridgen import Gridgen + + Lx = 10000.0 + Ly = 10500.0 + nlay = 3 + nrow = 21 + ncol = 20 + delr = Lx / ncol + delc = Ly / nrow + top = 400 + botm = [220, 200, 0] + + ms = flopy.modflow.Modflow() + dis5 = flopy.modflow.ModflowDis( + ms, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=delr, + delc=delc, + top=top, + botm=botm, + ) + + model_name = "mp7p2" + model_ws = os.path.join(ws, "mp7_ex2", "mf6") + gridgen_ws = os.path.join(model_ws, "gridgen") + g = Gridgen(ms.modelgrid, model_ws=gridgen_ws) + + rf0shp = os.path.join(gridgen_ws, "rf0") + xmin = 7 * delr + xmax = 12 * delr + ymin = 8 * delc + ymax = 13 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 1, range(nlay)) + + rf1shp = os.path.join(gridgen_ws, "rf1") + xmin = 8 * delr + xmax = 11 * delr + ymin = 9 * delc + ymax = 12 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 2, range(nlay)) + + rf2shp = os.path.join(gridgen_ws, "rf2") + xmin = 9 * delr + xmax = 10 * delr + ymin = 10 * delc + ymax = 11 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 3, range(nlay)) + + g.build(verbose=False) + + gridprops = g.get_gridprops_disv() + ncpl = gridprops["ncpl"] + top = gridprops["top"] + botm = gridprops["botm"] + nvert = gridprops["nvert"] + vertices = gridprops["vertices"] + cell2d = gridprops["cell2d"] + # cellxy = gridprops['cellxy'] + + # create simulation + sim = flopy.mf6.MFSimulation( + sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=model_ws + ) + + # create tdis package + tdis_rc = [(1000.0, 1, 1.0)] + tdis = flopy.mf6.ModflowTdis( + sim, pname="tdis", time_units="DAYS", perioddata=tdis_rc + ) + + # create gwf model + gwf = flopy.mf6.ModflowGwf( + sim, modelname=model_name, model_nam_file=f"{model_name}.nam" + ) + gwf.name_file.save_flows = True + + # create iterative model solution and register the gwf model with it + ims = flopy.mf6.ModflowIms( + sim, + pname="ims", + print_option="SUMMARY", + complexity="SIMPLE", + outer_hclose=1.0e-5, + outer_maximum=100, + under_relaxation="NONE", + inner_maximum=100, + inner_hclose=1.0e-6, + rcloserecord=0.1, + linear_acceleration="BICGSTAB", + scaling_method="NONE", + reordering_method="NONE", + relaxation_factor=0.99, + ) + sim.register_ims_package(ims, [gwf.name]) + + # disv + disv = flopy.mf6.ModflowGwfdisv( + gwf, + nlay=nlay, + ncpl=ncpl, + top=top, + botm=botm, + nvert=nvert, + vertices=vertices, + cell2d=cell2d, + ) + + # initial conditions + ic = flopy.mf6.ModflowGwfic(gwf, pname="ic", strt=320.0) + + # node property flow + npf = flopy.mf6.ModflowGwfnpf( + gwf, + xt3doptions=[("xt3d")], + save_specific_discharge=True, + icelltype=[1, 0, 0], + k=[50.0, 0.01, 200.0], + k33=[10.0, 0.01, 20.0], + ) + + # wel + wellpoints = [(4750.0, 5250.0)] + welcells = g.intersect(wellpoints, "point", 0) + # welspd = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=1, aux_vars=['iface']) + welspd = [[(2, icpl), -150000, 0] for icpl in welcells["nodenumber"]] + wel = 
flopy.mf6.ModflowGwfwel( + gwf, + print_input=True, + auxiliary=[("iface",)], + stress_period_data=welspd, + ) + + # rch + aux = [np.ones(ncpl, dtype=int) * 6] + rch = flopy.mf6.ModflowGwfrcha( + gwf, recharge=0.005, auxiliary=[("iface",)], aux={0: [6]} + ) + # riv + riverline = [[(Lx - 1.0, Ly), (Lx - 1.0, 0.0)]] + rivcells = g.intersect(riverline, "line", 0) + rivspd = [ + [(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"] + ] + riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd) + + # output control + oc = flopy.mf6.ModflowGwfoc( + gwf, + pname="oc", + budget_filerecord=f"{model_name}.cbb", + head_filerecord=f"{model_name}.hds", + headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + sim.write_simulation() + success, buff = sim.run_simulation(silent=True, report=True) + if success: + for line in buff: + print(line) + else: + raise ValueError("Failed to run.") + + mp_namea = f"{model_name}a_mp" + mp_nameb = f"{model_name}b_mp" + + pcoord = np.array( + [ + [0.000, 0.125, 0.500], + [0.000, 0.375, 0.500], + [0.000, 0.625, 0.500], + [0.000, 0.875, 0.500], + [1.000, 0.125, 0.500], + [1.000, 0.375, 0.500], + [1.000, 0.625, 0.500], + [1.000, 0.875, 0.500], + [0.125, 0.000, 0.500], + [0.375, 0.000, 0.500], + [0.625, 0.000, 0.500], + [0.875, 0.000, 0.500], + [0.125, 1.000, 0.500], + [0.375, 1.000, 0.500], + [0.625, 1.000, 0.500], + [0.875, 1.000, 0.500], + ] + ) + nodew = gwf.disv.ncpl.array * 2 + welcells["nodenumber"][0] + plocs = [nodew for i in range(pcoord.shape[0])] + + # create particle data + pa = flopy.modpath.ParticleData( + plocs, + structured=False, + localx=pcoord[:, 0], + localy=pcoord[:, 1], + localz=pcoord[:, 2], + drape=0, + ) + + # create backward particle group + fpth = f"{mp_namea}.sloc" + pga = flopy.modpath.ParticleGroup( + particlegroupname="BACKWARD1", particledata=pa, filename=fpth + ) + + facedata = flopy.modpath.FaceDataType( + drape=0, + verticaldivisions1=10, + horizontaldivisions1=10, + verticaldivisions2=10, + horizontaldivisions2=10, + verticaldivisions3=10, + horizontaldivisions3=10, + verticaldivisions4=10, + horizontaldivisions4=10, + rowdivisions5=0, + columndivisions5=0, + rowdivisions6=4, + columndivisions6=4, + ) + pb = flopy.modpath.NodeParticleData(subdivisiondata=facedata, nodes=nodew) + # create forward particle group + fpth = f"{mp_nameb}.sloc" + pgb = flopy.modpath.ParticleGroupNodeTemplate( + particlegroupname="BACKWARD2", particledata=pb, filename=fpth + ) + + # create modpath files + mp = flopy.modpath.Modpath7( + modelname=mp_namea, flowmodel=gwf, exe_name="mp7", model_ws=model_ws + ) + flopy.modpath.Modpath7Bas(mp, porosity=0.1) + flopy.modpath.Modpath7Sim( + mp, + simulationtype="combined", + trackingdirection="backward", + weaksinkoption="pass_through", + weaksourceoption="pass_through", + referencetime=0.0, + stoptimeoption="extend", + timepointdata=[500, 1000.0], + particlegroups=pga, + ) + + # write modpath datasets + mp.write_input() + + # run modpath + success, buff = mp.run_model(silent=True, report=True) + if success: + for line in buff: + print(line) + else: + raise ValueError("Failed to run.") + + # create modpath files + mp = flopy.modpath.Modpath7( + modelname=mp_nameb, flowmodel=gwf, exe_name="mp7", model_ws=model_ws + ) + flopy.modpath.Modpath7Bas(mp, porosity=0.1) + flopy.modpath.Modpath7Sim( + mp, + simulationtype="endpoint", + trackingdirection="backward", + 
weaksinkoption="pass_through", + weaksourceoption="pass_through", + referencetime=0.0, + stoptimeoption="extend", + particlegroups=pgb, + ) + + # write modpath datasets + mp.write_input() + + # run modpath + success, buff = mp.run_model(silent=True, report=True) + assert success, pformat(buff) + + +run_vertex_grid_example(modelpth) # check if model ran properly modelpth = os.path.join(modelpth, "mp7_ex2", "mf6") @@ -681,10 +995,290 @@ # # FloPy's plotting routines can be used with built in styles from the `styles` module. The `styles` module takes advantage of matplotlib's temporary styling routines by reading in pre-built style sheets. Two different types of styles have been built for flopy: `USGSMap()` and `USGSPlot()` styles which can be used to create report quality figures. The styles module also contains a number of methods that can be used for adding axis labels, text, annotations, headings, removing tick lines, and updating the current font. # -# This example will load the Keating groundwater transport model and plot results using `styles` +# This example will run the Keating groundwater transport model and plot results using `styles` # + pycharm={"name": "#%%\n"} -notebook_utils.run_keating_model(modelpth) + +example_name = "ex-gwt-keating" + +# Model units + +length_units = "m" +time_units = "days" + +# Table of model parameters + +nlay = 80 # Number of layers +nrow = 1 # Number of rows +ncol = 400 # Number of columns +delr = 25.0 # Column width ($m$) +delc = 1.0 # Row width ($m$) +delz = 25.0 # Layer thickness ($m$) +top = 2000.0 # Top of model domain ($m$) +bottom = 0.0 # Bottom of model domain ($m$) +hka = 1.0e-12 # Permeability of aquifer ($m^2$) +hkc = 1.0e-18 # Permeability of aquitard ($m^2$) +h1 = 800.0 # Head on left side ($m$) +h2 = 100.0 # Head on right side ($m$) +recharge = 0.5 # Recharge ($kg/s$) +recharge_conc = 1.0 # Normalized recharge concentration (unitless) +alpha_l = 1.0 # Longitudinal dispersivity ($m$) +alpha_th = 1.0 # Transverse horizontal dispersivity ($m$) +alpha_tv = 1.0 # Transverse vertical dispersivity ($m$) +period1 = 730 # Length of first simulation period ($d$) +period2 = 29270.0 # Length of second simulation period ($d$) +porosity = 0.1 # Porosity of mobile domain (unitless) +obs1 = (49, 1, 119) # Layer, row, and column for observation 1 +obs2 = (77, 1, 359) # Layer, row, and column for observation 2 + +obs1 = tuple([i - 1 for i in obs1]) +obs2 = tuple([i - 1 for i in obs2]) +seconds_to_days = 24.0 * 60.0 * 60.0 +permeability_to_conductivity = 1000.0 * 9.81 / 1.0e-3 * seconds_to_days +hka = hka * permeability_to_conductivity +hkc = hkc * permeability_to_conductivity +botm = [top - (k + 1) * delz for k in range(nlay)] +x = np.arange(0, 10000.0, delr) + delr / 2.0 +plotaspect = 1.0 + +# Fill hydraulic conductivity array +hydraulic_conductivity = np.ones((nlay, nrow, ncol), dtype=float) * hka +for k in range(nlay): + if 1000.0 <= botm[k] < 1100.0: + for j in range(ncol): + if 3000.0 <= x[j] <= 6000.0: + hydraulic_conductivity[k, 0, j] = hkc + +# Calculate recharge by converting from kg/s to m/d +rcol = [] +for jcol in range(ncol): + if 4200.0 <= x[jcol] <= 4800.0: + rcol.append(jcol) +number_recharge_cells = len(rcol) +rrate = recharge * seconds_to_days / 1000.0 +cell_area = delr * delc +rrate = rrate / (float(number_recharge_cells) * cell_area) +rchspd = {} +rchspd[0] = [[(0, 0, j), rrate, recharge_conc] for j in rcol] +rchspd[1] = [[(0, 0, j), rrate, 0.0] for j in rcol] + + +def build_mf6gwf(sim_folder): + ws = os.path.join(sim_folder, 
"mf6-gwt-keating") + name = "flow" + sim_ws = os.path.join(ws, "mf6gwf") + sim = flopy.mf6.MFSimulation(sim_name=name, sim_ws=sim_ws, exe_name="mf6") + tdis_ds = ((period1, 1, 1.0), (period2, 1, 1.0)) + flopy.mf6.ModflowTdis( + sim, nper=len(tdis_ds), perioddata=tdis_ds, time_units=time_units + ) + flopy.mf6.ModflowIms( + sim, + print_option="summary", + complexity="complex", + no_ptcrecord="all", + outer_dvclose=1.0e-4, + outer_maximum=2000, + under_relaxation="dbd", + linear_acceleration="BICGSTAB", + under_relaxation_theta=0.7, + under_relaxation_kappa=0.08, + under_relaxation_gamma=0.05, + under_relaxation_momentum=0.0, + backtracking_number=20, + backtracking_tolerance=2.0, + backtracking_reduction_factor=0.2, + backtracking_residual_limit=5.0e-4, + inner_dvclose=1.0e-5, + rcloserecord=[0.0001, "relative_rclose"], + inner_maximum=100, + relaxation_factor=0.0, + number_orthogonalizations=2, + preconditioner_levels=8, + preconditioner_drop_tolerance=0.001, + ) + gwf = flopy.mf6.ModflowGwf( + sim, modelname=name, save_flows=True, newtonoptions=["newton"] + ) + flopy.mf6.ModflowGwfdis( + gwf, + length_units=length_units, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=delr, + delc=delc, + top=top, + botm=botm, + ) + flopy.mf6.ModflowGwfnpf( + gwf, + save_specific_discharge=True, + save_saturation=True, + icelltype=1, + k=hydraulic_conductivity, + ) + flopy.mf6.ModflowGwfic(gwf, strt=600.0) + chdspd = [[(k, 0, 0), h1] for k in range(nlay) if botm[k] < h1] + chdspd += [[(k, 0, ncol - 1), h2] for k in range(nlay) if botm[k] < h2] + flopy.mf6.ModflowGwfchd( + gwf, + stress_period_data=chdspd, + print_input=True, + print_flows=True, + save_flows=False, + pname="CHD-1", + ) + flopy.mf6.ModflowGwfrch( + gwf, + stress_period_data=rchspd, + auxiliary=["concentration"], + pname="RCH-1", + ) + + head_filerecord = f"{name}.hds" + budget_filerecord = f"{name}.bud" + flopy.mf6.ModflowGwfoc( + gwf, + head_filerecord=head_filerecord, + budget_filerecord=budget_filerecord, + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + return sim + + +def build_mf6gwt(sim_folder): + ws = os.path.join(sim_folder, "mf6-gwt-keating") + name = "trans" + sim_ws = os.path.join(ws, "mf6gwt") + sim = flopy.mf6.MFSimulation( + sim_name=name, + sim_ws=sim_ws, + exe_name="mf6", + continue_=True, + ) + tdis_ds = ((period1, 73, 1.0), (period2, 2927, 1.0)) + flopy.mf6.ModflowTdis( + sim, nper=len(tdis_ds), perioddata=tdis_ds, time_units=time_units + ) + flopy.mf6.ModflowIms( + sim, + print_option="summary", + outer_dvclose=1.0e-4, + outer_maximum=100, + under_relaxation="none", + linear_acceleration="BICGSTAB", + rcloserecord=[1000.0, "strict"], + inner_maximum=20, + inner_dvclose=1.0e-4, + relaxation_factor=0.0, + number_orthogonalizations=2, + preconditioner_levels=8, + preconditioner_drop_tolerance=0.001, + ) + gwt = flopy.mf6.ModflowGwt(sim, modelname=name, save_flows=True) + flopy.mf6.ModflowGwtdis( + gwt, + length_units=length_units, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=delr, + delc=delc, + top=top, + botm=botm, + ) + flopy.mf6.ModflowGwtic(gwt, strt=0) + flopy.mf6.ModflowGwtmst(gwt, porosity=porosity) + flopy.mf6.ModflowGwtadv(gwt, scheme="upstream") + flopy.mf6.ModflowGwtdsp( + gwt, xt3d_off=True, alh=alpha_l, ath1=alpha_th, atv=alpha_tv + ) + pd = [ + ("GWFHEAD", "../mf6gwf/flow.hds"), + ("GWFBUDGET", "../mf6gwf/flow.bud"), + ] + flopy.mf6.ModflowGwtfmi( + gwt, flow_imbalance_correction=True, packagedata=pd + ) + sourcerecarray = [ + ("RCH-1", "AUX", "CONCENTRATION"), + ] + flopy.mf6.ModflowGwtssm(gwt, 
sources=sourcerecarray) + saverecord = { + 0: [ + ("CONCENTRATION", "STEPS", 10), + ("CONCENTRATION", "LAST"), + ("CONCENTRATION", "FREQUENCY", 10), + ], + 1: [ + ("CONCENTRATION", "STEPS", 27, 227), + ("CONCENTRATION", "LAST"), + ("CONCENTRATION", "FREQUENCY", 10), + ], + } + flopy.mf6.ModflowGwtoc( + gwt, + budget_filerecord=f"{name}.cbc", + concentration_filerecord=f"{name}.ucn", + concentrationprintrecord=[ + ("COLUMNS", ncol, "WIDTH", 15, "DIGITS", 6, "GENERAL") + ], + saverecord=saverecord, + printrecord=[ + ("CONCENTRATION", "LAST"), + ( + "BUDGET", + "ALL", + ), + ], + ) + obs_data = { + f"{name}.obs.csv": [ + ("obs1", "CONCENTRATION", obs1), + ("obs2", "CONCENTRATION", obs2), + ], + } + flopy.mf6.ModflowUtlobs( + gwt, digits=10, print_input=True, continuous=obs_data + ) + return sim + + +def build_model(ws): + sim_mf6gwf = build_mf6gwf(ws) + sim_mf6gwt = build_mf6gwt(ws) + sim_mf2005 = None # build_mf2005(sim_name) + sim_mt3dms = None # build_mt3dms(sim_name, sim_mf2005) + sims = (sim_mf6gwf, sim_mf6gwt, sim_mf2005, sim_mt3dms) + return sims + + +def write_model(sims, silent=True): + sim_mf6gwf, sim_mf6gwt, sim_mf2005, sim_mt3dms = sims + sim_mf6gwf.write_simulation(silent=silent) + sim_mf6gwt.write_simulation(silent=silent) + + +def run_keating_model(ws=example_name, silent=True): + sim = build_model(ws) + write_model(sim, silent=silent) + sim_mf6gwf, sim_mf6gwt, sim_mf2005, sim_mt3dms = sim + + print("Running mf6gwf model...") + success, buff = sim_mf6gwf.run_simulation(silent=silent) + if not success: + print(buff) + + print("Running mf6gwt model...") + success, buff = sim_mf6gwt.run_simulation(silent=silent) + if not success: + print(buff) + + return success + + +run_keating_model(modelpth) # + [markdown] pycharm={"name": "#%% md\n"} # Load the flow and transport models diff --git a/.docs/Notebooks/plot_map_view_example.py b/.docs/Notebooks/plot_map_view_example.py index d52969e4f..bb79a16e9 100644 --- a/.docs/Notebooks/plot_map_view_example.py +++ b/.docs/Notebooks/plot_map_view_example.py @@ -36,9 +36,6 @@ import numpy as np import shapefile -sys.path.append(os.path.join("..", "common")) -import notebook_utils - import flopy print(sys.version) @@ -56,8 +53,7 @@ exe_mp = "mp6" # Set the paths -prj_root = notebook_utils.get_project_root_path() -loadpth = str(prj_root / "examples" / "data" / "freyberg") +loadpth = os.path.join("..", "..", "examples", "data", "freyberg") tempdir = TemporaryDirectory() modelpth = tempdir.name @@ -586,7 +582,7 @@ # + pycharm={"name": "#%%\n"} # load the Freyberg model into mf6-flopy and run the simulation sim_name = "mfsim.nam" -sim_path = str(prj_root / "examples" / "data" / "mf6-freyberg") +sim_path = os.path.join("..", "..", "examples", "data", "mf6-freyberg") sim = flopy.mf6.MFSimulation.load( sim_name=sim_name, version=vmf6, exe_name=exe_name_mf6, sim_ws=sim_path ) @@ -702,7 +698,329 @@ # + pycharm={"name": "#%%\n"} # build and run vertex model grid demo problem -notebook_utils.run(modelpth) + + +def run_vertex_grid_example(ws): + """load and run vertex grid example""" + if not os.path.exists(ws): + os.mkdir(ws) + + from flopy.utils.gridgen import Gridgen + + Lx = 10000.0 + Ly = 10500.0 + nlay = 3 + nrow = 21 + ncol = 20 + delr = Lx / ncol + delc = Ly / nrow + top = 400 + botm = [220, 200, 0] + + ms = flopy.modflow.Modflow() + dis5 = flopy.modflow.ModflowDis( + ms, + nlay=nlay, + nrow=nrow, + ncol=ncol, + delr=delr, + delc=delc, + top=top, + botm=botm, + ) + + model_name = "mp7p2" + model_ws = os.path.join(ws, "mp7_ex2", "mf6") + gridgen_ws = 
os.path.join(model_ws, "gridgen") + g = Gridgen(ms.modelgrid, model_ws=gridgen_ws) + + rf0shp = os.path.join(gridgen_ws, "rf0") + xmin = 7 * delr + xmax = 12 * delr + ymin = 8 * delc + ymax = 13 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 1, range(nlay)) + + rf1shp = os.path.join(gridgen_ws, "rf1") + xmin = 8 * delr + xmax = 11 * delr + ymin = 9 * delc + ymax = 12 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 2, range(nlay)) + + rf2shp = os.path.join(gridgen_ws, "rf2") + xmin = 9 * delr + xmax = 10 * delr + ymin = 10 * delc + ymax = 11 * delc + rfpoly = [ + [ + [ + (xmin, ymin), + (xmax, ymin), + (xmax, ymax), + (xmin, ymax), + (xmin, ymin), + ] + ] + ] + g.add_refinement_features(rfpoly, "polygon", 3, range(nlay)) + + g.build(verbose=False) + + gridprops = g.get_gridprops_disv() + ncpl = gridprops["ncpl"] + top = gridprops["top"] + botm = gridprops["botm"] + nvert = gridprops["nvert"] + vertices = gridprops["vertices"] + cell2d = gridprops["cell2d"] + # cellxy = gridprops['cellxy'] + + # create simulation + sim = flopy.mf6.MFSimulation( + sim_name=model_name, version="mf6", exe_name="mf6", sim_ws=model_ws + ) + + # create tdis package + tdis_rc = [(1000.0, 1, 1.0)] + tdis = flopy.mf6.ModflowTdis( + sim, pname="tdis", time_units="DAYS", perioddata=tdis_rc + ) + + # create gwf model + gwf = flopy.mf6.ModflowGwf( + sim, modelname=model_name, model_nam_file=f"{model_name}.nam" + ) + gwf.name_file.save_flows = True + + # create iterative model solution and register the gwf model with it + ims = flopy.mf6.ModflowIms( + sim, + pname="ims", + print_option="SUMMARY", + complexity="SIMPLE", + outer_hclose=1.0e-5, + outer_maximum=100, + under_relaxation="NONE", + inner_maximum=100, + inner_hclose=1.0e-6, + rcloserecord=0.1, + linear_acceleration="BICGSTAB", + scaling_method="NONE", + reordering_method="NONE", + relaxation_factor=0.99, + ) + sim.register_ims_package(ims, [gwf.name]) + + # disv + disv = flopy.mf6.ModflowGwfdisv( + gwf, + nlay=nlay, + ncpl=ncpl, + top=top, + botm=botm, + nvert=nvert, + vertices=vertices, + cell2d=cell2d, + ) + + # initial conditions + ic = flopy.mf6.ModflowGwfic(gwf, pname="ic", strt=320.0) + + # node property flow + npf = flopy.mf6.ModflowGwfnpf( + gwf, + xt3doptions=[("xt3d")], + save_specific_discharge=True, + icelltype=[1, 0, 0], + k=[50.0, 0.01, 200.0], + k33=[10.0, 0.01, 20.0], + ) + + # wel + wellpoints = [(4750.0, 5250.0)] + welcells = g.intersect(wellpoints, "point", 0) + # welspd = flopy.mf6.ModflowGwfwel.stress_period_data.empty(gwf, maxbound=1, aux_vars=['iface']) + welspd = [[(2, icpl), -150000, 0] for icpl in welcells["nodenumber"]] + wel = flopy.mf6.ModflowGwfwel( + gwf, + print_input=True, + auxiliary=[("iface",)], + stress_period_data=welspd, + ) + + # rch + aux = [np.ones(ncpl, dtype=int) * 6] + rch = flopy.mf6.ModflowGwfrcha( + gwf, recharge=0.005, auxiliary=[("iface",)], aux={0: [6]} + ) + # riv + riverline = [[(Lx - 1.0, Ly), (Lx - 1.0, 0.0)]] + rivcells = g.intersect(riverline, "line", 0) + rivspd = [ + [(0, icpl), 320.0, 100000.0, 318] for icpl in rivcells["nodenumber"] + ] + riv = flopy.mf6.ModflowGwfriv(gwf, stress_period_data=rivspd) + + # output control + oc = flopy.mf6.ModflowGwfoc( + gwf, + pname="oc", + budget_filerecord=f"{model_name}.cbb", + head_filerecord=f"{model_name}.hds", + 
headprintrecord=[("COLUMNS", 10, "WIDTH", 15, "DIGITS", 6, "GENERAL")], + saverecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + printrecord=[("HEAD", "ALL"), ("BUDGET", "ALL")], + ) + + sim.write_simulation() + success, buff = sim.run_simulation(silent=True, report=True) + if success: + for line in buff: + print(line) + else: + raise ValueError("Failed to run.") + + mp_namea = f"{model_name}a_mp" + mp_nameb = f"{model_name}b_mp" + + pcoord = np.array( + [ + [0.000, 0.125, 0.500], + [0.000, 0.375, 0.500], + [0.000, 0.625, 0.500], + [0.000, 0.875, 0.500], + [1.000, 0.125, 0.500], + [1.000, 0.375, 0.500], + [1.000, 0.625, 0.500], + [1.000, 0.875, 0.500], + [0.125, 0.000, 0.500], + [0.375, 0.000, 0.500], + [0.625, 0.000, 0.500], + [0.875, 0.000, 0.500], + [0.125, 1.000, 0.500], + [0.375, 1.000, 0.500], + [0.625, 1.000, 0.500], + [0.875, 1.000, 0.500], + ] + ) + nodew = gwf.disv.ncpl.array * 2 + welcells["nodenumber"][0] + plocs = [nodew for i in range(pcoord.shape[0])] + + # create particle data + pa = flopy.modpath.ParticleData( + plocs, + structured=False, + localx=pcoord[:, 0], + localy=pcoord[:, 1], + localz=pcoord[:, 2], + drape=0, + ) + + # create backward particle group + fpth = f"{mp_namea}.sloc" + pga = flopy.modpath.ParticleGroup( + particlegroupname="BACKWARD1", particledata=pa, filename=fpth + ) + + facedata = flopy.modpath.FaceDataType( + drape=0, + verticaldivisions1=10, + horizontaldivisions1=10, + verticaldivisions2=10, + horizontaldivisions2=10, + verticaldivisions3=10, + horizontaldivisions3=10, + verticaldivisions4=10, + horizontaldivisions4=10, + rowdivisions5=0, + columndivisions5=0, + rowdivisions6=4, + columndivisions6=4, + ) + pb = flopy.modpath.NodeParticleData(subdivisiondata=facedata, nodes=nodew) + # create forward particle group + fpth = f"{mp_nameb}.sloc" + pgb = flopy.modpath.ParticleGroupNodeTemplate( + particlegroupname="BACKWARD2", particledata=pb, filename=fpth + ) + + # create modpath files + mp = flopy.modpath.Modpath7( + modelname=mp_namea, flowmodel=gwf, exe_name="mp7", model_ws=model_ws + ) + flopy.modpath.Modpath7Bas(mp, porosity=0.1) + flopy.modpath.Modpath7Sim( + mp, + simulationtype="combined", + trackingdirection="backward", + weaksinkoption="pass_through", + weaksourceoption="pass_through", + referencetime=0.0, + stoptimeoption="extend", + timepointdata=[500, 1000.0], + particlegroups=pga, + ) + + # write modpath datasets + mp.write_input() + + # run modpath + success, buff = mp.run_model(silent=True, report=True) + if success: + for line in buff: + print(line) + else: + raise ValueError("Failed to run.") + + # create modpath files + mp = flopy.modpath.Modpath7( + modelname=mp_nameb, flowmodel=gwf, exe_name="mp7", model_ws=model_ws + ) + flopy.modpath.Modpath7Bas(mp, porosity=0.1) + flopy.modpath.Modpath7Sim( + mp, + simulationtype="endpoint", + trackingdirection="backward", + weaksinkoption="pass_through", + weaksourceoption="pass_through", + referencetime=0.0, + stoptimeoption="extend", + particlegroups=pgb, + ) + + # write modpath datasets + mp.write_input() + + # run modpath + success, buff = mp.run_model(silent=True, report=True) + assert success, pformat(buff) + + +run_vertex_grid_example(modelpth) # check if model ran properly modelpth = os.path.join(modelpth, "mp7_ex2", "mf6") @@ -879,7 +1197,7 @@ from flopy.discretization import UnstructuredGrid # this is a folder containing some unstructured grids -datapth = str(prj_root / "examples" / "data" / "unstructured") +datapth = os.path.join("..", "..", "examples", "data", "unstructured") # simple 
functions to load vertices and incidence lists From 9cd559e94f68fedc8e3db88f0dbf9f0642b295eb Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Thu, 8 Aug 2024 14:06:35 +0000 Subject: [PATCH 57/57] ci(release): set version to 3.8.0, update plugins from DFN files, update changelog --- .docs/md/version_changes.md | 39 +++++++++++++++++++++++++++++++++++++ CITATION.cff | 4 ++-- README.md | 4 ++-- docs/PyPI_release.md | 2 +- flopy/version.py | 4 ++-- version.txt | 2 +- 6 files changed, 47 insertions(+), 8 deletions(-) diff --git a/.docs/md/version_changes.md b/.docs/md/version_changes.md index d3a2a1e26..68d22ebe1 100644 --- a/.docs/md/version_changes.md +++ b/.docs/md/version_changes.md @@ -1,4 +1,43 @@ # Changelog +### Version 3.8.0 + +#### New features + +* [feat(datafile)](https://github.com/modflowpy/flopy/commit/d36bb78c3b7a12ab6f77bbe31e3572915753c86b): Add .headers property with data frame (#2221). Committed by Mike Taves on 2024-06-11. +* [feat(lgr-disv)](https://github.com/modflowpy/flopy/commit/7dec7c52db7c7bf3f8bca61de4d4a953ac1317d2): Add to_disv_gridprops() method to lgr object (#2271). Committed by langevin-usgs on 2024-07-26. + +#### Bug fixes + +* [fix(docs)](https://github.com/modflowpy/flopy/commit/4a26cab4e0af4f49775fd0dc327c8f5ff51843f6): Section underline matches section title (#2208). Committed by Mike Taves on 2024-06-06. +* [fix(vtk)](https://github.com/modflowpy/flopy/commit/d81d7c089f0688173f25c1f6d1e860e08c3a17ba): Fix __transient_vector access (#2209). Committed by mickey-tsai on 2024-06-06. +* [fix(swt)](https://github.com/modflowpy/flopy/commit/667774231a3c3e40fb68067331ead4b8a576cbee): Pass load_only down to Mt3dms.load() (#2222). Committed by wpbonelli on 2024-06-11. +* [fix(ParticleTrackFile)](https://github.com/modflowpy/flopy/commit/f15caaa0554f306eb5839588e4c75f9e14ef9641): Fix particle filtering in get_alldata (#2223). Committed by martclanor on 2024-06-11. +* [fix(regression)](https://github.com/modflowpy/flopy/commit/c69990ac37ce5d6828472af1eadab4dc6687c1e8): Corrections to test_create_tests_transport (#2228). Committed by Mike Taves on 2024-06-13. +* [fix(binaryread)](https://github.com/modflowpy/flopy/commit/e2a85a38640656d5795f8859defb0de14cf668e6): Raise/handle EOFError, deprecate vartype=str (#2226). Committed by Mike Taves on 2024-06-13. +* [fix(pandas warnings)](https://github.com/modflowpy/flopy/commit/5cdd609748cc70d93859192519d87d34194aec40): Catch pandas warnings and display them in a more useful way (#2229). Committed by scottrp on 2024-06-14. +* [fix](https://github.com/modflowpy/flopy/commit/d9ebd81903bb6aa03864e156a0488128867286ef): Test_uzf_negative_iuzfopt (#2236). Committed by Mike Taves on 2024-06-17. +* [fix(PlotMapView)](https://github.com/modflowpy/flopy/commit/678bb61346bc226831ae5b66615bc9a00c355cc5): Default to all layers in plot_pathline() (#2242). Committed by wpbonelli on 2024-06-19. +* [fix(Raster)](https://github.com/modflowpy/flopy/commit/a2a159f1758781fc633710f68af5441eb1e4dafb): Reclassify np.float64 correctly (#2235). Committed by martclanor on 2024-06-24. +* [fix(HeadFile)](https://github.com/modflowpy/flopy/commit/9db562a3b1d18af3801036b1d79d74668c0f71c6): Fix dis reversal, expand tests (#2247). Committed by wpbonelli on 2024-06-25. +* [fix(mfmodel)](https://github.com/modflowpy/flopy/commit/576cefe5e9826a53a5085d7e3aee9ce7765be22f): Fix get_ims_package (#2272). Committed by martclanor on 2024-08-06. 
+* [fix(modelgrid)](https://github.com/modflowpy/flopy/commit/b64f2bdae803830936da89cf1c8e97ab4f660981): Fix missing coord info if disv (#2284). Committed by martclanor on 2024-08-07. +* [fix(examples)](https://github.com/modflowpy/flopy/commit/2eace7843409b78497bc941d49eab68394833bfb): Restore example notebooks skipped after #2264 (#2286). Committed by wpbonelli on 2024-08-08. + +#### Refactoring + +* [refactor(expired deprecation)](https://github.com/modflowpy/flopy/commit/31955a7536b1f53d2a572580e05ff282a933716e): Raise AttributeError with to_shapefile (#2200). Committed by Mike Taves on 2024-05-30. +* [refactor](https://github.com/modflowpy/flopy/commit/bbabf86c0292ed2b237f89371afba01140050592): Deprecate unused flopy.utils.binaryfile.binaryread_struct (#2201). Committed by Mike Taves on 2024-05-31. +* [refactor(exceptions)](https://github.com/modflowpy/flopy/commit/0d9947eb8301561569676d4e3bdbc28a869e5bad): Raise NotImplementedError where appropriate (#2213). Committed by Mike Taves on 2024-06-07. +* [refactor(datafile)](https://github.com/modflowpy/flopy/commit/e2d16df5cc1a27a43e274a5b16eee7d91d5decfa): Use len(obj) rather than obj.get_nrecords() (#2215). Committed by Mike Taves on 2024-06-11. +* [refactor(binarygrid_util)](https://github.com/modflowpy/flopy/commit/ae388ef5a2f40abc950c05ca5b156f7e42337983): Refactor get_iverts to be general and not dependent on grid type (#2230). Committed by langevin-usgs on 2024-06-14. +* [refactor(datafile)](https://github.com/modflowpy/flopy/commit/cfdedbcb35c2f812e2b7efd78706d4eaa8cdc8f5): Deprecate list_records() and other list_ methods (#2232). Committed by Mike Taves on 2024-06-14. +* [refactor](https://github.com/modflowpy/flopy/commit/1e44b3fd57bfad1602a06247e44878a7237e0e3a): Fixes for numpy-2.0 deprecation warnings, require numpy>=1.20.3 (#2237). Committed by Mike Taves on 2024-06-17. +* [refactor](https://github.com/modflowpy/flopy/commit/59040d0948337245d6527671960b56446d39d4d3): Np.where(cond) -> np.asarray(cond).nonzero() (#2238). Committed by wpbonelli on 2024-06-17. +* [refactor(dependencies)](https://github.com/modflowpy/flopy/commit/e48198c661d8b10d1c1120a88a6cd0c7987d7b22): Support numpy 2 (#2241). Committed by wpbonelli on 2024-06-19. +* [refactor(get-modflow)](https://github.com/modflowpy/flopy/commit/baf8dff95ae3cc55adee54ec3e141437ae153b9c): Support ARM macs by default (previously opt-in) (#2225). Committed by wpbonelli on 2024-06-21. +* [refactor(Raster)](https://github.com/modflowpy/flopy/commit/bad483b3910218dc828c993863d540793111090d): Add new methods and checks (#2267). Committed by Joshua Larsen on 2024-07-17. +* [refactor(resample_to_grid)](https://github.com/modflowpy/flopy/commit/bd7f0a578b9093697948255eb9ecc164d5574f6e): Filter raster nan values from scipy resampling routines (#2285). Committed by Joshua Larsen on 2024-08-08. + ### Version 3.7.0 #### New features diff --git a/CITATION.cff b/CITATION.cff index 6b51992fb..b9cf5acb6 100644 --- a/CITATION.cff +++ b/CITATION.cff @@ -3,8 +3,8 @@ message: If you use this software, please cite both the article from preferred-c references, and the software itself. type: software title: FloPy -version: 3.8.0.dev0 -date-released: '2024-05-23' +version: 3.8.0 +date-released: '2024-08-08' doi: 10.5066/F7BK19FH abstract: A Python package to create, run, and post-process MODFLOW-based models. 
repository-artifact: https://pypi.org/project/flopy diff --git a/README.md b/README.md index 8eef94c5f..3c8bf2008 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ flopy3 -### Version 3.8.0.dev0 (preliminary) +### Version 3.8.0 [![flopy continuous integration](https://github.com/modflowpy/flopy/actions/workflows/commit.yml/badge.svg?branch=develop)](https://github.com/modflowpy/flopy/actions/workflows/commit.yml) [![Read the Docs](https://github.com/modflowpy/flopy/actions/workflows/rtd.yml/badge.svg?branch=develop)](https://github.com/modflowpy/flopy/actions/workflows/rtd.yml) @@ -150,7 +150,7 @@ How to Cite ##### ***Software/Code citation for FloPy:*** -[Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.8.0.dev0 (preliminary): U.S. Geological Survey Software Release, 23 May 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH) +[Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.8.0: U.S. Geological Survey Software Release, 08 August 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH) Additional FloPy Related Publications diff --git a/docs/PyPI_release.md b/docs/PyPI_release.md index e56a38988..016a42854 100644 --- a/docs/PyPI_release.md +++ b/docs/PyPI_release.md @@ -30,4 +30,4 @@ How to Cite *Software/Code citation for FloPy:* -[Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.8.0.dev0 (preliminary): U.S. Geological Survey Software Release, 23 May 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH) +[Bakker, Mark, Post, Vincent, Hughes, J. D., Langevin, C. D., White, J. T., Leaf, A. T., Paulinski, S. R., Bellino, J. C., Morway, E. D., Toews, M. W., Larsen, J. D., Fienen, M. N., Starn, J. J., Brakenhoff, D. A., and Bonelli, W. P., 2024, FloPy v3.8.0: U.S. Geological Survey Software Release, 08 August 2024, https://doi.org/10.5066/F7BK19FH](https://doi.org/10.5066/F7BK19FH) diff --git a/flopy/version.py b/flopy/version.py index aaf8011d1..f0447fffa 100644 --- a/flopy/version.py +++ b/flopy/version.py @@ -1,4 +1,4 @@ # flopy version file automatically created using -# update_version.py on May 23, 2024 17:10:43 +# update_version.py on August 08, 2024 13:58:49 -__version__ = "3.8.0.dev0" +__version__ = "3.8.0" diff --git a/version.txt b/version.txt index ae664ee4d..0be1fc7d2 100644 --- a/version.txt +++ b/version.txt @@ -1 +1 @@ -3.8.0.dev0 \ No newline at end of file +3.8.0 \ No newline at end of file
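Once the release commit above is tagged and published, the installed package can be checked against the expected version string. A trivial sanity check after upgrading (the expected value here assumes the 3.8.0 release shown above):

```python
import flopy

# __version__ comes from flopy/version.py, which update_version.py rewrites
assert flopy.__version__ == "3.8.0"
print(f"flopy {flopy.__version__} is installed")
```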