fix(par_nur_has_converged): set mpi_world using get_mpi_world() #1304

Merged · 3 commits · Jul 20, 2023
52 changes: 52 additions & 0 deletions autotest/data/ex-gwf-bump/bottom.txt

Large diffs are not rendered by default.

Binary file added autotest/data/ex-gwf-bump/results.hds.cmp
Binary file not shown.
145 changes: 145 additions & 0 deletions autotest/test_gwf_newton_under_relaxation.py
@@ -0,0 +1,145 @@
import os

import flopy
import numpy as np
import pathlib as pl
import pytest
from conftest import project_root_path
from framework import TestFramework
from simulation import TestSimulation

ex = ["nr_ur01", "nr_ur02"]

data_path = project_root_path / "autotest/data/ex-gwf-bump/"
nper = 1
nlay = 1
nrow = 51
ncol = 51
xlen = 100.0
ylen = 100.0
top = 25.0
k11 = 1.0
H1 = 7.5
H2 = 2.5
delr = xlen / float(ncol)
delc = ylen / float(nrow)
extents = (0, xlen, 0, ylen)
shape2d = (nrow, ncol)
shape3d = (nlay, nrow, ncol)
nouter = 50
ninner = 100
hclose = 1e-9
hclose_outer = hclose * 10.0
rclose = 1e-3
botm = np.loadtxt(data_path / "bottom.txt").reshape(shape3d)
chd_spd = [[0, i, 0, H1] for i in range(nrow)]
chd_spd += [[0, i, ncol - 1, H2] for i in range(nrow)]
base_heads = flopy.utils.HeadFile(data_path / "results.hds.cmp").get_data()


def build_model(idx, ws):
name = ex[idx]
if idx == 1:
sim_ws = pl.Path(f"{ws}/working")
else:
sim_ws = ws
sim = flopy.mf6.MFSimulation(
sim_name=name,
sim_ws=str(sim_ws),
exe_name="mf6",
)
flopy.mf6.ModflowTdis(sim, nper=nper)
linear_acceleration = "bicgstab"
newtonoptions = "newton under_relaxation"

flopy.mf6.ModflowIms(
sim,
print_option="ALL",
linear_acceleration=linear_acceleration,
outer_maximum=nouter,
outer_dvclose=hclose_outer,
inner_maximum=ninner,
inner_dvclose=hclose,
rcloserecord=rclose,
)
gwf = flopy.mf6.ModflowGwf(
sim,
modelname=name,
newtonoptions=newtonoptions,
)
flopy.mf6.ModflowGwfdis(
gwf,
nlay=nlay,
nrow=nrow,
ncol=ncol,
delr=delr,
delc=delc,
top=top,
botm=botm,
idomain=1,
)
flopy.mf6.ModflowGwfnpf(
gwf,
icelltype=1,
k=k11,
)
flopy.mf6.ModflowGwfic(gwf, strt=H1)
flopy.mf6.ModflowGwfchd(gwf, stress_period_data=chd_spd)

head_filerecord = f"{name}.hds"
flopy.mf6.ModflowGwfoc(
gwf,
head_filerecord=head_filerecord,
saverecord=[("HEAD", "ALL")],
)

if idx == 1:
sim.write_simulation(silent=True)
mfsplit = flopy.mf6.utils.Mf6Splitter(sim)
split_array = np.tri(nrow, ncol).astype(int)
new_sim = mfsplit.split_model(split_array)
new_sim.set_sim_path(ws)
mfsplit.save_node_mapping(pl.Path(f"{ws}/mapping.json"))
return new_sim, None
else:
return sim, None


def eval_head(sim):
print(f"evaluating heads...{sim.idxsim}")
mf6sim = flopy.mf6.MFSimulation.load(sim_ws=sim.simpath)
if sim.idxsim == 1:
mfsplit = flopy.mf6.utils.Mf6Splitter(mf6sim)
mfsplit.load_node_mapping(
mf6sim, pl.Path(f"{sim.simpath}/mapping.json")
)
head_dict = {}
for modelname in mf6sim.model_names:
mnum = int(modelname.split("_")[-1])
head_dict[mnum] = (
mf6sim.get_model(modelname).output.head().get_data()
)
heads = mfsplit.reconstruct_array(head_dict)
else:
heads = mf6sim.get_model().output.head().get_data()
msg = "head comparison failed"
assert np.allclose(base_heads, heads), msg


@pytest.mark.parametrize(
"idx, name",
list(enumerate(ex)),
)
def test_mf6model(idx, name, function_tmpdir, targets):
ws = str(function_tmpdir)
test = TestFramework()
test.build(build_model, idx, ws)
test.run(
TestSimulation(
name=name,
exe_dict=targets,
exfunc=eval_head,
idxsim=idx,
),
ws,
)
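
As a side note on the second test case above: the split_array handed to Mf6Splitter is a lower-triangular 0/1 mask, so the grid is divided along its diagonal into two submodels (numbered 0 and 1). A minimal sketch, not part of the test, of what np.tri produces:

import numpy as np

# np.tri puts ones on and below the diagonal and zeros above it,
# so casting to int yields the two submodel numbers 0 and 1.
split = np.tri(4, 4).astype(int)
print(split)
# [[1 0 0 0]
#  [1 1 0 0]
#  [1 1 1 0]
#  [1 1 1 1]]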
69 changes: 69 additions & 0 deletions autotest/test_par_gwf_newton_under_relaxation.py
@@ -0,0 +1,69 @@
import os
from decimal import Decimal
import pytest
from framework import TestFramework
from simulation import TestSimulation

# This test reuses the simulation data from test_gwf_newton_under_relaxation
# and runs it in parallel on one and two cpus, so the parallel coupling of
# two models can be compared with the serial model.
#
# This test also checks that Newton under_relaxation works in parallel.

ex = ["par_nr_ur01", "par_nr_ur02"]


def build_petsc_db(idx, exdir):
from test_gwf_newton_under_relaxation import hclose, ninner

petsc_db_file = os.path.join(exdir, ".petscrc")
with open(petsc_db_file, "w") as petsc_file:
petsc_file.write("-ksp_type bicg\n")
petsc_file.write("-pc_type bjacobi\n")
petsc_file.write("-sub_pc_type ilu\n")
petsc_file.write("-sub_pc_factor_levels 2\n")
petsc_file.write(f"-dvclose {Decimal(hclose):.2E}\n")
petsc_file.write(f"-nitermax {ninner}\n")
petsc_file.write("-options_left no\n")


def build_model(idx, exdir):
from test_gwf_newton_under_relaxation import build_model as build_model_ext

build_petsc_db(idx, exdir)
sim, dummy = build_model_ext(idx, exdir)
return sim, dummy


def eval_model(test_sim):
from test_gwf_newton_under_relaxation import eval_head as compare_to_ref

compare_to_ref(test_sim)


@pytest.mark.parallel
@pytest.mark.parametrize(
"idx, name",
list(enumerate(ex)),
)
def test_mf6model(idx, name, function_tmpdir, targets):
test = TestFramework()
test.build(build_model, idx, str(function_tmpdir))
if idx == 1:
ncpus = 2
else:
ncpus = 1
test.run(
TestSimulation(
name=name,
exe_dict=targets,
exfunc=eval_model,
idxsim=idx,
make_comparison=False,
parallel=True,
ncpus=ncpus,
),
str(function_tmpdir),
)
23 changes: 20 additions & 3 deletions src/Solution/NumericalSolution.f90
@@ -160,6 +160,7 @@ module NumericalSolutionModule

! 'protected' (this can be overridden)
procedure :: sln_has_converged
procedure :: sln_sync_newtonur_flag
procedure :: sln_nur_has_converged
procedure :: sln_calc_ptc
procedure :: sln_underrelax
@@ -1750,6 +1751,9 @@ subroutine solve(this, kiter)
this%dxold(i0:i1), inewtonur, dxmax_nur, locmax_nur)
end do
!
! -- synchronize Newton Under-relaxation flag
inewtonur = this%sln_sync_newtonur_flag(inewtonur)
!
! -- check for convergence if newton under-relaxation applied
if (inewtonur /= 0) then
!
@@ -3140,14 +3144,27 @@ function sln_has_converged(this, max_dvc) result(has_converged)

end function sln_has_converged

!> @brief Synchronize Newton Under-relaxation flag
!<
function sln_sync_newtonur_flag(this, inewtonur) result(ivalue)
! dummy
class(NumericalSolutionType) :: this !< NumericalSolutionType instance
integer(I4B), intent(in) :: inewtonur !< Newton Under-relaxation flag
! local
integer(I4B) :: ivalue !< Default is set to current value (1 = under-relaxation applied)

ivalue = inewtonur

end function sln_sync_newtonur_flag

!> @brief Custom convergence check for when Newton UR has been applied
!<
function sln_nur_has_converged(this, dxold_max, hncg, dpak) &
result(has_converged)
class(NumericalSolutionType) :: this !< NumericalSolutionType instance
- real(DP) :: dxold_max !< the maximum dependent variable change for unrelaxed cells
- real(DP) :: hncg !< largest dep. var. change at end of Picard iteration
- real(DP) :: dpak !< largest change in advanced packages
+ real(DP), intent(in) :: dxold_max !< the maximum dependent variable change for unrelaxed cells
+ real(DP), intent(in) :: hncg !< largest dep. var. change at end of Picard iteration
+ real(DP), intent(in) :: dpak !< largest change in advanced packages
logical(LGP) :: has_converged !< True, when converged

has_converged = .false.
31 changes: 24 additions & 7 deletions src/Solution/ParallelSolution.f90
@@ -13,9 +13,11 @@ module ParallelSolutionModule
contains
! override
procedure :: sln_has_converged => par_has_converged
procedure :: sln_sync_newtonur_flag => par_sync_newtonur_flag
procedure :: sln_nur_has_converged => par_nur_has_converged
procedure :: sln_calc_ptc => par_calc_ptc
procedure :: sln_underrelax => par_underrelax

end type ParallelSolutionType

contains
@@ -45,20 +47,36 @@ function par_has_converged(this, max_dvc) result(has_converged)

end function par_has_converged

function par_sync_newtonur_flag(this, inewtonur) result(ivalue)
class(ParallelSolutionType) :: this !< parallel solution instance
integer(I4B), intent(in) :: inewtonur !< local Newton Under-relaxation flag
! local
integer(I4B) :: ivalue !< Maximum of all local values (1 = under-relaxation applied)
integer :: ierr
type(MpiWorldType), pointer :: mpi_world

mpi_world => get_mpi_world()
call MPI_Allreduce(inewtonur, ivalue, 1, MPI_INTEGER, &
MPI_MAX, mpi_world%comm, ierr)

end function par_sync_newtonur_flag

function par_nur_has_converged(this, dxold_max, hncg, dpak) &
result(has_converged)
class(ParallelSolutionType) :: this !< parallel solution instance
- real(DP) :: dxold_max !< the maximum dependent variable change for cells not adjusted by NUR
- real(DP) :: hncg !< largest dep. var. change at end of Picard iter.
- real(DP) :: dpak !< largest change in advanced packages
+ real(DP), intent(in) :: dxold_max !< the maximum dependent variable change for cells not adjusted by NUR
+ real(DP), intent(in) :: hncg !< largest dep. var. change at end of Picard iter.
+ real(DP), intent(in) :: dpak !< largest change in advanced packages
logical(LGP) :: has_converged !< True, when converged
! local
- integer(I4B) :: icnvg_local, icnvg_global
+ integer(I4B) :: icnvg_local
+ integer(I4B) :: icnvg_global
integer :: ierr
type(MpiWorldType), pointer :: mpi_world

- has_converged = .false.
+ mpi_world => get_mpi_world()
+
+ has_converged = .false.
icnvg_local = 0
if (this%NumericalSolutionType%sln_nur_has_converged( &
dxold_max, hncg, dpak)) then
@@ -67,8 +85,7 @@ function par_nur_has_converged(this, dxold_max, hncg, dpak) &

call MPI_Allreduce(icnvg_local, icnvg_global, 1, MPI_INTEGER, &
MPI_MIN, mpi_world%comm, ierr)

- has_converged = (icnvg_global == 1)
+ if (icnvg_global == 1) has_converged = .true.

end function par_nur_has_converged

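
Taken together, the two overrides follow a simple pattern: the local Newton under-relaxation flag is synchronized with an MPI_MAX reduction (if any rank applied under-relaxation, every rank must take the custom convergence path), while the convergence check itself uses an MPI_MIN reduction (the coupled solution converges only when every rank has converged). A minimal mpi4py sketch of that pattern, not part of this PR, with COMM_WORLD standing in for the communicator obtained via get_mpi_world():

from mpi4py import MPI

comm = MPI.COMM_WORLD  # stand-in for mpi_world%comm from get_mpi_world()


def sync_newtonur_flag(inewtonur_local: int) -> int:
    # MAX reduction: if any rank applied under-relaxation (flag = 1),
    # all ranks see 1 and take the same convergence path.
    return comm.allreduce(inewtonur_local, op=MPI.MAX)


def nur_has_converged(converged_local: bool) -> bool:
    # MIN reduction over 0/1 flags: convergence is declared only
    # when every rank reports local convergence.
    icnvg_global = comm.allreduce(1 if converged_local else 0, op=MPI.MIN)
    return icnvg_global == 1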