Fix _masterFunc2 fail flag caching and add fail flag identification to IPOPT #407

Merged · 15 commits · Jun 23, 2024
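Summary of the change: the four IPOPT callbacks now translate a failed evaluation (fail == 1) into a NaN return value instead of only reacting to user-requested termination (fail == 2), and _masterFunc2 now caches the fail flag alongside the function values so that a cache hit at a previously failed point still reports the failure. The snippet below is only an illustration of the flag convention the diff relies on (the helper names are made up, not pyoptsparse API): user functions return a (funcs, fail) pair, and the wrapper keeps the worst flag seen with max().

```python
import numpy as np


def fold_fail_flags(*flags):
    """Keep the worst flag seen so far, mirroring masterFail = max(masterFail, fail).

    Convention used in this diff: 0 = evaluation OK, 1 = failed evaluation,
    2 = user-requested termination.
    """
    masterFail = 0
    for fail in flags:
        masterFail = max(masterFail, int(fail))  # user-level booleans fold to 0/1
    return masterFail


def objfun(xdict):
    """A user function signals failure through its second return value."""
    funcs = {"obj": float(sum(np.sum(x**2) for x in xdict.values()))}
    fail = not all(np.all(np.isfinite(x)) for x in xdict.values())  # e.g. fail on non-finite inputs
    return funcs, fail


funcs, fail = objfun({"xvars": np.array([1.0, np.inf])})
print(fold_fail_flags(False, fail))  # -> 1
```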
pyoptsparse/pyIPOPT/pyIPOPT.py (13 additions, 5 deletions)
@@ -161,7 +161,7 @@

         if len(optProb.constraints) == 0:
             # If the user *actually* has an unconstrained problem,
-            # snopt sort of chokes with that....it has to have at
+            # IPOPT sort of chokes with that....it has to have at
             # least one constraint. So we will add one
             # automatically here:
             self.unconstrained = True
@@ -217,19 +217,25 @@
         # Define the 4 call back functions that ipopt needs:
         def eval_f(x, user_data=None):
             fobj, fail = self._masterFunc(x, ["fobj"])
-            if fail == 2:
+            if fail == 1:
+                fobj = np.array(np.NaN)
+            elif fail == 2:
                 self.userRequestedTermination = True
             return fobj

         def eval_g(x, user_data=None):
             fcon, fail = self._masterFunc(x, ["fcon"])
-            if fail == 2:
+            if fail == 1:
+                fcon = np.array(np.NaN)
+            elif fail == 2:
                 self.userRequestedTermination = True
             return fcon.copy()

         def eval_grad_f(x, user_data=None):
             gobj, fail = self._masterFunc(x, ["gobj"])
-            if fail == 2:
+            if fail == 1:
+                gobj = np.array(np.NaN)
+            elif fail == 2:
                 self.userRequestedTermination = True
             return gobj.copy()

@@ -238,7 +244,9 @@
                 return copy.deepcopy(matStruct)
             else:
                 gcon, fail = self._masterFunc(x, ["gcon"])
-                if fail == 2:
+                if fail == 1:
+                    gcon = np.array(np.NaN)
+                elif fail == 2:
                     self.userRequestedTermination = True
                 return gcon.copy()

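All four callbacks above now follow the same pattern. The helper below is not part of the PR; it is a distilled, hypothetical view of that pattern: a failed evaluation is surfaced to IPOPT as a NaN result, while a user-requested termination only sets the optimizer's termination flag.

```python
import numpy as np


class _OptState:
    """Stand-in for the optimizer object; only the termination flag matters here."""

    userRequestedTermination = False


def handle_fail_flag(value, fail, state):
    """Hypothetical mirror of the logic shared by eval_f/eval_g/eval_grad_f/eval_jac_g.

    fail == 1: the evaluation failed, so hand IPOPT a NaN it can recognize.
    fail == 2: the user asked to stop, so record that on the optimizer state.
    """
    if fail == 1:
        value = np.array(np.nan)
    elif fail == 2:
        state.userRequestedTermination = True
    return value


state = _OptState()
print(handle_fail_flag(np.array(1.0), 1, state))  # -> nan
print(handle_fail_flag(np.array(1.0), 2, state), state.userRequestedTermination)  # -> 1.0 True
```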
pyoptsparse/pyOpt_optimizer.py (15 additions, 4 deletions)
@@ -81,7 +81,7 @@ def __init__(
         self.storeSens: bool = True

         # Cache storage
-        self.cache: Dict[str, Any] = {"x": None, "fobj": None, "fcon": None, "gobj": None, "gcon": None}
+        self.cache: Dict[str, Any] = {"x": None, "fobj": None, "fcon": None, "gobj": None, "gcon": None, "fail": None}

         # A second-level cache for optimizers that require callbacks
         # for each constraint. (eg. PSQP etc)
@@ -388,6 +388,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True):

                 # Update fail flag
                 masterFail = max(masterFail, fail)
+                self.cache["fail"] = masterFail

             # fobj is now in cache
             returns.append(self.cache["fobj"])
@@ -437,6 +438,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True):

                 # Update fail flag
                 masterFail = max(masterFail, fail)
+                self.cache["fail"] = masterFail

             # fcon is now in cache
             returns.append(self.cache["fcon"])
@@ -447,10 +449,13 @@
                 # The previous evaluated point is different than the point requested for the derivative
                 # OR this is the first call to _masterFunc2 in a hot started optimization
                 # Recursively call the routine with ['fobj', 'fcon']
-                self._masterFunc2(x, ["fobj", "fcon"], writeHist=False)
+                _, _, fail = self._masterFunc2(x, ["fobj", "fcon"], writeHist=False)
                 # We *don't* count that extra call, since that will
                 # screw up the numbering...so we subtract the last call.
                 self.callCounter -= 1
+                # Update fail flag
+                masterFail = max(masterFail, fail)
+                self.cache["fail"] = masterFail
                 # Now, the point has been evaluated correctly so we
                 # determine if we have to run the sens calc:

@@ -491,6 +496,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True):

                 # Update fail flag
                 masterFail = max(masterFail, fail)
+                self.cache["fail"] = masterFail

             # gobj is now in the cache
             returns.append(self.cache["gobj"])
@@ -502,10 +508,13 @@ def _masterFunc2(self, x, evaluate, writeHist=True):
                 # The previous evaluated point is different than the point requested for the derivative
                 # OR this is the first call to _masterFunc2 in a hot started optimization
                 # Recursively call the routine with ['fobj', 'fcon']
-                self._masterFunc2(x, ["fobj", "fcon"], writeHist=False)
+                _, _, fail = self._masterFunc2(x, ["fobj", "fcon"], writeHist=False)
                 # We *don't* count that extra call, since that will
                 # screw up the numbering...so we subtract the last call.
                 self.callCounter -= 1
+                # Update fail flag
+                masterFail = max(masterFail, fail)
+                self.cache["fail"] = masterFail
                 # Now, the point has been evaluated correctly so we
                 # determine if we have to run the sens calc:
             if self.cache["gcon"] is None:
@@ -544,13 +553,15 @@ def _masterFunc2(self, x, evaluate, writeHist=True):

                 # Update fail flag
                 masterFail = max(masterFail, fail)
+                self.cache["fail"] = masterFail

             # gcon is now in the cache
             returns.append(self.cache["gcon"])
             if self.storeSens:
                 hist["funcsSens"] = self.cache["funcsSens"]

-        # Put the fail flag in the history:
+        # Update the fail flag with any cached failure and put the fail flag in the history
+        masterFail = max(self.cache["fail"], masterFail)
hist["fail"] = masterFail

# Put the iteration counter in the history
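This is the core of the fix in this file. Previously _masterFunc2 cached fobj, fcon, gobj, and gcon keyed on x but not the fail flag, so a later request at the same point (for example a constraint evaluation right after a failed objective evaluation) was served from the cache with no failure reported. Storing "fail" next to the cached values and folding it back in with max(self.cache["fail"], masterFail) closes that gap. A minimal, self-contained sketch of the idea (not the pyoptsparse implementation) follows:

```python
import numpy as np


class PointCache:
    """Toy value + fail-flag cache keyed on the last evaluated point."""

    def __init__(self, objfun):
        self.objfun = objfun
        self.cache = {"x": None, "fobj": None, "fail": None}

    def eval_fobj(self, x):
        x = np.asarray(x, dtype=float)
        if self.cache["x"] is None or np.linalg.norm(x - self.cache["x"]) > 1e-16:
            fobj, fail = self.objfun(x)
            self.cache.update({"x": x.copy(), "fobj": fobj, "fail": int(fail)})
        # A cache hit reuses the stored fail flag along with the value,
        # which is what the "fail" entry added in this PR provides.
        return self.cache["fobj"], self.cache["fail"]


cache = PointCache(lambda x: (float(np.sum(x**2)), bool(np.any(x > 1.5))))
print(cache.eval_fobj([2.0, 0.0]))  # evaluated: (4.0, 1)
print(cache.eval_fobj([2.0, 0.0]))  # cache hit: still (4.0, 1)
```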
tests/test_optimizer.py (new file, 224 additions)
@@ -0,0 +1,224 @@
"""Test for Optimizer"""

# Standard Python modules
import unittest

# External modules
import numpy as np

# First party modules
from pyoptsparse import OPT, Optimization


class TestOptimizer(unittest.TestCase):
tol = 1e-12

def get_objfunc(self, failFlag=False):
"""
Return an objfunc callable function where we can choose whether
the fail flag will be returned as True or False.
"""
# Initialize iters to infinite so the fail flag is never thrown on setup
iters = np.inf

def objfunc(xdict):
"""
This is a simple quadratic test function with linear constraints.
The actual problem doesn't really matter, since we are not testing optimization,
but just optProb. However, we need to initialize and run an optimization
in order to have optimizer-specific fields in optProb populated, such as
jacIndices.

This problem is probably not feasible, but that's okay.

Reset the iteration counter with a special call that includes
a nonfinite value in the design variable vector.
"""
funcs = {}
funcs["obj_0"] = 0
for x in xdict.keys():
funcs["obj_0"] += np.sum(np.power(xdict[x], 2))
for iCon, nc in enumerate(self.nCon):
conName = f"con_{iCon}"
funcs[conName] = np.zeros(nc)
for x in xdict.keys():
for j in range(nc):
funcs[conName][j] = (iCon + 1) * np.sum(xdict[x])

# Throw the fail flag if it's in the specified range or True
nonlocal iters
if isinstance(failFlag, tuple):
if not len(failFlag) == 2:
raise ValueError("Fail flag must be a tuple of (iter start fail, iter end fail) or a boolean")
fail = failFlag[0] <= iters < failFlag[1]
elif isinstance(failFlag, bool):
fail = failFlag
else:
raise ValueError("Fail flag must be a tuple of (iter start fail, iter end fail) or a boolean")
iters += 1

# Reset iteration counter if any non-finite values in DV dict
for xVec in xdict.values():
if not np.all(np.isfinite(xVec)):
iters = 0
break
return funcs, fail

return objfunc

    def setup_optProb(self, failFlag=False, nObj=1, nDV=[4], nCon=[2]):
        """
        This function sets up a general optimization problem, with arbitrary
        DVs, constraints and objectives.
        """
        self.nObj = nObj
        self.nDV = nDV
        self.nCon = nCon

        # Optimization Object
        self.optProb = Optimization("Configurable Test Problem", self.get_objfunc(failFlag=failFlag))
        self.x0 = {}
        # Design Variables
        for iDV in range(len(nDV)):
            n = nDV[iDV]
            x0 = np.ones(n)
            dvName = f"x{iDV}"
            self.x0[dvName] = x0
            self.optProb.addVarGroup(
                dvName,
                n,
                lower=-1,
                upper=1,
                value=x0,
            )

        # Constraints
        for iCon in range(len(nCon)):
            nc = nCon[iCon]
            self.optProb.addConGroup(
                f"con_{iCon}",
                nc,
                lower=-5,
                upper=5,
            )

        # Objective
        for iObj in range(nObj):
            self.optProb.addObj(f"obj_{iObj}")

        # Finalize
        self.optProb.printSparsity()
        # Create and store the optimizer
        self.opt = OPT("slsqp", options={"IFILE": "optProb_SLSQP.out"})
        self.opt(self.optProb, sens="FD")

        # Call the masterFunc with some infinite DVs so it resets iters
        self.opt._masterFunc(np.full(np.sum(nDV), np.inf), ["fobj"])

    def test_masterFunc_fobj_fail(self):
        """
        Test that if the objective fails when _masterFunc is called,
        the fail flag is returned with the expected value.
        """
        nDV = [4]
        self.setup_optProb(failFlag=(1, 100), nDV=nDV)

        x = np.ones(np.sum(nDV), dtype=float)

        # Do not fail
        _, fail = self.opt._masterFunc(x, ["fobj"])
        self.assertFalse(fail)

        # Should fail on the second function call
        x += 1  # change x so it doesn't use the cache
        _, fail = self.opt._masterFunc(x, ["fobj"])
        self.assertTrue(fail)

    def test_masterFunc_fobj_fcon_cache_fail(self):
        """
        Test that if the objective fails when _masterFunc is called
        and then the constraints are called, it still returns a failure.
        """
        nDV = [4]
        self.setup_optProb(failFlag=(1, 100), nDV=nDV)

        x = np.ones(np.sum(nDV), dtype=float)

        # Do not fail
        _, fail = self.opt._masterFunc(x, ["fobj"])
        self.assertFalse(fail)

        # Check that the cached value does not fail either
        _, fail = self.opt._masterFunc(x, ["fcon"])
        self.assertFalse(fail)

        # Should fail on the second function call
        x += 1  # change x so it doesn't use the cache
        _, fail = self.opt._masterFunc(x, ["fobj"])
        self.assertTrue(fail)

        # Check that the cached value now fails too
        _, fail = self.opt._masterFunc(x, ["fcon"])
        self.assertTrue(fail)

    def test_masterFunc_fail_then_success(self):
        """
        Test that if the objective/constraint fails when _masterFunc is called
        and then it succeeds, the fail flag is no longer true.
        """
        nDV = [4, 5]
        self.setup_optProb(failFlag=(0, 1), nDV=nDV)

        x = np.ones(np.sum(nDV), dtype=float)

        # Fail
        _, _, fail = self.opt._masterFunc(x, ["fobj", "fcon"])
        self.assertTrue(fail)

        # Should succeed on the second call
        x += 1  # change x so it doesn't use the cache
        _, _, fail = self.opt._masterFunc(x, ["fobj", "fcon"])
        self.assertFalse(fail)

    def test_masterFunc_fail_grad_after_fail_func(self):
        """
        Test that if the _masterFunc is called to compute the gradients on
        an x that isn't in the cache and the primal fails, it returns a
        fail flag for the gradient too.
        """
        nDV = [4, 5]
        self.setup_optProb(failFlag=(0, 1000), nDV=nDV)

        x = np.ones(np.sum(nDV), dtype=float) + 5

        # Fail obj gradient on DVs that haven't been evaluated
        _, fail = self.opt._masterFunc(x, ["gobj"])
        self.assertTrue(fail)

        # Fail con gradient on DVs that haven't been evaluated
        x += 1
        _, fail = self.opt._masterFunc(x, ["gcon"])
        self.assertTrue(fail)

    def test_masterFunc_succeed_grad_after_fail_func(self):
        """
        Test that if the _masterFunc is called to compute the gradients on
        an x that is in the cache and the primal fails, it returns a
        False fail flag for the gradient.
        """
        nDV = [4, 5]
        self.setup_optProb(failFlag=(0, 1), nDV=nDV)

        x = np.ones(np.sum(nDV), dtype=float) + 5

        _, fail = self.opt._masterFunc(x, ["fobj"])  # call primal to put x in the cache
        self.assertTrue(fail)

        # Gradient succeeds even though primal failed
        _, _, fail = self.opt._masterFunc(x, ["gobj", "gcon"])
        self.assertFalse(fail)


if __name__ == "__main__":
    unittest.main()
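The new tests drive _masterFunc directly through a small SLSQP-backed problem; since the file ends with unittest.main(), they can be run with, for example, python -m unittest tests.test_optimizer. The snippet below is a rough interactive sketch of the cache scenario they cover; it assumes a pyoptsparse development install with SLSQP available and the repository root on the import path.

```python
import numpy as np

from tests.test_optimizer import TestOptimizer

# Reuse the test fixture to build a small SLSQP-backed problem whose
# objective is configured to fail from the second evaluation onward.
case = TestOptimizer("test_masterFunc_fobj_fcon_cache_fail")
case.setup_optProb(failFlag=(1, 100), nDV=[4])

x = np.ones(4)
_, fail_first = case.opt._masterFunc(x, ["fobj"])   # first point succeeds
x = x + 1.0                                         # new point, configured to fail
_, fail_primal = case.opt._masterFunc(x, ["fobj"])  # primal evaluation fails
_, fail_cached = case.opt._masterFunc(x, ["fcon"])  # served from the cache at the same x
print(fail_first, fail_primal, fail_cached)         # expected with this fix: False True True
```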