diff --git a/.github/environment.yml b/.github/environment.yml
index 10437bc1..65035f83 100644
--- a/.github/environment.yml
+++ b/.github/environment.yml
@@ -1,7 +1,7 @@
 dependencies:
   # build
   - python >=3.9
-  - numpy >=1.21
+  - numpy >=1.21,<2
   - ipopt
   - swig
   - meson >=1.3.2
diff --git a/pyoptsparse/__init__.py b/pyoptsparse/__init__.py
index 2f693321..5ab4a539 100644
--- a/pyoptsparse/__init__.py
+++ b/pyoptsparse/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "2.11.2"
+__version__ = "2.11.3"

 from .pyOpt_history import History
 from .pyOpt_variable import Variable
diff --git a/pyoptsparse/pyIPOPT/pyIPOPT.py b/pyoptsparse/pyIPOPT/pyIPOPT.py
index b009a4a4..da0dcffb 100644
--- a/pyoptsparse/pyIPOPT/pyIPOPT.py
+++ b/pyoptsparse/pyIPOPT/pyIPOPT.py
@@ -161,7 +161,7 @@ def __call__(

         if len(optProb.constraints) == 0:
             # If the user *actually* has an unconstrained problem,
-            # snopt sort of chokes with that....it has to have at
+            # IPOPT sort of chokes with that....it has to have at
             # least one constraint. So we will add one
             # automatically here:
             self.unconstrained = True
@@ -217,19 +217,25 @@ def __call__(
             # Define the 4 call back functions that ipopt needs:
             def eval_f(x, user_data=None):
                 fobj, fail = self._masterFunc(x, ["fobj"])
-                if fail == 2:
+                if fail == 1:
+                    fobj = np.array(np.NaN)
+                elif fail == 2:
                     self.userRequestedTermination = True
                 return fobj

             def eval_g(x, user_data=None):
                 fcon, fail = self._masterFunc(x, ["fcon"])
-                if fail == 2:
+                if fail == 1:
+                    fcon = np.array(np.NaN)
+                elif fail == 2:
                     self.userRequestedTermination = True
                 return fcon.copy()

             def eval_grad_f(x, user_data=None):
                 gobj, fail = self._masterFunc(x, ["gobj"])
-                if fail == 2:
+                if fail == 1:
+                    gobj = np.array(np.NaN)
+                elif fail == 2:
                     self.userRequestedTermination = True
                 return gobj.copy()

@@ -238,7 +244,9 @@ def eval_jac_g(x, flag, user_data=None):
                     return copy.deepcopy(matStruct)
                 else:
                     gcon, fail = self._masterFunc(x, ["gcon"])
-                    if fail == 2:
+                    if fail == 1:
+                        gcon = np.array(np.NaN)
+                    elif fail == 2:
                         self.userRequestedTermination = True
                     return gcon.copy()

diff --git a/pyoptsparse/pyOpt_optimizer.py b/pyoptsparse/pyOpt_optimizer.py
index e65f5f15..70ba70c7 100644
--- a/pyoptsparse/pyOpt_optimizer.py
+++ b/pyoptsparse/pyOpt_optimizer.py
@@ -81,7 +81,7 @@ def __init__(
         self.storeSens: bool = True

         # Cache storage
-        self.cache: Dict[str, Any] = {"x": None, "fobj": None, "fcon": None, "gobj": None, "gcon": None}
+        self.cache: Dict[str, Any] = {"x": None, "fobj": None, "fcon": None, "gobj": None, "gcon": None, "fail": None}

         # A second-level cache for optimizers that require callbacks
         # for each constraint. (eg. PSQP etc)
@@ -388,6 +388,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True):

                 # Update fail flag
                 masterFail = max(masterFail, fail)
+                self.cache["fail"] = masterFail

             # fobj is now in cache
             returns.append(self.cache["fobj"])
@@ -437,6 +438,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True):

                 # Update fail flag
                 masterFail = max(masterFail, fail)
+                self.cache["fail"] = masterFail

             # fcon is now in cache
             returns.append(self.cache["fcon"])
@@ -447,10 +449,13 @@ def _masterFunc2(self, x, evaluate, writeHist=True):
                 # The previous evaluated point is different than the point requested for the derivative
                 # OR this is the first call to _masterFunc2 in a hot started optimization
                 # Recursively call the routine with ['fobj', 'fcon']
-                self._masterFunc2(x, ["fobj", "fcon"], writeHist=False)
+                _, _, fail = self._masterFunc2(x, ["fobj", "fcon"], writeHist=False)
                 # We *don't* count that extra call, since that will
                 # screw up the numbering...so we subtract the last call.
                 self.callCounter -= 1
+                # Update fail flag
+                masterFail = max(masterFail, fail)
+                self.cache["fail"] = masterFail

             # Now, the point has been evaluated correctly so we
             # determine if we have to run the sens calc:
@@ -491,6 +496,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True):

                 # Update fail flag
                 masterFail = max(masterFail, fail)
+                self.cache["fail"] = masterFail

             # gobj is now in the cache
             returns.append(self.cache["gobj"])
@@ -502,10 +508,13 @@ def _masterFunc2(self, x, evaluate, writeHist=True):
                 # The previous evaluated point is different than the point requested for the derivative
                 # OR this is the first call to _masterFunc2 in a hot started optimization
                 # Recursively call the routine with ['fobj', 'fcon']
-                self._masterFunc2(x, ["fobj", "fcon"], writeHist=False)
+                _, _, fail = self._masterFunc2(x, ["fobj", "fcon"], writeHist=False)
                 # We *don't* count that extra call, since that will
                 # screw up the numbering...so we subtract the last call.
                 self.callCounter -= 1
+                # Update fail flag
+                masterFail = max(masterFail, fail)
+                self.cache["fail"] = masterFail
             # Now, the point has been evaluated correctly so we
             # determine if we have to run the sens calc:
             if self.cache["gcon"] is None:
@@ -544,13 +553,15 @@ def _masterFunc2(self, x, evaluate, writeHist=True):

                 # Update fail flag
                 masterFail = max(masterFail, fail)
+                self.cache["fail"] = masterFail

             # gcon is now in the cache
             returns.append(self.cache["gcon"])
             if self.storeSens:
                 hist["funcsSens"] = self.cache["funcsSens"]

-        # Put the fail flag in the history:
+        # Update the fail flag with any cached failure and put the fail flag in the history
+        masterFail = max(self.cache["fail"], masterFail)
         hist["fail"] = masterFail

         # Put the iteration counter in the history
diff --git a/setup.py b/setup.py
index 646d3408..6866e167 100644
--- a/setup.py
+++ b/setup.py
@@ -103,7 +103,7 @@ def copy_shared_libraries():
     keywords="optimization",
     install_requires=[
         "sqlitedict>=1.6",
-        "numpy>=1.21",
+        "numpy>=1.21,<2",
         "scipy>=1.7",
         "mdolab-baseclasses>=1.3.1",
     ],
diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py
new file mode 100644
index 00000000..2e897f6d
--- /dev/null
+++ b/tests/test_optimizer.py
@@ -0,0 +1,272 @@
+"""Test for Optimizer"""
+
+# Standard Python modules
+import unittest
+
+# External modules
+import numpy as np
+from parameterized import parameterized
+
+# First party modules
+from pyoptsparse import OPT, Optimization
+
+MASTERFUNC_OUTPUTS = ["fobj", "fcon", "gobj", "gcon"]
+
+
+class TestOptimizer(unittest.TestCase):
+    tol = 1e-12
+
+    def get_objfunc(self, failFlag=False):
+        """
+        Return an objfunc callable function where we can choose whether
+        the fail flag will be returned as True or False.
+        """
+        # Initialize iters to infinite so the fail flag is never thrown on setup
+        iters = np.inf
+
+        def objfunc(xdict):
+            """
+            This is a simple quadratic test function with linear constraints.
+            The actual problem doesn't really matter, since we are not testing optimization,
+            but just optProb. However, we need to initialize and run an optimization
+            in order to have optimizer-specific fields in optProb populated, such as
+            jacIndices.
+
+            This problem is probably not feasible, but that's okay.
+
+            Reset the iteration counter with a special call that includes
+            a nonfinite value in the design variable vector.
+            """
+            funcs = {}
+            funcs["obj_0"] = 0
+            for x in xdict.keys():
+                funcs["obj_0"] += np.sum(np.power(xdict[x], 2))
+            for iCon, nc in enumerate(self.nCon):
+                conName = f"con_{iCon}"
+                funcs[conName] = np.zeros(nc)
+                for x in xdict.keys():
+                    for j in range(nc):
+                        funcs[conName][j] = (iCon + 1) * np.sum(xdict[x])
+
+            # Throw the fail flag if it's in the specified range or True
+            nonlocal iters
+            if isinstance(failFlag, tuple):
+                if not len(failFlag) == 2:
+                    raise ValueError("Fail flag must be a tuple of (iter start fail, iter end fail) or a boolean")
+                fail = failFlag[0] <= iters < failFlag[1]
+            elif isinstance(failFlag, bool):
+                fail = failFlag
+            else:
+                raise ValueError("Fail flag must be a tuple of (iter start fail, iter end fail) or a boolean")
+            iters += 1
+
+            # Reset iteration counter if any non-finite values in DV dict
+            for xVec in xdict.values():
+                if not np.all(np.isfinite(xVec)):
+                    iters = 0
+                    break
+            return funcs, fail
+
+        return objfunc
+
+    def setup_optProb(self, failFlag=False, nObj=1, nDV=[4], nCon=[2]):
+        """
+        This function sets up a general optimization problem, with arbitrary
+        DVs, constraints and objectives.
+        Arbitrary scaling for the various parameters can also be specified.
+        """
+        self.nObj = nObj
+        self.nDV = nDV
+        self.nCon = nCon
+
+        # Optimization Object
+        self.optProb = Optimization("Configurable Test Problem", self.get_objfunc(failFlag=failFlag))
+        self.x0 = {}
+        # Design Variables
+        for iDV in range(len(nDV)):
+            n = nDV[iDV]
+            x0 = np.ones(n)
+            dvName = f"x{iDV}"
+            self.x0[dvName] = x0
+            self.optProb.addVarGroup(
+                dvName,
+                n,
+                lower=-1,
+                upper=1,
+                value=x0,
+            )
+
+        # Constraints
+        for iCon in range(len(nCon)):
+            nc = nCon[iCon]
+            self.optProb.addConGroup(
+                f"con_{iCon}",
+                nc,
+                lower=-5,
+                upper=5,
+            )
+
+        # Objective
+        for iObj in range(nObj):
+            self.optProb.addObj(f"obj_{iObj}")
+
+        # Finalize
+        self.optProb.printSparsity()
+        # create and store optimizer
+        self.opt = OPT("slsqp", options={"IFILE": "optProb_SLSQP.out"})
+        self.opt(self.optProb, sens="FD")
+
+        # Call the masterFunc with some infinite DVs so it resets iters
+        self.opt._masterFunc(np.full(np.sum(nDV), np.inf), ["fobj"])
+
+    def test_masterFunc_fobj_fail(self):
+        """
+        Test that if the objective fails when _masterFunc is called,
+        the fail flag is returned with the expected value.
+        """
+        nDV = [4]
+        self.setup_optProb(failFlag=(1, 100), nDV=nDV)
+
+        x = np.ones(np.sum(nDV), dtype=float)
+
+        # Do not fail
+        _, fail = self.opt._masterFunc(x, ["fobj"])
+        self.assertFalse(fail)
+
+        # Should fail on the second function call
+        x += 1  # change x so it doesn't use the cache
+        _, fail = self.opt._masterFunc(x, ["fobj"])
+        self.assertTrue(fail)
+
+    @parameterized.expand(MASTERFUNC_OUTPUTS)
+    def test_masterFunc_output_fail_cache(self, output):
+        """
+        Test that if an output fails when _masterFunc is called
+        and it is then called again with the same x vector,
+        the fail flag is returned with the expected value.
+        """
+        nDV = [4]
+        # Set fail flag to (0, 1) so we know for sure that it's using
+        # the cache since the only failure is on the first call
+        self.setup_optProb(failFlag=(0, 1), nDV=nDV)
+
+        x = np.ones(np.sum(nDV), dtype=float)
+
+        # Fail
+        _, fail = self.opt._masterFunc(x, [output])
+        self.assertTrue(fail)
+
+        # Should fail with the same x vector using the cache
+        _, fail = self.opt._masterFunc(x, [output])
+        self.assertTrue(fail)
+
+        # Do the same thing one more time to make sure the cache is really really working
+        _, fail = self.opt._masterFunc(x, [output])
+        self.assertTrue(fail)
+
+    def test_masterFunc_gobj_fail_cache(self):
+        """
+        Test that if the gradient fails when _masterFunc is called
+        and it is then called again with the same x vector,
+        the fail flag is returned with the expected value.
+        """
+        nDV = [4]
+        self.setup_optProb(failFlag=True, nDV=nDV)
+
+        x = np.ones(np.sum(nDV), dtype=float)
+
+        # Fail
+        _, _, fail = self.opt._masterFunc(x, ["gcon", "gobj"])
+        self.assertTrue(fail)
+
+        # Should fail with the same x vector using the cache
+        _, fail = self.opt._masterFunc(x, ["gobj"])
+        self.assertTrue(fail)
+
+    def test_masterFunc_fobj_fcon_cache_fail(self):
+        """
+        Test that if the objective fails when _masterFunc is called
+        and then the constraints are called, it still returns a failure.
+ """ + nDV = [4] + self.setup_optProb(failFlag=(1, 100), nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + + # Do not fail + _, fail = self.opt._masterFunc(x, ["fobj"]) + self.assertFalse(fail) + + # Check that the cached value does not fail either + _, fail = self.opt._masterFunc(x, ["fcon"]) + self.assertFalse(fail) + + # Should fail on the second function call + x += 1 # change x so it doesn't use the cache + _, fail = self.opt._masterFunc(x, ["fobj"]) + self.assertTrue(fail) + + # Check that the cached value now fails too + _, fail = self.opt._masterFunc(x, ["fcon"]) + self.assertTrue(fail) + + def test_masterFunc_fail_then_success(self): + """ + Test that if the objective/constraint fails when _masterFunc is called + and then it succeeds, the fail flag is no longer true. + """ + nDV = [4, 5] + self.setup_optProb(failFlag=(0, 1), nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + + # Fail + _, _, fail = self.opt._masterFunc(x, ["fobj", "fcon"]) + self.assertTrue(fail) + + # Should succeed on the second call + x += 1 # change x so it doesn't use the cache + _, _, fail = self.opt._masterFunc(x, ["fobj", "fcon"]) + self.assertFalse(fail) + + def test_masterFunc_fail_grad_after_fail_func(self): + """ + Test that if the _masterFunc is called to compute the gradients on + an x that isn't in the cache and the primal fails, it returns a + fail flag for the gradient too. + """ + nDV = [4, 5] + self.setup_optProb(failFlag=True, nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + 5 + + # Fail obj gradient on DVs that haven't been evaluated when the primal fails + _, fail = self.opt._masterFunc(x, ["gobj"]) + self.assertTrue(fail) + + # Fail con gradient on DVs that haven't been evaluated when the primal fails + x += 1 + _, fail = self.opt._masterFunc(x, ["gcon"]) + self.assertTrue(fail) + + def test_masterFunc_succeed_grad_after_fail_func(self): + """ + Test that if the _masterFunc is called to compute the gradients on + an x that is in the cache and the primal fails, it returns a + False fail flag for the gradient. + """ + nDV = [4, 5] + self.setup_optProb(failFlag=(0, 1), nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + 5 + + _, fail = self.opt._masterFunc(x, ["fobj"]) # call primal to put x in the cache + self.assertTrue(fail) + + # Gradient succeeds even though primal failed + _, _, fail = self.opt._masterFunc(x, ["gobj", "gcon"]) + self.assertFalse(fail) + + +if __name__ == "__main__": + unittest.main()