From 21e58a755abe66ece25e5b50e65e55dd3c815b18 Mon Sep 17 00:00:00 2001 From: Eytan Adler Date: Thu, 20 Jun 2024 13:32:12 -0400 Subject: [PATCH 01/15] Cache fail flag --- pyoptsparse/pyOpt_optimizer.py | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/pyoptsparse/pyOpt_optimizer.py b/pyoptsparse/pyOpt_optimizer.py index e65f5f15..70ba70c7 100644 --- a/pyoptsparse/pyOpt_optimizer.py +++ b/pyoptsparse/pyOpt_optimizer.py @@ -81,7 +81,7 @@ def __init__( self.storeSens: bool = True # Cache storage - self.cache: Dict[str, Any] = {"x": None, "fobj": None, "fcon": None, "gobj": None, "gcon": None} + self.cache: Dict[str, Any] = {"x": None, "fobj": None, "fcon": None, "gobj": None, "gcon": None, "fail": None} # A second-level cache for optimizers that require callbacks # for each constraint. (eg. PSQP etc) @@ -388,6 +388,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True): # Update fail flag masterFail = max(masterFail, fail) + self.cache["fail"] = masterFail # fobj is now in cache returns.append(self.cache["fobj"]) @@ -437,6 +438,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True): # Update fail flag masterFail = max(masterFail, fail) + self.cache["fail"] = masterFail # fcon is now in cache returns.append(self.cache["fcon"]) @@ -447,10 +449,13 @@ def _masterFunc2(self, x, evaluate, writeHist=True): # The previous evaluated point is different than the point requested for the derivative # OR this is the first call to _masterFunc2 in a hot started optimization # Recursively call the routine with ['fobj', 'fcon'] - self._masterFunc2(x, ["fobj", "fcon"], writeHist=False) + _, _, fail = self._masterFunc2(x, ["fobj", "fcon"], writeHist=False) # We *don't* count that extra call, since that will # screw up the numbering...so we subtract the last call. 
self.callCounter -= 1 + # Update fail flag + masterFail = max(masterFail, fail) + self.cache["fail"] = masterFail # Now, the point has been evaluated correctly so we # determine if we have to run the sens calc: @@ -491,6 +496,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True): # Update fail flag masterFail = max(masterFail, fail) + self.cache["fail"] = masterFail # gobj is now in the cache returns.append(self.cache["gobj"]) @@ -502,10 +508,13 @@ def _masterFunc2(self, x, evaluate, writeHist=True): # The previous evaluated point is different than the point requested for the derivative # OR this is the first call to _masterFunc2 in a hot started optimization # Recursively call the routine with ['fobj', 'fcon'] - self._masterFunc2(x, ["fobj", "fcon"], writeHist=False) + _, _, fail = self._masterFunc2(x, ["fobj", "fcon"], writeHist=False) # We *don't* count that extra call, since that will # screw up the numbering...so we subtract the last call. self.callCounter -= 1 + # Update fail flag + masterFail = max(masterFail, fail) + self.cache["fail"] = masterFail # Now, the point has been evaluated correctly so we # determine if we have to run the sens calc: if self.cache["gcon"] is None: @@ -544,13 +553,15 @@ def _masterFunc2(self, x, evaluate, writeHist=True): # Update fail flag masterFail = max(masterFail, fail) + self.cache["fail"] = masterFail # gcon is now in the cache returns.append(self.cache["gcon"]) if self.storeSens: hist["funcsSens"] = self.cache["funcsSens"] - # Put the fail flag in the history: + # Update the fail flag with any cached failure and put the fail flag in the history + masterFail = max(self.cache["fail"], masterFail) hist["fail"] = masterFail # Put the iteration counter in the history From 9312ab6fc605e47add4997f07c79decdfa3ab15d Mon Sep 17 00:00:00 2001 From: Eytan Adler Date: Thu, 20 Jun 2024 13:32:52 -0400 Subject: [PATCH 02/15] Return NaN from IPOPT callback functions if evaluation failure --- pyoptsparse/pyIPOPT/pyIPOPT.py | 16 
++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/pyoptsparse/pyIPOPT/pyIPOPT.py b/pyoptsparse/pyIPOPT/pyIPOPT.py index b009a4a4..1d6ffeb3 100644 --- a/pyoptsparse/pyIPOPT/pyIPOPT.py +++ b/pyoptsparse/pyIPOPT/pyIPOPT.py @@ -217,19 +217,25 @@ def __call__( # Define the 4 call back functions that ipopt needs: def eval_f(x, user_data=None): fobj, fail = self._masterFunc(x, ["fobj"]) - if fail == 2: + if fail == 1: + fobj = np.array(np.NaN) + elif fail == 2: self.userRequestedTermination = True return fobj def eval_g(x, user_data=None): fcon, fail = self._masterFunc(x, ["fcon"]) - if fail == 2: + if fail == 1: + fcon = np.array(np.NaN) + elif fail == 2: self.userRequestedTermination = True return fcon.copy() def eval_grad_f(x, user_data=None): gobj, fail = self._masterFunc(x, ["gobj"]) - if fail == 2: + if fail == 1: + gobj = np.array(np.NaN) + elif fail == 2: self.userRequestedTermination = True return gobj.copy() @@ -238,7 +244,9 @@ def eval_jac_g(x, flag, user_data=None): return copy.deepcopy(matStruct) else: gcon, fail = self._masterFunc(x, ["gcon"]) - if fail == 2: + if fail == 1: + gcon = np.array(np.NaN) + elif fail == 2: self.userRequestedTermination = True return gcon.copy() From d883d2214c3251fc943eb571a735472a3f4f03e2 Mon Sep 17 00:00:00 2001 From: Eytan Adler Date: Thu, 20 Jun 2024 17:41:47 -0400 Subject: [PATCH 03/15] Fixed typo in copy pasted code --- pyoptsparse/pyIPOPT/pyIPOPT.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/pyoptsparse/pyIPOPT/pyIPOPT.py b/pyoptsparse/pyIPOPT/pyIPOPT.py index 1d6ffeb3..19e7f2fe 100644 --- a/pyoptsparse/pyIPOPT/pyIPOPT.py +++ b/pyoptsparse/pyIPOPT/pyIPOPT.py @@ -91,7 +91,7 @@ def _getDefaultOptions(): "sb": [str, "yes"], "print_user_options": [str, "yes"], "output_file": [str, "IPOPT.out"], - "linear_solver": [str, "mumps"], + "linear_solver": [str, "ma86"], } return defOpts @@ -161,7 +161,7 @@ def __call__( if len(optProb.constraints) == 0: # If the user *actually* 
has an unconstrained problem, - # snopt sort of chokes with that....it has to have at + # IPOPT sort of chokes with that....it has to have at # least one constraint. So we will add one # automatically here: self.unconstrained = True From 14cffac689f74c8988bd322df14a58d81115704f Mon Sep 17 00:00:00 2001 From: Eytan Adler Date: Thu, 20 Jun 2024 21:24:19 -0400 Subject: [PATCH 04/15] Fail flag tests with _masterFunc --- tests/test_optimizer.py | 221 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 221 insertions(+) create mode 100644 tests/test_optimizer.py diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py new file mode 100644 index 00000000..245522af --- /dev/null +++ b/tests/test_optimizer.py @@ -0,0 +1,221 @@ +"""Test for Optimizer""" + +# Standard Python modules +import unittest + +# External modules +import numpy as np + +# First party modules +from pyoptsparse import OPT, Optimization + + +class TestOptimizer(unittest.TestCase): + tol = 1e-12 + + def get_objfunc(self, failFlag=False): + """ + Return an objfunc callable function where we can choose whether + the fail flag will be returned as True or False. + """ + # Initialize iters to infinite so the fail flag is never thrown on setup + iters = np.inf + def objfunc(xdict): + """ + This is a simple quadratic test function with linear constraints. + The actual problem doesn't really matter, since we are not testing optimization, + but just optProb. However, we need to initialize and run an optimization + in order to have optimizer-specific fields in optProb populated, such as + jacIndices. + + This problem is probably not feasible, but that's okay. + + Reset the iteration counter with a special call that includes + a nonfinite value in the design variable vector. 
+ """ + funcs = {} + funcs["obj_0"] = 0 + for x in xdict.keys(): + funcs["obj_0"] += np.sum(np.power(xdict[x], 2)) + for iCon, nc in enumerate(self.nCon): + conName = f"con_{iCon}" + funcs[conName] = np.zeros(nc) + for x in xdict.keys(): + for j in range(nc): + funcs[conName][j] = (iCon + 1) * np.sum(xdict[x]) + + # Throw the fail flag if it's in the specified range or True + nonlocal iters + if isinstance(failFlag, tuple): + if not len(failFlag) == 2: + raise ValueError("Fail flag must be a tuple of (iter start fail, iter end fail) or a boolean") + if failFlag[0] <= iters < failFlag[1]: + fail = True + else: + fail = False + elif isinstance(failFlag, bool): + fail = failFlag + else: + raise ValueError("Fail flag must be a tuple of (iter start fail, iter end fail) or a boolean") + iters += 1 + + # Reset iteration counter if any non-finite values in DV dict + for xVec in xdict.values(): + if not np.all(np.isfinite(xVec)): + iters = 0 + break + return funcs, fail + + return objfunc + + def setup_optProb(self, failFlag=False, nObj=1, nDV=[4], nCon=[2]): + """ + This function sets up a general optimization problem, with arbitrary + DVs, constraints and objectives. + Arbitrary scaling for the various parameters can also be specified. 
+ """ + self.nObj = nObj + self.nDV = nDV + self.nCon = nCon + + # Optimization Object + self.optProb = Optimization("Configurable Test Problem", self.get_objfunc(failFlag=failFlag)) + self.x0 = {} + # Design Variables + for iDV in range(len(nDV)): + n = nDV[iDV] + x0 = np.ones(n) + dvName = f"x{iDV}" + self.x0[dvName] = x0 + self.optProb.addVarGroup( + dvName, + n, + lower=-1, + upper=1, + value=x0, + ) + + # Constraints + for iCon in range(len(nCon)): + nc = nCon[iCon] + self.optProb.addConGroup( + f"con_{iCon}", + nc, + lower=-5, + upper=5, + ) + + # Objective + for iObj in range(nObj): + self.optProb.addObj(f"obj_{iObj}") + + # Finalize + self.optProb.printSparsity() + # create and store optimizer + self.opt = OPT("slsqp", options={"IFILE": "optProb_SLSQP.out"}) + self.opt(self.optProb, sens="FD") + + # Call the masterFunc with some infinite DVs so it resets iters + self.opt._masterFunc(np.full(np.sum(nDV), np.inf), ["fobj"]) + + def test_masterFunc_fobj_fail(self): + """ + Test that if the objective fails when _masterFunc is called, + the fail flag is returned with the expected value. + """ + nDV = [4] + self.setup_optProb(failFlag=(1, 100), nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + + # Do not fail + _, fail = self.opt._masterFunc(x, ["fobj"]) + self.assertFalse(fail) + + # Should fail on the second function call + x += 1 # change x so it doesn't use the cache + _, fail = self.opt._masterFunc(x, ["fobj"]) + self.assertTrue(fail) + + def test_masterFunc_fobj_fcon_cache_fail(self): + """ + Test that if the objective fails when _masterFunc is called + and then the constraints are called, it still returns a failure. 
+ """ + nDV = [4] + self.setup_optProb(failFlag=(1, 100), nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + + # Do not fail + _, fail = self.opt._masterFunc(x, ["fobj"]) + self.assertFalse(fail) + + # Check that the cached value does not fail either + _, fail = self.opt._masterFunc(x, ["fcon"]) + self.assertFalse(fail) + + # Should fail on the second function call + x += 1 # change x so it doesn't use the cache + _, fail = self.opt._masterFunc(x, ["fobj"]) + self.assertTrue(fail) + + # Check that the cached value now fails too + _, fail = self.opt._masterFunc(x, ["fcon"]) + self.assertTrue(fail) + + def test_masterFunc_fail_then_success(self): + """ + Test that if the objective/constraint fails when _masterFunc is called + and then it succeeds, the fail flag is no longer true. + """ + nDV = [4, 5] + self.setup_optProb(failFlag=(0, 1), nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + + # Fail + _, _, fail = self.opt._masterFunc(x, ["fobj", "fcon"]) + self.assertTrue(fail) + + # Should succeed on the second call + x += 1 # change x so it doesn't use the cache + _, _, fail = self.opt._masterFunc(x, ["fobj", "fcon"]) + self.assertFalse(fail) + + def test_masterFunc_fail_grad_after_fail_func(self): + """ + Test that if the _masterFunc is called to compute the gradients on + an x that isn't in the cache and the primal fails, it returns a + fail flag for the gradient too. + """ + nDV = [4, 5] + self.setup_optProb(failFlag=(0, 1), nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + 5 + + # Fail + _, _, fail = self.opt._masterFunc(x, ["gobj", "gcon"]) + self.assertTrue(fail) + + def test_masterFunc_succeed_grad_after_fail_func(self): + """ + Test that if the _masterFunc is called to compute the gradients on + an x that is in the cache and the primal fails, it returns a + False fail flag for the gradient. 
+ """ + nDV = [4, 5] + self.setup_optProb(failFlag=(0, 1), nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + 5 + + _, fail = self.opt._masterFunc(x, ["fobj"]) # call primal to put x in the cache + self.assertTrue(fail) + + # Gradient succeeds even though primal failed + _, _, fail = self.opt._masterFunc(x, ["gobj", "gcon"]) + self.assertFalse(fail) + + +if __name__ == "__main__": + unittest.main() From 95bbbbcaa2f4598ff85eac50059c8f81a1be4203 Mon Sep 17 00:00:00 2001 From: Eytan Adler Date: Thu, 20 Jun 2024 21:25:24 -0400 Subject: [PATCH 05/15] Undo accidental change of ipopt linear solver --- pyoptsparse/pyIPOPT/pyIPOPT.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyoptsparse/pyIPOPT/pyIPOPT.py b/pyoptsparse/pyIPOPT/pyIPOPT.py index 19e7f2fe..da0dcffb 100644 --- a/pyoptsparse/pyIPOPT/pyIPOPT.py +++ b/pyoptsparse/pyIPOPT/pyIPOPT.py @@ -91,7 +91,7 @@ def _getDefaultOptions(): "sb": [str, "yes"], "print_user_options": [str, "yes"], "output_file": [str, "IPOPT.out"], - "linear_solver": [str, "ma86"], + "linear_solver": [str, "mumps"], } return defOpts From ded7b6a0cde6cc54e50952e6fcdfc61607fd2191 Mon Sep 17 00:00:00 2001 From: Eytan Adler Date: Thu, 20 Jun 2024 21:52:11 -0400 Subject: [PATCH 06/15] Simplify boolean evaluation --- tests/test_optimizer.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py index 245522af..1c262850 100644 --- a/tests/test_optimizer.py +++ b/tests/test_optimizer.py @@ -49,10 +49,7 @@ def objfunc(xdict): if isinstance(failFlag, tuple): if not len(failFlag) == 2: raise ValueError("Fail flag must be a tuple of (iter start fail, iter end fail) or a boolean") - if failFlag[0] <= iters < failFlag[1]: - fail = True - else: - fail = False + fail = failFlag[0] <= iters < failFlag[1] elif isinstance(failFlag, bool): fail = failFlag else: From 788ac14da766f5a758750fb7b895036504a0f4f9 Mon Sep 17 00:00:00 2001 From: Eytan Adler Date: Thu, 20 Jun 2024 
21:55:26 -0400 Subject: [PATCH 07/15] Formatting --- tests/test_optimizer.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py index 1c262850..f97ad6ae 100644 --- a/tests/test_optimizer.py +++ b/tests/test_optimizer.py @@ -20,6 +20,7 @@ def get_objfunc(self, failFlag=False): """ # Initialize iters to infinite so the fail flag is never thrown on setup iters = np.inf + def objfunc(xdict): """ This is a simple quadratic test function with linear constraints. @@ -43,7 +44,7 @@ def objfunc(xdict): for x in xdict.keys(): for j in range(nc): funcs[conName][j] = (iCon + 1) * np.sum(xdict[x]) - + # Throw the fail flag if it's in the specified range or True nonlocal iters if isinstance(failFlag, tuple): From 79ea720f382a1b6a872db46493856df5b1ae9ead Mon Sep 17 00:00:00 2001 From: Eytan Adler Date: Thu, 20 Jun 2024 22:07:06 -0400 Subject: [PATCH 08/15] A bit more test coverage --- tests/test_optimizer.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py index f97ad6ae..17aa180a 100644 --- a/tests/test_optimizer.py +++ b/tests/test_optimizer.py @@ -188,12 +188,17 @@ def test_masterFunc_fail_grad_after_fail_func(self): fail flag for the gradient too. 
""" nDV = [4, 5] - self.setup_optProb(failFlag=(0, 1), nDV=nDV) + self.setup_optProb(failFlag=(0, 1000), nDV=nDV) x = np.ones(np.sum(nDV), dtype=float) + 5 - # Fail - _, _, fail = self.opt._masterFunc(x, ["gobj", "gcon"]) + # Fail obj gradient on DVs that haven't been evaluated + _, fail = self.opt._masterFunc(x, ["gobj"]) + self.assertTrue(fail) + + # Fail con gradient on DVs that haven't been evaluated + x += 1 + _, fail = self.opt._masterFunc(x, ["gcon"]) self.assertTrue(fail) def test_masterFunc_succeed_grad_after_fail_func(self): From 4fd147acb3dfc7ee7ad8da65406f7b5b91b3a830 Mon Sep 17 00:00:00 2001 From: Eytan Adler Date: Fri, 21 Jun 2024 06:52:30 -0400 Subject: [PATCH 09/15] A couple more unit tests to thoroughly test failure flag caching --- tests/test_optimizer.py | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py index 17aa180a..9ddccf72 100644 --- a/tests/test_optimizer.py +++ b/tests/test_optimizer.py @@ -135,6 +135,44 @@ def test_masterFunc_fobj_fail(self): _, fail = self.opt._masterFunc(x, ["fobj"]) self.assertTrue(fail) + def test_masterFunc_fobj_fail_cache(self): + """ + Test that if the objective fails when _masterFunc is called + and it is then called again with the same x vector, + the fail flag is returned with the expected value. + """ + nDV = [4] + self.setup_optProb(failFlag=(0, 100), nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + + # Fail + _, _, fail = self.opt._masterFunc(x, ["fcon", "fobj"]) + self.assertTrue(fail) + + # Should fail with the same x vector using the cache + _, fail = self.opt._masterFunc(x, ["fobj"]) + self.assertTrue(fail) + + def test_masterFunc_gobj_fail_cache(self): + """ + Test that if the gradient fails when _masterFunc is called + and it is then called again with the same x vector, + the fail flag is returned with the expected value. 
+ """ + nDV = [4] + self.setup_optProb(failFlag=(0, 100), nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + + # Fail + _, _, fail = self.opt._masterFunc(x, ["gcon", "gobj"]) + self.assertTrue(fail) + + # Should fail with the same x vector using the cache + _, fail = self.opt._masterFunc(x, ["gobj"]) + self.assertTrue(fail) + def test_masterFunc_fobj_fcon_cache_fail(self): """ Test that if the objective fails when _masterFunc is called From 604b7a85a463b2b8dc72b8799f797054bd3f6d57 Mon Sep 17 00:00:00 2001 From: Eytan Adler Date: Fri, 21 Jun 2024 07:01:26 -0400 Subject: [PATCH 10/15] More thorough test parameterization --- tests/test_optimizer.py | 23 ++++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py index 9ddccf72..a3a5f91a 100644 --- a/tests/test_optimizer.py +++ b/tests/test_optimizer.py @@ -5,10 +5,12 @@ # External modules import numpy as np +from parameterized import parameterized # First party modules from pyoptsparse import OPT, Optimization +MASTERFUNC_OUTPUTS = ["fobj", "fcon", "gobj", "gcon"] class TestOptimizer(unittest.TestCase): tol = 1e-12 @@ -135,23 +137,26 @@ def test_masterFunc_fobj_fail(self): _, fail = self.opt._masterFunc(x, ["fobj"]) self.assertTrue(fail) - def test_masterFunc_fobj_fail_cache(self): + @parameterized.expand(MASTERFUNC_OUTPUTS) + def test_masterFunc_output_fail_cache(self, output): """ - Test that if the objective fails when _masterFunc is called + Test that if an output fails when _masterFunc is called and it is then called again with the same x vector, the fail flag is returned with the expected value. 
""" nDV = [4] - self.setup_optProb(failFlag=(0, 100), nDV=nDV) + # Set fail flag to (0, 1) so we know for sure that it's using + # the cache since the only failure is on the first call + self.setup_optProb(failFlag=(0, 1), nDV=nDV) x = np.ones(np.sum(nDV), dtype=float) # Fail - _, _, fail = self.opt._masterFunc(x, ["fcon", "fobj"]) + _, fail = self.opt._masterFunc(x, [output]) self.assertTrue(fail) # Should fail with the same x vector using the cache - _, fail = self.opt._masterFunc(x, ["fobj"]) + _, fail = self.opt._masterFunc(x, [output]) self.assertTrue(fail) def test_masterFunc_gobj_fail_cache(self): @@ -161,7 +166,7 @@ def test_masterFunc_gobj_fail_cache(self): the fail flag is returned with the expected value. """ nDV = [4] - self.setup_optProb(failFlag=(0, 100), nDV=nDV) + self.setup_optProb(failFlag=True, nDV=nDV) x = np.ones(np.sum(nDV), dtype=float) @@ -226,15 +231,15 @@ def test_masterFunc_fail_grad_after_fail_func(self): fail flag for the gradient too. """ nDV = [4, 5] - self.setup_optProb(failFlag=(0, 1000), nDV=nDV) + self.setup_optProb(failFlag=True, nDV=nDV) x = np.ones(np.sum(nDV), dtype=float) + 5 - # Fail obj gradient on DVs that haven't been evaluated + # Fail obj gradient on DVs that haven't been evaluated when the primal fails _, fail = self.opt._masterFunc(x, ["gobj"]) self.assertTrue(fail) - # Fail con gradient on DVs that haven't been evaluated + # Fail con gradient on DVs that haven't been evaluated when the primal fails x += 1 _, fail = self.opt._masterFunc(x, ["gcon"]) self.assertTrue(fail) From 38b4edabe85bbc6b9adcb5d090497fdc1bef74f3 Mon Sep 17 00:00:00 2001 From: Eytan Adler Date: Fri, 21 Jun 2024 07:03:09 -0400 Subject: [PATCH 11/15] Bounce the interpretation --- pyoptsparse/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyoptsparse/__init__.py b/pyoptsparse/__init__.py index 2f693321..5ab4a539 100644 --- a/pyoptsparse/__init__.py +++ b/pyoptsparse/__init__.py @@ -1,4 +1,4 @@ -__version__ = "2.11.2" 
+__version__ = "2.11.3" from .pyOpt_history import History from .pyOpt_variable import Variable From 1b7d6f1dc85af337e79b8a63d434c27d5cec8030 Mon Sep 17 00:00:00 2001 From: Eytan Adler Date: Fri, 21 Jun 2024 07:08:24 -0400 Subject: [PATCH 12/15] Upper (exclusive) bound of NumPy v2 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 646d3408..6866e167 100644 --- a/setup.py +++ b/setup.py @@ -103,7 +103,7 @@ def copy_shared_libraries(): keywords="optimization", install_requires=[ "sqlitedict>=1.6", - "numpy>=1.21", + "numpy>=1.21,<2", "scipy>=1.7", "mdolab-baseclasses>=1.3.1", ], From 49131ff1cdb2de7e2d8a8056c87ea1f1b66ae86e Mon Sep 17 00:00:00 2001 From: Eytan Adler Date: Fri, 21 Jun 2024 07:10:24 -0400 Subject: [PATCH 13/15] Last last unit test --- tests/test_optimizer.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py index a3a5f91a..35de044c 100644 --- a/tests/test_optimizer.py +++ b/tests/test_optimizer.py @@ -159,6 +159,10 @@ def test_masterFunc_output_fail_cache(self, output): _, fail = self.opt._masterFunc(x, [output]) self.assertTrue(fail) + # Do the same thing one more time to make sure the cache is really really working + _, fail = self.opt._masterFunc(x, [output]) + self.assertTrue(fail) + def test_masterFunc_gobj_fail_cache(self): """ Test that if the gradient fails when _masterFunc is called From 08c3e7ec3e52fbcc95b4cd287b6db454498682f1 Mon Sep 17 00:00:00 2001 From: Eytan Adler Date: Fri, 21 Jun 2024 07:20:30 -0400 Subject: [PATCH 14/15] Formatting --- tests/test_optimizer.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py index 35de044c..2e897f6d 100644 --- a/tests/test_optimizer.py +++ b/tests/test_optimizer.py @@ -12,6 +12,7 @@ MASTERFUNC_OUTPUTS = ["fobj", "fcon", "gobj", "gcon"] + class TestOptimizer(unittest.TestCase): tol = 1e-12 From fc15ad5e5da64cdfac01a984555dddb6263ae413 Mon Sep 17 
00:00:00 2001 From: Eytan Adler <63426601+eytanadler@users.noreply.github.com> Date: Fri, 21 Jun 2024 14:40:37 -0400 Subject: [PATCH 15/15] Upper numpy bound in conda environment --- .github/environment.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/environment.yml b/.github/environment.yml index 10437bc1..65035f83 100644 --- a/.github/environment.yml +++ b/.github/environment.yml @@ -1,7 +1,7 @@ dependencies: # build - python >=3.9 - - numpy >=1.21 + - numpy >=1.21,<2 - ipopt - swig - meson >=1.3.2