diff --git a/.github/environment.yml b/.github/environment.yml index 5883a678..237cc786 100644 --- a/.github/environment.yml +++ b/.github/environment.yml @@ -1,18 +1,19 @@ dependencies: # build - - python >=3.8 - - numpy >=1.16 + - python >=3.9 + - numpy >=1.21,<2 - ipopt - swig - - meson =0.61 + - meson >=1.3.2 - compilers - pkg-config - pip - setuptools - build + - packaging # testing - parameterized - testflo - - scipy >1.2 + - scipy >=1.7 - mdolab-baseclasses >=1.3.1 - - sqlitedict >=1.6 \ No newline at end of file + - sqlitedict >=1.6 diff --git a/.github/test_real.sh b/.github/test_real.sh index 69eb3d54..4af0258e 100755 --- a/.github/test_real.sh +++ b/.github/test_real.sh @@ -11,4 +11,4 @@ cd tests # we have to copy over the coveragerc file to make sure it's in the # same directory where codecov is run cp ../.coveragerc . -testflo --pre_announce -v --coverage --coverpkg pyoptsparse $EXTRA_FLAGS +testflo --pre_announce --disallow_deprecations -v --coverage --coverpkg pyoptsparse $EXTRA_FLAGS diff --git a/.github/workflows/windows-build.yml b/.github/workflows/windows-build.yml index c5123515..ecca3662 100644 --- a/.github/workflows/windows-build.yml +++ b/.github/workflows/windows-build.yml @@ -16,7 +16,7 @@ jobs: - uses: actions/checkout@v2 - uses: conda-incubator/setup-miniconda@v2 with: - python-version: 3.8 + python-version: 3.9 miniforge-variant: Mambaforge channels: conda-forge,defaults channel-priority: strict diff --git a/.zenodo.json b/.zenodo.json index b5acb306..3d302559 100644 --- a/.zenodo.json +++ b/.zenodo.json @@ -1,7 +1,7 @@ { "creators": [ { - "name": "Neil Wu" + "name": "Ella Wu" }, { "name": "Gaetan Kenway" diff --git a/doc/citation.rst b/doc/citation.rst index f9cd28b5..cafb33d7 100644 --- a/doc/citation.rst +++ b/doc/citation.rst @@ -4,7 +4,7 @@ Citation ======== If you use pyOptSparse, please cite the following paper: - N. Wu, G. Kenway, C. A. Mader, J. Jasa, and J. R. R. A. Martins. pyOptSparse: A Python framework for large-scale constrained nonlinear optimization of sparse systems. Journal of Open Source Software, 5(54), 2564, October 2020. https://doi.org/10.21105/joss.02564 + E. Wu, G. Kenway, C. A. Mader, J. Jasa, and J. R. R. A. Martins. pyOptSparse: A Python framework for large-scale constrained nonlinear optimization of sparse systems. Journal of Open Source Software, 5(54), 2564, October 2020. https://doi.org/10.21105/joss.02564 The paper is available online from the Journal of Open Source Software `here `__. To cite this paper, you can use the following BibTeX entry: @@ -18,7 +18,7 @@ To cite this paper, you can use the following BibTeX entry: volume = {5}, number = {54}, pages = {2564}, - author = {Neil Wu and Gaetan Kenway and Charles A. Mader and John Jasa and Joaquim R. R. A. Martins}, + author = {Ella Wu and Gaetan Kenway and Charles A. Mader and John Jasa and Joaquim R. R. A. Martins}, title = {pyOptSparse: A Python framework for large-scale constrained nonlinear optimization of sparse systems}, journal = {Journal of Open Source Software} } diff --git a/doc/guide.rst b/doc/guide.rst index 0ed5d761..b7341eed 100644 --- a/doc/guide.rst +++ b/doc/guide.rst @@ -22,13 +22,13 @@ The optimization class is created using the following call: .. code-block:: python - optProb = Optimization("name", objFun) + optProb = Optimization("name", objconFun) -The general template of the objective function is as follows: +The general template of the objective and constraint function is as follows: .. 
code-block:: python

-    def obj_fun(xdict):
+    def objconFun(xdict):
         funcs = {}
         funcs["obj_name"] = function(xdict)
         funcs["con_name"] = function(xdict)
@@ -196,17 +196,30 @@ This argument is a dictionary, and the keys must match the design variable sets
 Essentially what we have done is specified the which blocks of the constraint rows are non-zero, and provided the sparsity structure of ones that are sparse.
 
-For linear constraints the values in ``jac`` are meaningful:
-they must be the actual linear constraint Jacobian values (which do not change).
-For non-linear constraints, only the sparsity structure (i.e. which entries are nonzero) is significant.
-The values themselves will be determined by a call to the ``sens()`` function.
-
-Also note, that the ``wrt`` and ``jac`` keyword arguments are only supported when user-supplied sensitivity is used.
+Note that the ``wrt`` and ``jac`` keyword arguments are only supported when user-supplied sensitivity is used.
 If automatic gradients from pyOptSparse are used, the constraint Jacobian will necessarily be dense.
 
 .. note::
    Currently, only the optimizers SNOPT and IPOPT support sparse Jacobians.
 
+Linear Constraints
+~~~~~~~~~~~~~~~~~~
+Linear constraints in pyOptSparse are defined exclusively by the ``jac``, ``lower``, and ``upper`` arguments of the ``addConGroup`` method.
+For a linear constraint :math:`g_L \leq Ax + b \leq g_U`, the constraint definition would look like:
+
+.. code-block:: python
+
+    optProb.addConGroup("con", num_cons, linear=True, wrt=["xvars"], jac={"xvars": A}, lower=gL - b, upper=gU - b)
+
+Users should not provide the linear constraint values (i.e., :math:`g = Ax + b`) in a user-defined objective/constraint function.
+pyOptSparse will raise an error if you do so.
+
+For linear constraints, the values in ``jac`` are meaningful:
+they must be the actual linear constraint Jacobian values (which do not change).
+For non-linear constraints, only the sparsity structure (i.e. which entries are nonzero) is significant.
+The values themselves will be determined by a call to the ``sens()`` function.
+
+
 Objectives
 ++++++++++
diff --git a/doc/optimizers/NLPQLP.rst b/doc/optimizers/NLPQLP.rst
index 8a928f21..9a15b2b1 100644
--- a/doc/optimizers/NLPQLP.rst
+++ b/doc/optimizers/NLPQLP.rst
@@ -12,8 +12,8 @@ solved. The line search can be performed with respect to two alternative merit
 functions, and the Hessian approximation is updated by a modified BFGS formula.
 
-NLPQLP is a proprietary software, which can be obtained `here `_.
-The latest version supported is v4.2.2.
+NLPQLP is proprietary software, which can be obtained `here `_.
+The supported versions are v4.2.2 and v5.0.3, but other versions may work.
 
 Options
 -------
diff --git a/doc/optimizers/SNOPT_options.yaml b/doc/optimizers/SNOPT_options.yaml
index 0c740234..65240285 100644
--- a/doc/optimizers/SNOPT_options.yaml
+++ b/doc/optimizers/SNOPT_options.yaml
@@ -100,7 +100,37 @@ Return work arrays:
     These arrays can be used to hot start a subsequent optimization.
     The SNOPT option 'Sticky parameters' will also be automatically set to 'Yes' to facilitate the hot start.
 
+Work arrays save file:
+  desc: >
+    This option is unique to the Python wrapper.
+    The SNOPT work arrays will be pickled and saved to this file after each major iteration.
+    This file is useful if you want to restart an optimization that did not exit cleanly.
+    If None, the work arrays are not saved.
+
 snSTOP function handle:
   desc: >
     This option is unique to the Python wrapper.
A function handle can be supplied which is called at the end of each major iteration. + The following is an example of a callback function that saves the restart dictionary + to a different file after each major iteration. + + .. code-block:: python + + def snstopCallback(iterDict, restartDict): + # Get the major iteration number + nMajor = iterDict["nMajor"] + + # Save the restart dictionary + writePickle(f"restart_{nMajor}.pickle", restartDict) + + return 0 + +snSTOP arguments: + desc: | + This option is unique to the Python wrapper. + It specifies a list of arguments that will be passed to the snSTOP function handle. + ``iterDict`` is always passed as an argument. + Additional arguments are passed in the same order as this list. + The possible values are + + - ``restartDict`` diff --git a/paper/paper.md b/paper/paper.md index ba8623d4..a6eda955 100644 --- a/paper/paper.md +++ b/paper/paper.md @@ -4,7 +4,7 @@ tags: - optimization - Python authors: - - name: Neil Wu + - name: Ella Wu orcid: 0000-0001-8856-9661 affiliation: 1 - name: Gaetan Kenway diff --git a/pyoptsparse/__init__.py b/pyoptsparse/__init__.py index 1ea04cc7..c122965d 100644 --- a/pyoptsparse/__init__.py +++ b/pyoptsparse/__init__.py @@ -1,4 +1,4 @@ -__version__ = "2.10.1" +__version__ = "2.12.0" from .pyOpt_history import History from .pyOpt_variable import Variable @@ -19,3 +19,25 @@ from .pyNSGA2.pyNSGA2 import NSGA2 from .pyALPSO.pyALPSO import ALPSO from .pyParOpt.ParOpt import ParOpt + +__all__ = [ + "History", + "Variable", + "Gradient", + "Constraint", + "Objective", + "Optimization", + "Optimizer", + "OPT", + "Optimizers", + "Solution", + "SNOPT", + "IPOPT", + "SLSQP", + "CONMIN", + "PSQP", + "NLPQLP", + "NSGA2", + "ALPSO", + "ParOpt", +] diff --git a/pyoptsparse/pyALPSO/pyALPSO.py b/pyoptsparse/pyALPSO/pyALPSO.py index 465728ca..6268618c 100644 --- a/pyoptsparse/pyALPSO/pyALPSO.py +++ b/pyoptsparse/pyALPSO/pyALPSO.py @@ -2,6 +2,7 @@ pyALPSO - A pyOptSparse interface to ALPSO work with sparse optimization problems. """ + # Standard Python modules import datetime import time @@ -10,6 +11,7 @@ import numpy as np # Local modules +from . import alpso from ..pyOpt_error import Error from ..pyOpt_optimizer import Optimizer @@ -25,9 +27,7 @@ class ALPSO(Optimizer): - pll_type -> STR: ALPSO Parallel Implementation (None, SPM- Static, DPM- Dynamic, POA-Parallel Analysis), *Default* = None """ - def __init__(self, raiseError=True, options={}): - from . import alpso - + def __init__(self, options={}): self.alpso = alpso category = "Global Optimizer" @@ -192,9 +192,7 @@ def objconfunc(x): self.optProb.comm.bcast(-1, root=0) # Store Results - sol_inform = {} - # sol_inform['value'] = inform - # sol_inform['text'] = self.informs[inform[0]] + sol_inform = {"value": "", "text": ""} # Create the optimization solution sol = self._createSolution(optTime, sol_inform, opt_f, opt_x) diff --git a/pyoptsparse/pyCONMIN/pyCONMIN.py b/pyoptsparse/pyCONMIN/pyCONMIN.py index 21dd0c07..97b57930 100644 --- a/pyoptsparse/pyCONMIN/pyCONMIN.py +++ b/pyoptsparse/pyCONMIN/pyCONMIN.py @@ -2,11 +2,7 @@ pyCONMIN - A variation of the pyCONMIN wrapper specificially designed to work with sparse optimization problems. """ -# Compiled module -try: - from . 
import conmin # isort: skip -except ImportError: - conmin = None + # Standard Python modules import datetime import os @@ -18,6 +14,11 @@ # Local modules from ..pyOpt_error import Error from ..pyOpt_optimizer import Optimizer +from ..pyOpt_utils import try_import_compiled_module_from_path + +# import the compiled module +THIS_DIR = os.path.dirname(os.path.abspath(__file__)) +conmin = try_import_compiled_module_from_path("conmin", THIS_DIR, raise_warning=True) class CONMIN(Optimizer): @@ -30,9 +31,8 @@ def __init__(self, raiseError=True, options={}): category = "Local Optimizer" defOpts = self._getDefaultOptions() informs = self._getInforms() - if conmin is None: - if raiseError: - raise Error("There was an error importing the compiled conmin module") + if isinstance(conmin, str) and raiseError: + raise ImportError(conmin) self.set_options = [] super().__init__(name, category, defaultOptions=defOpts, informs=informs, options=options) @@ -241,9 +241,7 @@ def cnmngrad(n1, n2, x, f, g, ct, df, a, ic, nac): self.optProb.comm.bcast(-1, root=0) # Store Results - sol_inform = {} - # sol_inform['value'] = inform - # sol_inform['text'] = self.informs[inform[0]] + sol_inform = {"value": "", "text": ""} # Create the optimization solution sol = self._createSolution(optTime, sol_inform, ff, xs) diff --git a/pyoptsparse/pyIPOPT/pyIPOPT.py b/pyoptsparse/pyIPOPT/pyIPOPT.py index f5233292..da0dcffb 100644 --- a/pyoptsparse/pyIPOPT/pyIPOPT.py +++ b/pyoptsparse/pyIPOPT/pyIPOPT.py @@ -1,24 +1,31 @@ """ pyIPOPT - A python wrapper to the core IPOPT compiled module. """ -# Compiled module -try: - from . import pyipoptcore # isort: skip -except ImportError: - pyipoptcore = None # Standard Python modules import copy import datetime +import os import time # External modules import numpy as np # Local modules -from ..pyOpt_error import Error from ..pyOpt_optimizer import Optimizer -from ..pyOpt_utils import ICOL, INFINITY, IROW, convertToCOO, extractRows, scaleRows +from ..pyOpt_utils import ( + ICOL, + INFINITY, + IROW, + convertToCOO, + extractRows, + scaleRows, + try_import_compiled_module_from_path, +) + +# import the compiled module +THIS_DIR = os.path.dirname(os.path.abspath(__file__)) +pyipoptcore = try_import_compiled_module_from_path("pyipoptcore", THIS_DIR) class IPOPT(Optimizer): @@ -36,9 +43,8 @@ def __init__(self, raiseError=True, options={}): defOpts = self._getDefaultOptions() informs = self._getInforms() - if pyipoptcore is None: - if raiseError: - raise Error("There was an error importing the compiled IPOPT module") + if isinstance(pyipoptcore, str) and raiseError: + raise ImportError(pyipoptcore) super().__init__( name, @@ -155,7 +161,7 @@ def __call__( if len(optProb.constraints) == 0: # If the user *actually* has an unconstrained problem, - # snopt sort of chokes with that....it has to have at + # IPOPT sort of chokes with that....it has to have at # least one constraint. 
So we will add one # automatically here: self.unconstrained = True @@ -211,19 +217,25 @@ def __call__( # Define the 4 call back functions that ipopt needs: def eval_f(x, user_data=None): fobj, fail = self._masterFunc(x, ["fobj"]) - if fail == 2: + if fail == 1: + fobj = np.array(np.NaN) + elif fail == 2: self.userRequestedTermination = True return fobj def eval_g(x, user_data=None): fcon, fail = self._masterFunc(x, ["fcon"]) - if fail == 2: + if fail == 1: + fcon = np.array(np.NaN) + elif fail == 2: self.userRequestedTermination = True return fcon.copy() def eval_grad_f(x, user_data=None): gobj, fail = self._masterFunc(x, ["gobj"]) - if fail == 2: + if fail == 1: + gobj = np.array(np.NaN) + elif fail == 2: self.userRequestedTermination = True return gobj.copy() @@ -232,7 +244,9 @@ def eval_jac_g(x, flag, user_data=None): return copy.deepcopy(matStruct) else: gcon, fail = self._masterFunc(x, ["gcon"]) - if fail == 2: + if fail == 1: + gcon = np.array(np.NaN) + elif fail == 2: self.userRequestedTermination = True return gcon.copy() diff --git a/pyoptsparse/pyNLPQLP/pyNLPQLP.py b/pyoptsparse/pyNLPQLP/pyNLPQLP.py index 348f8e4f..c9f0fff2 100644 --- a/pyoptsparse/pyNLPQLP/pyNLPQLP.py +++ b/pyoptsparse/pyNLPQLP/pyNLPQLP.py @@ -2,11 +2,6 @@ pyNLPQLP - A pyOptSparse wrapper for Schittkowski's NLPQLP optimization algorithm. """ -# Compiled module -try: - from . import nlpqlp # isort: skip -except ImportError: - nlpqlp = None # Standard Python modules import datetime import os @@ -18,6 +13,11 @@ # Local modules from ..pyOpt_error import Error from ..pyOpt_optimizer import Optimizer +from ..pyOpt_utils import try_import_compiled_module_from_path + +# import the compiled module +THIS_DIR = os.path.dirname(os.path.abspath(__file__)) +nlpqlp = try_import_compiled_module_from_path("nlpqlp", THIS_DIR) class NLPQLP(Optimizer): @@ -30,9 +30,8 @@ def __init__(self, raiseError=True, options={}): category = "Local Optimizer" defOpts = self._getDefaultOptions() informs = self._getInforms() - if nlpqlp is None: - if raiseError: - raise Error("There was an error importing the compiled nlpqlp module") + if isinstance(nlpqlp, str) and raiseError: + raise ImportError(nlpqlp) super().__init__(name, category, defaultOptions=defOpts, informs=informs, options=options) # NLPQLP needs Jacobians in dense format diff --git a/pyoptsparse/pyNSGA2/pyNSGA2.py b/pyoptsparse/pyNSGA2/pyNSGA2.py index 125e3b0f..c6bd2e5c 100644 --- a/pyoptsparse/pyNSGA2/pyNSGA2.py +++ b/pyoptsparse/pyNSGA2/pyNSGA2.py @@ -2,12 +2,9 @@ pyNSGA2 - A variation of the pyNSGA2 wrapper specificially designed to work with sparse optimization problems. """ -# Compiled module -try: - from . 
import nsga2 # isort: skip -except ImportError: - nsga2 = None + # Standard Python modules +import os import time # External modules @@ -16,6 +13,11 @@ # Local modules from ..pyOpt_error import Error from ..pyOpt_optimizer import Optimizer +from ..pyOpt_utils import try_import_compiled_module_from_path + +# import the compiled module +THIS_DIR = os.path.dirname(os.path.abspath(__file__)) +nsga2 = try_import_compiled_module_from_path("nsga2", THIS_DIR, raise_warning=True) class NSGA2(Optimizer): @@ -30,9 +32,8 @@ def __init__(self, raiseError=True, options={}): informs = self._getInforms() super().__init__(name, category, defaultOptions=defOpts, informs=informs, options=options) - if nsga2 is None: - if raiseError: - raise Error("There was an error importing the compiled nsga2 module") + if isinstance(nsga2, str) and raiseError: + raise ImportError(nsga2) @staticmethod def _getInforms(): @@ -180,7 +181,7 @@ def objconfunc(nreal, nobj, ncon, x, f, g): self.optProb.comm.bcast(-1, root=0) # Store Results - sol_inform = {} + sol_inform = {"value": "", "text": ""} xstar = [0.0] * n for i in range(n): diff --git a/pyoptsparse/pyNSGA2/source/nsga2.h b/pyoptsparse/pyNSGA2/source/nsga2.h index 3ed100b9..051382fa 100644 --- a/pyoptsparse/pyNSGA2/source/nsga2.h +++ b/pyoptsparse/pyNSGA2/source/nsga2.h @@ -127,7 +127,7 @@ void mutation_ind (individual *ind, Global global, int *nrealmut, int *nbinmut); void bin_mutate_ind (individual *ind, Global global, int *nbinmut); void real_mutate_ind (individual *ind, Global global, int *nrealmut); -//void nsga2func (int nreal, int nbin, int nobj, int ncon, double *xreal, double *xbin, int **gene, double *obj, double *constr); +void nsga2func (int nreal, int nbin, int nobj, int ncon, double *xreal, double *xbin, int **gene, double *obj, double *constr); void assign_rank_and_crowding_distance (population *new_pop, Global global); diff --git a/pyoptsparse/pyOpt_constraint.py b/pyoptsparse/pyOpt_constraint.py index 80cfe85c..e85a850d 100644 --- a/pyoptsparse/pyOpt_constraint.py +++ b/pyoptsparse/pyOpt_constraint.py @@ -8,8 +8,8 @@ # Local modules from .pyOpt_error import Error, pyOptSparseWarning +from .pyOpt_types import Dict1DType from .pyOpt_utils import INFINITY, _broadcast_to_array, convertToCOO -from .types import Dict1DType class Constraint: diff --git a/pyoptsparse/pyOpt_gradient.py b/pyoptsparse/pyOpt_gradient.py index ec6e58a9..27083c64 100644 --- a/pyoptsparse/pyOpt_gradient.py +++ b/pyoptsparse/pyOpt_gradient.py @@ -8,7 +8,7 @@ # Local modules from .pyOpt_MPI import MPI from .pyOpt_optimization import Optimization -from .types import Dict1DType, Dict2DType +from .pyOpt_types import Dict1DType, Dict2DType class Gradient: diff --git a/pyoptsparse/pyOpt_history.py b/pyoptsparse/pyOpt_history.py index b204f192..efc31dd7 100644 --- a/pyoptsparse/pyOpt_history.py +++ b/pyoptsparse/pyOpt_history.py @@ -651,7 +651,10 @@ def getValues(self, names=None, callCounters=None, major=True, scale=False, stac # reshape lists into numpy arrays for name in names: # we just stack along axis 0 - data[name] = np.stack(data[name], axis=0) + if len(data[name]) > 0: + data[name] = np.stack(data[name], axis=0) + else: + data[name] = np.array(data[name]) # we cast 1D arrays to 2D, for scalar values if data[name].ndim == 1: data[name] = np.expand_dims(data[name], 1) diff --git a/pyoptsparse/pyOpt_optimization.py b/pyoptsparse/pyOpt_optimization.py index ac41174d..ee1c8926 100644 --- a/pyoptsparse/pyOpt_optimization.py +++ b/pyoptsparse/pyOpt_optimization.py @@ -15,6 +15,7 @@ from 
.pyOpt_constraint import Constraint from .pyOpt_error import Error from .pyOpt_objective import Objective +from .pyOpt_types import Dict1DType, Dict2DType, NumpyType from .pyOpt_utils import ( ICOL, IDATA, @@ -28,7 +29,6 @@ scaleRows, ) from .pyOpt_variable import Variable -from .types import Dict1DType, Dict2DType, NumpyType class Optimization: @@ -1213,7 +1213,7 @@ def processContoDict( scaled : bool Flag specifying if the returned array should be scaled by - the pyOpt scaling. The only type this is not true is + the pyOpt scaling. The only time this is not true is when the automatic derivatives are used dtype : str @@ -1577,9 +1577,17 @@ def _mapContoOpt_Dict(self, conDict: Dict1DType) -> Dict1DType: con_opt = self._mapContoOpt(con) return self.processContoDict(con_opt, scaled=False, natural=True) - def __str__(self): + def summary_str(self, minimal_print=False): """ Print Structured Optimization Problem + + Parameters + ---------- + minimal_print : bool + Flag to specify if the printed results should only include + variables and constraints with a non-empty status + (for example a violated bound). + This defaults to False, which will print all results. """ TOL = 1.0e-6 @@ -1642,7 +1650,8 @@ def __str__(self): else: raise ValueError(f"Unrecognized type for variable {var.name}: {var.type}") - text += fmt.format(idx, var.name, var.type, lower, value, upper, status, width=num_c) + if not minimal_print or status: + text += fmt.format(idx, var.name, var.type, lower, value, upper, status, width=num_c) idx += 1 if len(self.constraints) > 0: @@ -1698,13 +1707,17 @@ def __str__(self): # Active upper bound status += "u" - text += fmt.format( - idx, c.name, typ, lower, value, upper, status, lambdaStar[con_name][j], width=num_c - ) + if not minimal_print or status: + text += fmt.format( + idx, c.name, typ, lower, value, upper, status, lambdaStar[con_name][j], width=num_c + ) idx += 1 return text + def __str__(self): + return self.summary_str(minimal_print=False) + def __getstate__(self) -> dict: """ This is used for serializing class instances. diff --git a/pyoptsparse/pyOpt_optimizer.py b/pyoptsparse/pyOpt_optimizer.py index e65f5f15..2ae6dca0 100644 --- a/pyoptsparse/pyOpt_optimizer.py +++ b/pyoptsparse/pyOpt_optimizer.py @@ -81,7 +81,7 @@ def __init__( self.storeSens: bool = True # Cache storage - self.cache: Dict[str, Any] = {"x": None, "fobj": None, "fcon": None, "gobj": None, "gcon": None} + self.cache: Dict[str, Any] = {"x": None, "fobj": None, "fcon": None, "gobj": None, "gcon": None, "fail": None} # A second-level cache for optimizers that require callbacks # for each constraint. (eg. 
PSQP etc) @@ -367,6 +367,10 @@ def _masterFunc2(self, x, evaluate, writeHist=True): self.userObjTime += time.time() - timeA self.userObjCalls += 1 + # Make sure the user-defined function does *not* return linear constraint values + if self.callCounter == 0: + self._checkLinearConstraints(funcs) + # Discard zero imaginary components in funcs for key, val in funcs.items(): funcs[key] = np.real(val) @@ -388,6 +392,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True): # Update fail flag masterFail = max(masterFail, fail) + self.cache["fail"] = masterFail # fobj is now in cache returns.append(self.cache["fobj"]) @@ -416,6 +421,10 @@ def _masterFunc2(self, x, evaluate, writeHist=True): self.userObjTime += time.time() - timeA self.userObjCalls += 1 + # Make sure the user-defined function does *not* return linear constraint values + if self.callCounter == 0: + self._checkLinearConstraints(funcs) + # Discard zero imaginary components in funcs for key, val in funcs.items(): funcs[key] = np.real(val) @@ -437,6 +446,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True): # Update fail flag masterFail = max(masterFail, fail) + self.cache["fail"] = masterFail # fcon is now in cache returns.append(self.cache["fcon"]) @@ -447,10 +457,13 @@ def _masterFunc2(self, x, evaluate, writeHist=True): # The previous evaluated point is different than the point requested for the derivative # OR this is the first call to _masterFunc2 in a hot started optimization # Recursively call the routine with ['fobj', 'fcon'] - self._masterFunc2(x, ["fobj", "fcon"], writeHist=False) + _, _, fail = self._masterFunc2(x, ["fobj", "fcon"], writeHist=False) # We *don't* count that extra call, since that will # screw up the numbering...so we subtract the last call. self.callCounter -= 1 + # Update fail flag + masterFail = max(masterFail, fail) + self.cache["fail"] = masterFail # Now, the point has been evaluated correctly so we # determine if we have to run the sens calc: @@ -491,6 +504,7 @@ def _masterFunc2(self, x, evaluate, writeHist=True): # Update fail flag masterFail = max(masterFail, fail) + self.cache["fail"] = masterFail # gobj is now in the cache returns.append(self.cache["gobj"]) @@ -502,10 +516,13 @@ def _masterFunc2(self, x, evaluate, writeHist=True): # The previous evaluated point is different than the point requested for the derivative # OR this is the first call to _masterFunc2 in a hot started optimization # Recursively call the routine with ['fobj', 'fcon'] - self._masterFunc2(x, ["fobj", "fcon"], writeHist=False) + _, _, fail = self._masterFunc2(x, ["fobj", "fcon"], writeHist=False) # We *don't* count that extra call, since that will # screw up the numbering...so we subtract the last call. 
self.callCounter -= 1 + # Update fail flag + masterFail = max(masterFail, fail) + self.cache["fail"] = masterFail # Now, the point has been evaluated correctly so we # determine if we have to run the sens calc: if self.cache["gcon"] is None: @@ -544,13 +561,15 @@ def _masterFunc2(self, x, evaluate, writeHist=True): # Update fail flag masterFail = max(masterFail, fail) + self.cache["fail"] = masterFail # gcon is now in the cache returns.append(self.cache["gcon"]) if self.storeSens: hist["funcsSens"] = self.cache["funcsSens"] - # Put the fail flag in the history: + # Update the fail flag with any cached failure and put the fail flag in the history + masterFail = max(self.cache["fail"], masterFail) hist["fail"] = masterFail # Put the iteration counter in the history @@ -856,6 +875,18 @@ def _on_setOption(self, name, value): """ pass + def _checkLinearConstraints(self, funcs): + """ + Makes sure that the user-defined obj/con function does not compute the linear constraint values + because the linear constraints are exclusively defined by jac and bounds in addConGroup. + """ + for conName in self.optProb.constraints: + if self.optProb.constraints[conName].linear and conName in funcs: + raise Error( + "Value for linear constraint returned from user obj function. Linear constraints " + + "are evaluated internally and should not be returned from the user's function." + ) + def setOption(self, name, value=None): """ Generic routine for all option setting. The routine does diff --git a/pyoptsparse/pyOpt_solution.py b/pyoptsparse/pyOpt_solution.py index 1d41b342..f6fba48e 100644 --- a/pyoptsparse/pyOpt_solution.py +++ b/pyoptsparse/pyOpt_solution.py @@ -1,6 +1,9 @@ # Standard Python modules import copy +# External modules +import numpy as np + # Local modules from .pyOpt_optimization import Optimization @@ -49,9 +52,9 @@ def __init__(self, optProb, xStar, fStar, lambdaStar, optInform, info): i += 1 # Now set the f-values - if isinstance(fStar, float) or len(fStar) == 1: - self.objectives[list(self.objectives.keys())[0]].value = float(fStar) - fStar = float(fStar) + if isinstance(fStar, np.ndarray) and len(fStar) == 1: + self.objectives[list(self.objectives.keys())[0]].value = fStar.item() + fStar = fStar.item() else: for f_name, f in self.objectives.items(): f.value = fStar[f_name] diff --git a/pyoptsparse/types.py b/pyoptsparse/pyOpt_types.py similarity index 100% rename from pyoptsparse/types.py rename to pyoptsparse/pyOpt_types.py diff --git a/pyoptsparse/pyOpt_utils.py b/pyoptsparse/pyOpt_utils.py index e67eabb7..7edba239 100644 --- a/pyoptsparse/pyOpt_utils.py +++ b/pyoptsparse/pyOpt_utils.py @@ -8,8 +8,13 @@ mat = {'csr':[rowp, colind, data], 'shape':[nrow, ncols]} # A csr matrix mat = {'csc':[colp, rowind, data], 'shape':[nrow, ncols]} # A csc matrix """ + # Standard Python modules -from typing import Tuple, Union +import importlib +import os +import sys +import types +from typing import Optional, Tuple, Union import warnings # External modules @@ -20,7 +25,7 @@ # Local modules from .pyOpt_error import Error -from .types import ArrayType +from .pyOpt_types import ArrayType # Define index mnemonics IROW = 0 @@ -570,3 +575,42 @@ def _broadcast_to_array(name: str, value: ArrayType, n_values: int, allow_none: if not allow_none and any([i is None for i in value]): raise Error(f"The {name} argument cannot be 'None'.") return value + + +def try_import_compiled_module_from_path( + module_name: str, path: Optional[str] = None, raise_warning: bool = False +) -> Union[types.ModuleType, str]: + """ + 
Attempt to import a module from a given path.
+
+    Parameters
+    ----------
+    module_name : str
+        The name of the module.
+    path : Optional[str]
+        The path to import from. If None, the default ``sys.path`` is used.
+    raise_warning : bool
+        If True, issue a warning when the module cannot be imported. By default False.
+
+    Returns
+    -------
+    Union[types.ModuleType, str]
+        If importable, the imported module is returned.
+        If not importable, the error message is instead returned.
+    """
+    orig_path = sys.path
+    if path is not None:
+        path = os.path.abspath(os.path.expandvars(os.path.expanduser(path)))
+        sys.path = [path]
+    try:
+        module = importlib.import_module(module_name)
+    except ImportError as e:
+        if raise_warning:
+            warnings.warn(
+                f"{module_name} module could not be imported from {path}.",
+                stacklevel=2,
+            )
+        module = str(e)
+    finally:
+        sys.path = orig_path
+    return module
diff --git a/pyoptsparse/pyPSQP/pyPSQP.py b/pyoptsparse/pyPSQP/pyPSQP.py
index ca495eb8..e69ed8de 100644
--- a/pyoptsparse/pyPSQP/pyPSQP.py
+++ b/pyoptsparse/pyPSQP/pyPSQP.py
@@ -1,11 +1,6 @@
 """
 pyPSQP - the pyPSQP wrapper
 """
-# Compiled module
-try:
-    from . import psqp  # isort: skip
-except ImportError:
-    psqp = None
 # Standard Python modules
 import datetime
 import os
@@ -17,6 +12,11 @@
 # Local modules
 from ..pyOpt_error import Error
 from ..pyOpt_optimizer import Optimizer
+from ..pyOpt_utils import try_import_compiled_module_from_path
+
+# import the compiled module
+THIS_DIR = os.path.dirname(os.path.abspath(__file__))
+psqp = try_import_compiled_module_from_path("psqp", THIS_DIR)
 
 
 class PSQP(Optimizer):
@@ -30,9 +30,8 @@ def __init__(self, raiseError=True, options={}):
         defOpts = self._getDefaultOptions()
         informs = self._getInforms()
 
-        if psqp is None:
-            if raiseError:
-                raise Error("There was an error importing the compiled psqp module")
+        if isinstance(psqp, str) and raiseError:
+            raise ImportError(psqp)
 
         super().__init__(name, category, defaultOptions=defOpts, informs=informs, options=options)
 
diff --git a/pyoptsparse/pySLSQP/pySLSQP.py b/pyoptsparse/pySLSQP/pySLSQP.py
index 58f3ee6e..bb2431b4 100644
--- a/pyoptsparse/pySLSQP/pySLSQP.py
+++ b/pyoptsparse/pySLSQP/pySLSQP.py
@@ -2,11 +2,7 @@
 pySLSQP - A variation of the pySLSQP wrapper specificially designed to
 work with sparse optimization problems.
 """
-# Compiled module
-try:
-    from . 
import slsqp # isort: skip -except ImportError: - slsqp = None + # Standard Python modules import datetime import os @@ -16,8 +12,12 @@ import numpy as np # Local modules -from ..pyOpt_error import Error from ..pyOpt_optimizer import Optimizer +from ..pyOpt_utils import try_import_compiled_module_from_path + +# import the compiled module +THIS_DIR = os.path.dirname(os.path.abspath(__file__)) +slsqp = try_import_compiled_module_from_path("slsqp", THIS_DIR, raise_warning=True) class SLSQP(Optimizer): @@ -30,9 +30,8 @@ def __init__(self, raiseError=True, options={}): category = "Local Optimizer" defOpts = self._getDefaultOptions() informs = self._getInforms() - if slsqp is None: - if raiseError: - raise Error("There was an error importing the compiled slsqp module") + if isinstance(slsqp, str) and raiseError: + raise ImportError(slsqp) self.set_options = [] super().__init__(name, category, defaultOptions=defOpts, informs=informs, options=options) diff --git a/pyoptsparse/pySLSQP/source/slsqp.f b/pyoptsparse/pySLSQP/source/slsqp.f index 07c25a55..ba4c4b72 100644 --- a/pyoptsparse/pySLSQP/source/slsqp.f +++ b/pyoptsparse/pySLSQP/source/slsqp.f @@ -180,11 +180,11 @@ SUBROUTINE SLSQP (M,MEQ,LA,N,X,XL,XU,F,C,G,A,ACC,ITER, 1 JW(L_JW), LA, M, MEQ, MINEQ, MODE, N, N1, IPRINT, IOUT, 2 NFUNC, NGRAD, 3 IEXACT, INCONS, IRESET, ITERMX, LINE, N2, N3 - + DOUBLE PRECISION ACC, A(LA,N+1), C(LA), F, G(N+1), - * X(N), XL(N), XU(N), W(L_W), + * X(N), XL(N), XU(N), W(L_W), * ALPHA, F0, GS, H1, H2, H3, H4, T, T0, TOL - + EXTERNAL SLFUNC,SLGRAD CHARACTER*(*) IFILE @@ -279,9 +279,9 @@ SUBROUTINE SLSQP (M,MEQ,LA,N,X,XL,XU,F,C,G,A,ACC,ITER, * ITERMX,LINE,N1,N2,N3) C IF (ABS(MODE).EQ.1) GOTO 4 -C +C 3 CONTINUE - + C C PRINT FINAL C @@ -297,20 +297,26 @@ SUBROUTINE SLSQP (M,MEQ,LA,N,X,XL,XU,F,C,G,A,ACC,ITER, C ------------------------------------------------------------------ C FORMATS C ------------------------------------------------------------------ -C +C 1000 FORMAT(////,3X, 1 60H------------------------------------------------------------, 2 15H---------------, - 3 /,5X,59HSTART OF THE SEQUENTIAL LEAST SQUARES PROGRAMMING ALGORITHM, - 4 /,3X, - 5 60H------------------------------------------------------------, - 6 15H---------------) - 1100 FORMAT(/,5X,11HPARAMETERS:,/,8X,5HACC =,D13.4,/,8X,9HMAXITER =, - 1 I3,/,8X,8HIPRINT =,I4,/,6HIOUT =,I4//) + 3 /, + 4 5X,13HSTART OF THE , + 5 46HSEQUENTIAL LEAST SQUARES PROGRAMMING ALGORITHM, + 6 /,3X, + 7 60H------------------------------------------------------------, + 8 15H---------------) + 1100 FORMAT(/,5X, + 1 11HPARAMETERS:, + 2 /,8X,5HACC =,D13.4, + 3 /,8X,9HMAXITER =,I4, + 4 /,8X,8HIPRINT =,I4, + 5 /,8X,6HIOUT =,I4//) 1200 FORMAT(5X,6HITER =,I5,5X,5HOBJ =,7E16.8,5X,10HX-VECTOR =) - 1400 FORMAT (3X,7E13.4) + 1400 FORMAT(3X,7E13.4) 1450 FORMAT(8X,30HNUMBER OF FUNC-CALLS: NFUNC =,I4) 1460 FORMAT(8X,30HNUMBER OF GRAD-CALLS: NGRAD =,I4) C END - + diff --git a/pyoptsparse/pySNOPT/pySNOPT.py b/pyoptsparse/pySNOPT/pySNOPT.py index bac40403..5354f78e 100644 --- a/pyoptsparse/pySNOPT/pySNOPT.py +++ b/pyoptsparse/pySNOPT/pySNOPT.py @@ -2,6 +2,7 @@ pySNOPT - A variation of the pySNOPT wrapper specifically designed to work with sparse optimization problems. 
""" + # Standard Python modules import datetime import os @@ -9,50 +10,32 @@ import sys import time from typing import Any, Dict, Optional, Tuple -import warnings # External modules -from baseclasses.utils import CaseInsensitiveSet +from baseclasses.utils import CaseInsensitiveSet, writePickle import numpy as np from numpy import ndarray +from packaging.version import parse as parse_version # Local modules from ..pyOpt_error import Error from ..pyOpt_optimization import Optimization from ..pyOpt_optimizer import Optimizer -from ..pyOpt_utils import ICOL, IDATA, INFINITY, IROW, extractRows, mapToCSC, scaleRows - - -def _import_snopt_from_path(path): - """Attempt to import snopt from a specific path. Return the loaded module, or `None` if snopt cannot be imported.""" - path = os.path.abspath(os.path.expandvars(os.path.expanduser(path))) - orig_path = sys.path - sys.path = [path] - try: - import snopt # isort: skip - except ImportError: - warnings.warn( - f"`snopt` module could not be imported from {path}.", - ImportWarning, - stacklevel=2, - ) - snopt = None - finally: - sys.path = orig_path - return snopt - - -# Compiled module -_IMPORT_SNOPT_FROM = os.environ.get("PYOPTSPARSE_IMPORT_SNOPT_FROM", None) -if _IMPORT_SNOPT_FROM is not None: - # if a specific import path is specified, attempt to load SNOPT from it - snopt = _import_snopt_from_path(_IMPORT_SNOPT_FROM) -else: - # otherwise, load it relative to this file - try: - from . import snopt # isort: skip - except ImportError: - snopt = None +from ..pyOpt_utils import ( + ICOL, + IDATA, + INFINITY, + IROW, + extractRows, + mapToCSC, + scaleRows, + try_import_compiled_module_from_path, +) + +# import the compiled module +THIS_DIR = os.path.dirname(os.path.abspath(__file__)) +_IMPORT_SNOPT_FROM = os.environ.get("PYOPTSPARSE_IMPORT_SNOPT_FROM", THIS_DIR) +snopt = try_import_compiled_module_from_path("snopt", _IMPORT_SNOPT_FROM) class SNOPT(Optimizer): @@ -77,15 +60,17 @@ def __init__(self, raiseError=True, options: Dict = {}): { "Save major iteration variables", "Return work arrays", + "Work arrays save file", "snSTOP function handle", + "snSTOP arguments", } ) informs = self._getInforms() - if snopt is None: + if isinstance(snopt, str): if raiseError: - raise Error("There was an error importing the compiled snopt module") + raise ImportError(snopt) else: version = None else: @@ -135,7 +120,9 @@ def _getDefaultOptions() -> Dict[str, Any]: "Total real workspace": [int, None], "Save major iteration variables": [list, []], "Return work arrays": [bool, False], + "Work arrays save file": [(type(None), str), None], "snSTOP function handle": [(type(None), type(lambda: None)), None], + "snSTOP arguments": [list, []], } return defOpts @@ -278,6 +265,9 @@ def __call__( self.startTime = time.time() self.callCounter = 0 self.storeSens = storeSens + # flush the output streams + sys.stdout.flush() + sys.stderr.flush() # Store the starting time if the keyword timeLimit is given: self.timeLimit = timeLimit @@ -404,6 +394,7 @@ def __call__( self.setOption("Total real workspace", lenrw) cw = np.empty((lencw, 8), dtype="|S1") + cw[:] = " " iw = np.zeros(leniw, np.intc) rw = np.zeros(lenrw, float) snopt.sninit(iPrint, iSumm, cw, iw, rw) @@ -463,11 +454,6 @@ def __call__( start = np.array(self.getOption("Start")) ObjAdd = np.array(0.0, float) ProbNm = np.array(self.optProb.name, "c") - cdummy = -1111111 # this is a magic variable defined in SNOPT for undefined strings - cw[51, :] = cdummy # we set these to cdummy so that a placeholder is used in printout - cw[52, 
:] = cdummy
-            cw[53, :] = cdummy
-            cw[54, :] = cdummy
             xs = np.concatenate((xs, np.zeros(ncon, float)))
             bl = np.concatenate((blx, blc))
             bu = np.concatenate((bux, buc))
@@ -520,6 +506,12 @@
             sol_inform["text"] = self.informs[inform]
 
             # Create the optimization solution
+            if parse_version(self.version) > parse_version("7.7.0") and parse_version(self.version) < parse_version(
+                "7.7.7"
+            ):
+                # SNOPT obj value is buggy and returned as 0; it is thus overwritten with the solution objective value
+                obj = np.array([obj.value * obj.scale for obj in self.optProb.objectives.values()])
+
             sol = self._createSolution(optTime, sol_inform, obj, xs[:nvar], multipliers=pi)
 
             restartDict = {
                 "cw": cw,
                 "iw": iw,
                 "rw": rw,
                 "xs": xs,
                 "hs": hs,
                 "pi": pi,
             }
 
+            self._on_flushFiles()
+
         else:  # We are not on the root process so go into waiting loop:
             self._waitLoop()
             restartDict = None
@@ -576,10 +570,7 @@ def _userfg_wrap(self, mode, nnJac, x, fobj, gobj, fcon, gcon, nState, cu, iu, ru):
         elif fail == 2:
             mode = -2
 
-        # Flush the files to the buffer for all the people who like to
-        # monitor the residual
-        snopt.pyflush(self.getOption("iPrint"))
-        snopt.pyflush(self.getOption("iSumm"))
+        self._on_flushFiles()
 
         # Check if we've exceeded the timeLimit
         if self.timeLimit is not None:
@@ -680,12 +671,39 @@ def _snstop(self, ktcond, mjrprtlvl, minimize, n, nncon, nnobj, ns, itn, nmajor,
         if "funcs" in self.cache.keys():
             iterDict["funcs"].update(self.cache["funcs"])
 
+        # Create the restart dictionary to be passed to snstop_handle
+        restartDict = {
+            "cw": cw,
+            "iw": iw,
+            "rw": rw,
+            "xs": x,  # x is the same as xs; we call it x here to be consistent with the SNOPT subroutine snSTOP
+            "hs": hs,
+            "pi": pi,
+        }
+
+        workArraysSave = self.getOption("Work arrays save file")
+        if workArraysSave is not None:
+            # Save the restart dictionary
+            writePickle(workArraysSave, restartDict)
+
         # perform callback if requested
         snstop_handle = self.getOption("snSTOP function handle")
         if snstop_handle is not None:
+
+            # Get the arguments to pass in to snstop_handle
+            # iterDict is always included
+            snstopArgs = [iterDict]
+            for snstopArg in self.getOption("snSTOP arguments"):
+                if snstopArg == "restartDict":
+                    snstopArgs.append(restartDict)
+                else:
+                    raise Error(f"Received unknown snSTOP argument {snstopArg}. 
" + + "Please see 'snSTOP arguments' option in the pyOptSparse documentation " + + "under 'SNOPT'.") + if not self.storeHistory: raise Error("snSTOP function handle must be used with storeHistory=True") - iabort = snstop_handle(iterDict) + iabort = snstop_handle(*snstopArgs) # write iterDict again if anything was inserted if self.storeHistory and callCounter is not None: self.hist.write(callCounter, iterDict) @@ -714,11 +732,11 @@ def _set_snopt_options(self, iPrint: int, iSumm: int, cw: ndarray, iw: ndarray, if name == "Problem Type": snopt.snset(value, iPrint, iSumm, inform, cw, iw, rw) elif name == "Print file": - snopt.snset(name + " " + f"{iPrint}", iPrint, iSumm, inform, cw, iw, rw) + snopt.snset(f"{name} {iPrint}", iPrint, iSumm, inform, cw, iw, rw) elif name == "Summary file": - snopt.snset(name + " " + f"{iSumm}", iPrint, iSumm, inform, cw, iw, rw) + snopt.snset(f"{name} {iSumm}", iPrint, iSumm, inform, cw, iw, rw) else: - snopt.snset(name + " " + value, iPrint, iSumm, inform, cw, iw, rw) + snopt.snset(f"{name} {value}", iPrint, iSumm, inform, cw, iw, rw) elif isinstance(value, float): snopt.snsetr(name, value, iPrint, iSumm, inform, cw, iw, rw) elif isinstance(value, int): diff --git a/setup.py b/setup.py index d7860e7d..205c1416 100644 --- a/setup.py +++ b/setup.py @@ -102,9 +102,10 @@ def copy_shared_libraries(): platforms=["Linux"], keywords="optimization", install_requires=[ + "packaging", "sqlitedict>=1.6", - "numpy>=1.16", - "scipy>1.2", + "numpy>=1.21,<2", + "scipy>=1.7", "mdolab-baseclasses>=1.3.1", ], extras_require={ @@ -134,7 +135,7 @@ def copy_shared_libraries(): package_data={ "": ["*.so", "*.lib", "*.pyd", "*.pdb", "*.dylib", "assets/*", "LICENSE"], }, - python_requires=">=3.7", + python_requires=">=3.9", entry_points={ "gui_scripts": [ "optview = pyoptsparse.postprocessing.OptView:main", diff --git a/tests/test_hs015.py b/tests/test_hs015.py index a7870044..27ee8fd3 100644 --- a/tests/test_hs015.py +++ b/tests/test_hs015.py @@ -1,9 +1,11 @@ """Test solution of problem HS15 from the Hock & Schittkowski collection""" # Standard Python modules +import os import unittest # External modules +from baseclasses.utils import readPickle, writePickle import numpy as np from parameterized import parameterized @@ -193,6 +195,106 @@ def test_snopt_snstop(self): # we should get 70/74 self.assert_inform_equal(sol, optInform=74) + def test_snopt_snstop_restart(self): + pickleFile = "restart.pickle" + + def my_snstop_restart(iterDict, restartDict): + # Save the restart dictionary + writePickle(pickleFile, restartDict) + + # Exit after 5 major iterations + if iterDict["nMajor"] == 5: + return 1 + + return 0 + + # Run the optimization for 5 major iterations + self.optName = "SNOPT" + self.setup_optProb() + optOptions = { + "snSTOP function handle": my_snstop_restart, + "snSTOP arguments": ["restartDict"], + } + sol = self.optimize(optOptions=optOptions, storeHistory=True) + + # Check that the optimization exited with 74 + self.assert_inform_equal(sol, optInform=74) + + # Read the restart dictionary pickle file saved by snstop + restartDict = readPickle(pickleFile) + + # Now optimize again but using the restart dictionary + self.setup_optProb() + opt = OPT( + self.optName, + options={ + "Start": "Hot", + "Verify level": -1, + "snSTOP function handle": my_snstop_restart, + "snSTOP arguments": ["restartDict"], + }, + ) + histFile = "restart.hst" + sol = opt(self.optProb, sens=self.sens, storeHistory=histFile, restartDict=restartDict) + + # Check that the optimization converged in fewer 
than 5 more major iterations
+        self.assert_solution_allclose(sol, 1e-12)
+        self.assert_inform_equal(sol, optInform=1)
+
+        # Delete the pickle and history files
+        os.remove(pickleFile)
+        os.remove(histFile)
+
+    def test_snopt_work_arrays_save(self):
+        # Run the optimization for 5 major iterations
+        self.optName = "SNOPT"
+        self.setup_optProb()
+        pickleFile = "work_arrays_save.pickle"
+        optOptions = {
+            "snSTOP function handle": self.my_snstop,
+            "Work arrays save file": pickleFile,
+        }
+        sol = self.optimize(optOptions=optOptions, storeHistory=True)
+
+        # Read the restart dictionary pickle file saved by snstop
+        restartDict = readPickle(pickleFile)
+
+        # Now optimize again but using the restart dictionary
+        self.setup_optProb()
+        opt = OPT(
+            self.optName,
+            options={
+                "Start": "Hot",
+                "Verify level": -1,
+            },
+        )
+        histFile = "work_arrays_save.hst"
+        sol = opt(self.optProb, sens=self.sens, storeHistory=histFile, restartDict=restartDict)
+
+        # Check that the optimization converged
+        self.assert_solution_allclose(sol, 1e-12)
+        self.assert_inform_equal(sol, optInform=1)
+
+        # Delete the pickle and history files
+        os.remove(pickleFile)
+        os.remove(histFile)
+
+    def test_snopt_failed_initial(self):
+        def failed_fun(x_dict):
+            funcs = {"obj": 0.0, "con": [np.nan, np.nan]}
+            fail = True
+            return funcs, fail
+
+        self.optName = "SNOPT"
+        self.setup_optProb()
+        # swap obj to report NaN
+        self.optProb.objFun = failed_fun
+        sol = self.optimize(optOptions={}, storeHistory=True)
+        self.assert_inform_equal(sol, optInform=61)
+        # make sure empty history does not error out
+        hist = History(self.histFileName, flag="r")
+        hist.getValues()
+
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/tests/test_lincon_error.py b/tests/test_lincon_error.py
new file mode 100644
index 00000000..7af5723b
--- /dev/null
+++ b/tests/test_lincon_error.py
@@ -0,0 +1,47 @@
+"""
+Tests that pyOptSparse raises an error when a user-defined obj/con function returns a linear constraint value
+(which it should not, because linear constraints are defined exclusively by jac and bounds)
+"""
+
+# Standard Python modules
+import unittest
+
+# First party modules
+from pyoptsparse import SLSQP, Optimization
+from pyoptsparse.pyOpt_error import Error
+
+
+def objfunc(xdict):
+    """Evaluates a simple quadratic objective and, incorrectly, also returns a linear constraint value"""
+    x = xdict["x"]
+    funcs = {}
+
+    funcs["obj"] = x**2
+    funcs["con"] = x - 1  # incorrectly return a linear constraint value
+
+    fail = False
+    return funcs, fail
+
+
+class TestLinearConstraintCheck(unittest.TestCase):
+    def test(self):
+        # define an optimization problem with a linear constraint
+        optProb = Optimization("test", objfunc)
+        optProb.addVarGroup("x", 1, value=1)
+        optProb.addObj("obj")
+        optProb.addConGroup("con", 1, lower=1.0, linear=True, wrt=["x"], jac={"x": [1.0]})
+
+        opt = SLSQP()
+        with self.assertRaises(Error) as context:
+            opt(optProb, sens="FD")
+
+        # check if we get the expected error message
+        err_msg = (
+            "Value for linear constraint returned from user obj function. Linear constraints "
+            + "are evaluated internally and should not be returned from the user's function."
+ ) + self.assertEqual(err_msg, str(context.exception)) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_optimizer.py b/tests/test_optimizer.py new file mode 100644 index 00000000..2e897f6d --- /dev/null +++ b/tests/test_optimizer.py @@ -0,0 +1,272 @@ +"""Test for Optimizer""" + +# Standard Python modules +import unittest + +# External modules +import numpy as np +from parameterized import parameterized + +# First party modules +from pyoptsparse import OPT, Optimization + +MASTERFUNC_OUTPUTS = ["fobj", "fcon", "gobj", "gcon"] + + +class TestOptimizer(unittest.TestCase): + tol = 1e-12 + + def get_objfunc(self, failFlag=False): + """ + Return an objfunc callable function where we can choose whether + the fail flag will be returned as True or False. + """ + # Initialize iters to infinite so the fail flag is never thrown on setup + iters = np.inf + + def objfunc(xdict): + """ + This is a simple quadratic test function with linear constraints. + The actual problem doesn't really matter, since we are not testing optimization, + but just optProb. However, we need to initialize and run an optimization + in order to have optimizer-specific fields in optProb populated, such as + jacIndices. + + This problem is probably not feasible, but that's okay. + + Reset the iteration counter with a special call that includes + a nonfinite value in the design variable vector. + """ + funcs = {} + funcs["obj_0"] = 0 + for x in xdict.keys(): + funcs["obj_0"] += np.sum(np.power(xdict[x], 2)) + for iCon, nc in enumerate(self.nCon): + conName = f"con_{iCon}" + funcs[conName] = np.zeros(nc) + for x in xdict.keys(): + for j in range(nc): + funcs[conName][j] = (iCon + 1) * np.sum(xdict[x]) + + # Throw the fail flag if it's in the specified range or True + nonlocal iters + if isinstance(failFlag, tuple): + if not len(failFlag) == 2: + raise ValueError("Fail flag must be a tuple of (iter start fail, iter end fail) or a boolean") + fail = failFlag[0] <= iters < failFlag[1] + elif isinstance(failFlag, bool): + fail = failFlag + else: + raise ValueError("Fail flag must be a tuple of (iter start fail, iter end fail) or a boolean") + iters += 1 + + # Reset iteration counter if any non-finite values in DV dict + for xVec in xdict.values(): + if not np.all(np.isfinite(xVec)): + iters = 0 + break + return funcs, fail + + return objfunc + + def setup_optProb(self, failFlag=False, nObj=1, nDV=[4], nCon=[2]): + """ + This function sets up a general optimization problem, with arbitrary + DVs, constraints and objectives. + Arbitrary scaling for the various parameters can also be specified. 
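+
+        For example, ``self.setup_optProb(failFlag=(1, 100), nDV=[4, 5], nCon=[2])``
+        (values taken from the tests below) would build two design variable groups
+        of sizes 4 and 5 and one constraint group of size 2, with an objective
+        function that succeeds on its first evaluation and reports failure on its
+        2nd through 100th evaluations.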
+ """ + self.nObj = nObj + self.nDV = nDV + self.nCon = nCon + + # Optimization Object + self.optProb = Optimization("Configurable Test Problem", self.get_objfunc(failFlag=failFlag)) + self.x0 = {} + # Design Variables + for iDV in range(len(nDV)): + n = nDV[iDV] + x0 = np.ones(n) + dvName = f"x{iDV}" + self.x0[dvName] = x0 + self.optProb.addVarGroup( + dvName, + n, + lower=-1, + upper=1, + value=x0, + ) + + # Constraints + for iCon in range(len(nCon)): + nc = nCon[iCon] + self.optProb.addConGroup( + f"con_{iCon}", + nc, + lower=-5, + upper=5, + ) + + # Objective + for iObj in range(nObj): + self.optProb.addObj(f"obj_{iObj}") + + # Finalize + self.optProb.printSparsity() + # create and store optimizer + self.opt = OPT("slsqp", options={"IFILE": "optProb_SLSQP.out"}) + self.opt(self.optProb, sens="FD") + + # Call the masterFunc with some infinite DVs so it resets iters + self.opt._masterFunc(np.full(np.sum(nDV), np.inf), ["fobj"]) + + def test_masterFunc_fobj_fail(self): + """ + Test that if the objective fails when _masterFunc is called, + the fail flag is returned with the expected value. + """ + nDV = [4] + self.setup_optProb(failFlag=(1, 100), nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + + # Do not fail + _, fail = self.opt._masterFunc(x, ["fobj"]) + self.assertFalse(fail) + + # Should fail on the second function call + x += 1 # change x so it doesn't use the cache + _, fail = self.opt._masterFunc(x, ["fobj"]) + self.assertTrue(fail) + + @parameterized.expand(MASTERFUNC_OUTPUTS) + def test_masterFunc_output_fail_cache(self, output): + """ + Test that if an output fails when _masterFunc is called + and it is then called again with the same x vector, + the fail flag is returned with the expected value. + """ + nDV = [4] + # Set fail flag to (0, 1) so we know for sure that it's using + # the cache since the only failure is on the first call + self.setup_optProb(failFlag=(0, 1), nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + + # Fail + _, fail = self.opt._masterFunc(x, [output]) + self.assertTrue(fail) + + # Should fail with the same x vector using the cache + _, fail = self.opt._masterFunc(x, [output]) + self.assertTrue(fail) + + # Do the same thing one more time to make sure the cache is really really working + _, fail = self.opt._masterFunc(x, [output]) + self.assertTrue(fail) + + def test_masterFunc_gobj_fail_cache(self): + """ + Test that if the gradient fails when _masterFunc is called + and it is then called again with the same x vector, + the fail flag is returned with the expected value. + """ + nDV = [4] + self.setup_optProb(failFlag=True, nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + + # Fail + _, _, fail = self.opt._masterFunc(x, ["gcon", "gobj"]) + self.assertTrue(fail) + + # Should fail with the same x vector using the cache + _, fail = self.opt._masterFunc(x, ["gobj"]) + self.assertTrue(fail) + + def test_masterFunc_fobj_fcon_cache_fail(self): + """ + Test that if the objective fails when _masterFunc is called + and then the constraints are called, it still returns a failure. 
+ """ + nDV = [4] + self.setup_optProb(failFlag=(1, 100), nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + + # Do not fail + _, fail = self.opt._masterFunc(x, ["fobj"]) + self.assertFalse(fail) + + # Check that the cached value does not fail either + _, fail = self.opt._masterFunc(x, ["fcon"]) + self.assertFalse(fail) + + # Should fail on the second function call + x += 1 # change x so it doesn't use the cache + _, fail = self.opt._masterFunc(x, ["fobj"]) + self.assertTrue(fail) + + # Check that the cached value now fails too + _, fail = self.opt._masterFunc(x, ["fcon"]) + self.assertTrue(fail) + + def test_masterFunc_fail_then_success(self): + """ + Test that if the objective/constraint fails when _masterFunc is called + and then it succeeds, the fail flag is no longer true. + """ + nDV = [4, 5] + self.setup_optProb(failFlag=(0, 1), nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + + # Fail + _, _, fail = self.opt._masterFunc(x, ["fobj", "fcon"]) + self.assertTrue(fail) + + # Should succeed on the second call + x += 1 # change x so it doesn't use the cache + _, _, fail = self.opt._masterFunc(x, ["fobj", "fcon"]) + self.assertFalse(fail) + + def test_masterFunc_fail_grad_after_fail_func(self): + """ + Test that if the _masterFunc is called to compute the gradients on + an x that isn't in the cache and the primal fails, it returns a + fail flag for the gradient too. + """ + nDV = [4, 5] + self.setup_optProb(failFlag=True, nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + 5 + + # Fail obj gradient on DVs that haven't been evaluated when the primal fails + _, fail = self.opt._masterFunc(x, ["gobj"]) + self.assertTrue(fail) + + # Fail con gradient on DVs that haven't been evaluated when the primal fails + x += 1 + _, fail = self.opt._masterFunc(x, ["gcon"]) + self.assertTrue(fail) + + def test_masterFunc_succeed_grad_after_fail_func(self): + """ + Test that if the _masterFunc is called to compute the gradients on + an x that is in the cache and the primal fails, it returns a + False fail flag for the gradient. 
+ """ + nDV = [4, 5] + self.setup_optProb(failFlag=(0, 1), nDV=nDV) + + x = np.ones(np.sum(nDV), dtype=float) + 5 + + _, fail = self.opt._masterFunc(x, ["fobj"]) # call primal to put x in the cache + self.assertTrue(fail) + + # Gradient succeeds even though primal failed + _, _, fail = self.opt._masterFunc(x, ["gobj", "gcon"]) + self.assertFalse(fail) + + +if __name__ == "__main__": + unittest.main() diff --git a/tests/test_other.py b/tests/test_other.py index 98d0996d..f0c59b2c 100644 --- a/tests/test_other.py +++ b/tests/test_other.py @@ -3,20 +3,25 @@ import sys import unittest -# we have to unset this environment variable because otherwise when we import `_import_snopt_from_path` +# First party modules +from pyoptsparse.pyOpt_utils import try_import_compiled_module_from_path + +# we have to unset this environment variable because otherwise # the snopt module gets automatically imported, thus failing the import test below os.environ.pop("PYOPTSPARSE_IMPORT_SNOPT_FROM", None) -# First party modules -from pyoptsparse.pySNOPT.pySNOPT import _import_snopt_from_path # noqa: E402 - class TestImportSnoptFromPath(unittest.TestCase): def test_nonexistent_path(self): - with self.assertWarns(ImportWarning): - self.assertIsNone(_import_snopt_from_path("/a/nonexistent/path")) + # first unload `snopt` from namespace + for key in list(sys.modules.keys()): + if "snopt" in key: + sys.modules.pop(key) + with self.assertWarns(UserWarning): + module = try_import_compiled_module_from_path("snopt", "/a/nonexistent/path", raise_warning=True) + self.assertTrue(isinstance(module, str)) def test_sys_path_unchanged(self): path = tuple(sys.path) - _import_snopt_from_path("/some/path") + try_import_compiled_module_from_path("snopt", "/some/path") self.assertEqual(tuple(sys.path), path) diff --git a/tests/test_require_mpi_env_var.py b/tests/test_require_mpi_env_var.py index 15e7ee25..ca95c0ed 100644 --- a/tests/test_require_mpi_env_var.py +++ b/tests/test_require_mpi_env_var.py @@ -2,14 +2,9 @@ import importlib import inspect import os -import sys import unittest # isort: off -if sys.version_info[0] == 2: - reload_func = reload # noqa: F821 -else: - reload_func = importlib.reload try: HAS_MPI = True @@ -26,14 +21,14 @@ def test_require_mpi(self): os.environ["PYOPTSPARSE_REQUIRE_MPI"] = "1" import pyoptsparse.pyOpt_MPI - reload_func(pyoptsparse.pyOpt_MPI) + importlib.reload(pyoptsparse.pyOpt_MPI) self.assertTrue(inspect.ismodule(pyoptsparse.pyOpt_MPI.MPI)) def test_no_mpi_requirement_given(self): os.environ.pop("PYOPTSPARSE_REQUIRE_MPI", None) import pyoptsparse.pyOpt_MPI - reload_func(pyoptsparse.pyOpt_MPI) + importlib.reload(pyoptsparse.pyOpt_MPI) if HAS_MPI: self.assertTrue(inspect.ismodule(pyoptsparse.pyOpt_MPI.MPI)) else: @@ -43,7 +38,7 @@ def test_do_not_use_mpi(self): os.environ["PYOPTSPARSE_REQUIRE_MPI"] = "0" import pyoptsparse.pyOpt_MPI - reload_func(pyoptsparse.pyOpt_MPI) + importlib.reload(pyoptsparse.pyOpt_MPI) self.assertFalse(inspect.ismodule(pyoptsparse.pyOpt_MPI.MPI)) @@ -60,22 +55,22 @@ def test_require_mpi_check_paropt(self): os.environ["PYOPTSPARSE_REQUIRE_MPI"] = "1" import pyoptsparse.pyParOpt.ParOpt - reload_func(pyoptsparse.pyParOpt.ParOpt) + importlib.reload(pyoptsparse.pyParOpt.ParOpt) self.assertIsNotNone(pyoptsparse.pyParOpt.ParOpt._ParOpt) def test_no_mpi_requirement_given_check_paropt(self): os.environ.pop("PYOPTSPARSE_REQUIRE_MPI", None) import pyoptsparse.pyParOpt.ParOpt - reload_func(pyoptsparse.pyParOpt.ParOpt) + importlib.reload(pyoptsparse.pyParOpt.ParOpt) 
         self.assertIsNotNone(pyoptsparse.pyParOpt.ParOpt._ParOpt)
 
     def test_no_mpi_requirement_given_check_paropt(self):
         os.environ.pop("PYOPTSPARSE_REQUIRE_MPI", None)
         import pyoptsparse.pyParOpt.ParOpt
 
-        reload_func(pyoptsparse.pyParOpt.ParOpt)
+        importlib.reload(pyoptsparse.pyParOpt.ParOpt)
         self.assertIsNotNone(pyoptsparse.pyParOpt.ParOpt._ParOpt)
 
     def test_do_not_use_mpi_check_paropt(self):
         os.environ["PYOPTSPARSE_REQUIRE_MPI"] = "0"
         import pyoptsparse.pyParOpt.ParOpt
 
-        reload_func(pyoptsparse.pyParOpt.ParOpt)
-        self.assertIsNone(pyoptsparse.pyParOpt.ParOpt._ParOpt)
+        importlib.reload(pyoptsparse.pyParOpt.ParOpt)
+        self.assertTrue(isinstance(pyoptsparse.pyParOpt.ParOpt._ParOpt, str))
 
 
 if __name__ == "__main__":
diff --git a/tests/test_snopt_bugfix.py b/tests/test_snopt_bugfix.py
index e6460452..12ffe28a 100644
--- a/tests/test_snopt_bugfix.py
+++ b/tests/test_snopt_bugfix.py
@@ -12,7 +12,6 @@
 
 # First party modules
 from pyoptsparse import SNOPT, Optimization
-from pyoptsparse.pyOpt_error import Error
 
 
@@ -22,35 +21,6 @@ def objfunc(xdict):
     """Evaluates the equation f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3"""
     x = xdict["x"]
     y = xdict["y"]
     funcs = {}
 
     funcs["obj"] = (x - 3.0) ** 2 + x * y + (y + 4.0) ** 2 - 3.0
-    conval = -x + y
-    funcs["con"] = conval
-
-    fail = False
-    return funcs, fail
-
-
-def objfunc_no_con(xdict):
-    """Evaluates the equation f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3"""
-    x = xdict["x"]
-    y = xdict["y"]
-    funcs = {}
-
-    funcs["obj"] = (x - 3.0) ** 2 + x * y + (y + 4.0) ** 2 - 3.0
-
-    fail = False
-    return funcs, fail
-
-
-def objfunc_2con(xdict):
-    """Evaluates the equation f(x,y) = (x-3)^2 + xy + (y+4)^2 - 3"""
-    x = xdict["x"]
-    y = xdict["y"]
-    funcs = {}
-
-    funcs["obj"] = (x - 3.0) ** 2 + x * y + (y + 4.0) ** 2 - 3.0
-    conval = -x + y
-    funcs["con"] = conval * np.ones(2)
-    funcs["con2"] = (conval + 1) * np.ones(3)
 
     fail = False
     return funcs, fail
@@ -104,10 +74,8 @@ def test_opt(self):
         # Optimizer
         try:
             opt = SNOPT(options=optOptions)
-        except Error as e:
-            if "There was an error importing" in e.message:
-                raise unittest.SkipTest("Optimizer not available: SNOPT")
-            raise e
+        except ImportError:
+            raise unittest.SkipTest("Optimizer not available: SNOPT")
 
         sol = opt(optProb, sens=sens)
 
@@ -118,7 +86,7 @@ def test_opt_bug1(self):
         # Due to a new feature, there is a TypeError when you optimize a model without a constraint.
-        optProb = Optimization("Paraboloid", objfunc_no_con)
+        optProb = Optimization("Paraboloid", objfunc)
 
         # Design Variables
         optProb.addVarGroup("x", 1, varType="c", lower=-50.0, upper=50.0, value=0.0)
@@ -137,16 +105,14 @@
         # Optimizer
         try:
             opt = SNOPT(options=optOptions)
-        except Error as e:
-            if "There was an error importing" in e.message:
-                raise unittest.SkipTest("Optimizer not available: SNOPT")
-            raise e
+        except ImportError:
+            raise unittest.SkipTest("Optimizer not available: SNOPT")
 
         opt(optProb, sens=sens)
 
     def test_opt_bug_print_2con(self):
         # Optimization Object
-        optProb = Optimization("Paraboloid", objfunc_2con)
+        optProb = Optimization("Paraboloid", objfunc)
 
         # Design Variables
         optProb.addVarGroup("x", 1, varType="c", lower=-50.0, upper=50.0, value=0.0)
@@ -180,10 +146,8 @@
         # Optimizer
         try:
             opt = SNOPT(options=optOptions)
-        except Error as e:
-            if "There was an error importing" in e.message:
-                raise unittest.SkipTest("Optimizer not available: SNOPT")
-            raise e
+        except ImportError:
+            raise unittest.SkipTest("Optimizer not available: SNOPT")
 
         sol = opt(optProb, sens=sens)
 
diff --git a/tests/test_tp109.py b/tests/test_tp109.py
index efaacbce..48a6b5d2 100644
--- a/tests/test_tp109.py
+++ b/tests/test_tp109.py
@@ -176,7 +176,7 @@ def test_snopt(self):
     def test_snopt_informs(self):
         self.optName = "SNOPT"
         self.setup_optProb()
-        sol = self.optimize(optOptions={"Time Limit": 1e-5})
+        sol = self.optimize(optOptions={"Time Limit": 1e-15})
         self.assert_inform_equal(sol, 34)
 
     def test_slsqp(self):
diff --git a/tests/test_user_termination.py b/tests/test_user_termination.py
index ca40bad9..017255f3 100644
--- a/tests/test_user_termination.py
+++ b/tests/test_user_termination.py
@@ -14,7 +14,6 @@
 
 # First party modules
 from pyoptsparse import OPT, Optimization
-from pyoptsparse.pyOpt_error import Error
 
 
 class TerminateComp:
@@ -31,8 +30,6 @@ def objfunc(self, xdict):
         funcs = {}
 
         funcs["obj"] = (x - 3.0) ** 2 + x * y + (y + 4.0) ** 2 - 3.0
-        conval = -x + y
-        funcs["con"] = conval
 
         if self.obj_count > self.max_obj:
             fail = 2
@@ -105,10 +102,8 @@ def test_obj(self, optName):
 
         try:
             opt = OPT(optName, options=optOptions)
-        except Error as e:
-            if "There was an error importing" in e.message:
-                raise unittest.SkipTest(f"Optimizer not available: {optName}")
-            raise e
+        except ImportError:
+            raise unittest.SkipTest(f"Optimizer not available: {optName}")
 
         sol = opt(optProb, sens=termcomp.sens)
 
@@ -128,10 +123,8 @@ def test_sens(self, optName):
 
         try:
             opt = OPT(optName, options=optOptions)
-        except Error as e:
-            if "There was an error importing" in e.message:
-                raise unittest.SkipTest("Optimizer not available: SNOPT")
-            raise e
+        except ImportError:
+            raise unittest.SkipTest(f"Optimizer not available: {optName}")
 
         sol = opt(optProb, sens=termcomp.sens)
 
diff --git a/tests/testing_utils.py b/tests/testing_utils.py
index 84fc2d37..fa94cb06 100644
--- a/tests/testing_utils.py
+++ b/tests/testing_utils.py
@@ -9,7 +9,6 @@
 
 # First party modules
 from pyoptsparse import OPT, History
-from pyoptsparse.pyOpt_error import Error
 
 
 def assert_optProb_size(optProb, nObj, nDV, nCon):
@@ -118,11 +117,16 @@
         else:
             # assume we have a single solution
             self.sol_index = 0
+        # now we assert against the closest solution
         # objective
         assert_allclose(sol.fStar, self.fStar[self.sol_index], atol=tol, rtol=tol)
         # make sure fStar and sol.objectives values match
-        assert_allclose(sol.fStar, [obj.value for obj in sol.objectives.values()], rtol=1e-12)
+        # NOTE this is not true in general, but true for well-behaving optimizations
+        # which should be the case for all tests
+        sol_objectives = np.array([obj.value for obj in sol.objectives.values()])
+        assert_allclose(sol.fStar, sol_objectives, rtol=1e-12)
+
 
         # x
         assert_dict_allclose(sol.xStar, self.xStar[self.sol_index], atol=tol, rtol=tol, partial=partial_x)
         dv = sol.getDVs()
@@ -137,6 +141,9 @@
         ):
             assert_dict_allclose(sol.lambdaStar, self.lambdaStar[self.sol_index], atol=tol, rtol=tol)
 
+        # test printing solution
+        print(sol)
+
     def assert_inform_equal(self, sol, optInform=None):
         """
         Check that the optInform stored in the Solution object is as expected.
@@ -226,7 +233,7 @@ def optimize(self, sens=None, setDV=None, optOptions=None, storeHistory=False, h
         try:
             opt = OPT(self.optName, options=optOptions)
             self.optVersion = opt.version
-        except Error as e:
+        except ImportError as e:
             if self.optName in DEFAULT_OPTIMIZERS:
                 raise e
             else: