Allow for user provided gradient in maximize and minimize (#158)
* Fix numba deprecation warning.

* Allow for user provided gradient.

* Bump version.

* Add test.

* More tests.

* Remove comment.

* Trigger run.

* Allow gradient_kwargs.

* Now also works for maximization.

* Improve results processing.

* Update changes.rst

* Restructure decorator.

* Fix.
janosg authored Apr 22, 2020
1 parent dc5d777 commit 6ec5f85
Showing 15 changed files with 315 additions and 46 deletions.
6 changes: 6 additions & 0 deletions CHANGES.rst
@@ -7,6 +7,12 @@ releases are available on `Anaconda.org
<https://anaconda.org/OpenSourceEconomics/estimagic>`_.


0.0.30 - 2020-04-22
-------------------

- :gh:`158` allows users to specify a gradient in maximize and minimize (:ghuser:`janosg`)


0.0.29 - 2020-04-16
-------------------

2 changes: 1 addition & 1 deletion docs/source/conf.py
@@ -107,7 +107,7 @@ def setup(app):
# built documents.
#
# The full version, including alpha/beta/rc tags.
-release = "0.0.29"
+release = "0.0.30"
version = ".".join(release.split(".")[:2])

# The language for content autogenerated by Sphinx. Refer to documentation
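
For reference, the short version derived just below the bump is unchanged by this release:

# Mirrors the derivation in docs/source/conf.py above; "0.0.30" still
# shortens to "0.0".
release = "0.0.30"
version = ".".join(release.split(".")[:2])
print(version)  # 0.0
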
106 changes: 106 additions & 0 deletions docs/source/how_to/closed_form_gradient.ipynb
@@ -0,0 +1,106 @@
{
"cells": [
{
"cell_type": "markdown",
"metadata": {},
"source": [
"# Use a closed-form gradient in `maximize` or `minimize`"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [],
"source": [
"from estimagic import minimize\n",
"import pandas as pd\n",
"import numpy as np"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"def sum_of_squares(params):\n",
" return (params[\"value\"] ** 2).sum()\n",
"\n",
"\n",
"def sum_of_squares_gradient(params):\n",
" return params[\"value\"].to_numpy() * 2\n",
"\n",
"start_params = pd.DataFrame()\n",
"start_params[\"value\"] = [1, 2.5, - 1]"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [],
"source": [
"info, params = minimize(\n",
" criterion=sum_of_squares, \n",
" params=start_params, \n",
" algorithm=\"scipy_L-BFGS-B\", \n",
" gradient=sum_of_squares_gradient\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {},
"outputs": [
{
"data": {
"text/plain": [
"{'status': 'success',\n",
" 'message': b'CONVERGENCE: NORM_OF_PROJECTED_GRADIENT_<=_PGTOL',\n",
" 'x': [0.0, 0.0, 0.0],\n",
" 'success': True,\n",
" 'fitness': 0.0,\n",
" 'n_evaluations': 3,\n",
" 'jacobian': array([0., 0., 0.]),\n",
" 'hessian': None,\n",
" 'n_evaluations_jacobian': None,\n",
" 'n_evaluations_hessian': None,\n",
" 'n_iterations': 2,\n",
" 'max_constraints_violations': None,\n",
" 'hessian_inverse': <3x3 LbfgsInvHessProduct with dtype=float64>}"
]
},
"execution_count": 10,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"info"
]
}
],
"metadata": {
"kernelspec": {
"display_name": "Python 3",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.7.6"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
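
The notebook only demonstrates minimize. Below is a minimal sketch of the maximize counterpart this commit also enables, assuming the same call signature as minimize (this example is not part of the committed notebook):

from estimagic import maximize
import pandas as pd

def neg_sum_of_squares(params):
    return -(params["value"] ** 2).sum()

def neg_sum_of_squares_gradient(params):
    return -2 * params["value"].to_numpy()

start_params = pd.DataFrame()
start_params["value"] = [1, 2.5, -1]

# The user supplies the gradient of the criterion as written; maximize
# applies the sign switch internally via the negative_gradient decorator.
info, params = maximize(
    criterion=neg_sum_of_squares,
    params=start_params,
    algorithm="scipy_L-BFGS-B",
    gradient=neg_sum_of_squares_gradient,
)
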
17 changes: 17 additions & 0 deletions docs/source/how_to/index.rst
@@ -0,0 +1,17 @@
=============
How-To Guides
=============

Here we collect user-written how-to guides. Each guide comes in the form of a Jupyter
notebook. Guides should:

- Be goal oriented: Describe only what is necessary to achieve the goal. If you want,
  you can add links to background information at the end.
- Have a minimal setup: If you show how to maximize a function, the function should be
  a one-liner, not a likelihood function that requires a lot of background information.

.. toctree::
:maxdepth: 1

closed_form_gradient.ipynb
1 change: 1 addition & 0 deletions docs/source/index.rst
@@ -16,6 +16,7 @@ models in mind, but also "scales down" to simpler use cases like logit or probit
differentiation/index
inference/index
visualization/index
how_to/index
faq
api/index
contributing/index
2 changes: 1 addition & 1 deletion estimagic/__init__.py
@@ -1,4 +1,4 @@
-__version__ = "0.0.29"
+__version__ = "0.0.30"


from estimagic.optimization.optimize import minimize # noqa: F401
13 changes: 13 additions & 0 deletions estimagic/decorators.py
@@ -114,6 +114,19 @@ def wrapper_negative_criterion(*args, **kwargs):
return wrapper_negative_criterion


def negative_gradient(gradient):
"""Switch the sign of the gradient."""
if gradient is None:
wrapper_negative_gradient = None
else:

@functools.wraps(gradient)
def wrapper_negative_gradient(*args, **kwargs):
return -1 * gradient(*args, **kwargs)

return wrapper_negative_gradient


def log_evaluation(func=None, *, database, tables):
"""Log parameters and fitness values.
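
A standalone illustration of the new decorator, lightly restructured from the hunk above (the example gradient is made up):

import functools
import numpy as np

def negative_gradient(gradient):
    """Switch the sign of the gradient."""
    if gradient is None:
        return None

    @functools.wraps(gradient)
    def wrapper_negative_gradient(*args, **kwargs):
        return -1 * gradient(*args, **kwargs)

    return wrapper_negative_gradient

# Passing None through unchanged lets maximize apply the decorator
# unconditionally, whether or not a gradient was supplied.
grad = lambda params: 2 * np.asarray(params)
neg_grad = negative_gradient(grad)
print(neg_grad([1.0, -2.5]))    # [-2.  5.]
print(negative_gradient(None))  # None
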
1 change: 1 addition & 0 deletions estimagic/optimization/broadcast_arguments.py
@@ -18,6 +18,7 @@ def broadcast_arguments(**arguments):
"log_options",
"dash_options",
"general_options",
"gradient_kwargs",
]
for arg in dict_args:
if arg in arguments and arguments[arg] is None:
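
The hunk cuts off right after the None check. A sketch of the presumed normalization (replacing None with an empty dict is an assumption; the replacement line sits outside the visible diff):

def normalize_dict_args(arguments, dict_args):
    # Presumed behavior: dict-valued arguments that arrive as None are
    # replaced with {} so downstream code can treat them uniformly.
    for arg in dict_args:
        if arg in arguments and arguments[arg] is None:
            arguments[arg] = {}
    return arguments

print(normalize_dict_args({"gradient_kwargs": None}, ["gradient_kwargs"]))
# {'gradient_kwargs': {}}
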
1 change: 1 addition & 0 deletions estimagic/optimization/check_arguments.py
@@ -14,6 +14,7 @@ def check_arguments(arguments):
"algorithm": str,
"algo_options": dict,
"gradient": (typing.Callable, type(None)),
"gradient_kwargs": dict,
"gradient_options": (dict, type(None)),
"log_options": dict,
"criterion_kwargs": dict,
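
The hunk shows only the type map. A sketch of how such a map could be enforced (the name "types" and the loop are assumptions; the actual enforcement code is outside the visible diff):

import typing

types = {
    "gradient": (typing.Callable, type(None)),
    "gradient_kwargs": dict,
}

def check(arguments):
    # Hypothetical enforcement loop over the assumed type map.
    for name, expected in types.items():
        if name in arguments and not isinstance(arguments[name], expected):
            raise TypeError(f"Argument '{name}' has the wrong type.")

check({"gradient": None, "gradient_kwargs": {}})  # passes silently
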
46 changes: 37 additions & 9 deletions estimagic/optimization/optimize.py
@@ -5,6 +5,7 @@

from estimagic.config import DEFAULT_DATABASE_NAME
from estimagic.dashboard.run_dashboard import run_dashboard_in_separate_process
from estimagic.decorators import negative_gradient
from estimagic.logging.update_database import update_scalar_field
from estimagic.optimization.broadcast_arguments import broadcast_arguments
from estimagic.optimization.check_arguments import check_arguments
@@ -23,6 +24,8 @@ def maximize(
constraints=None,
general_options=None,
algo_options=None,
gradient=None,
gradient_kwargs=None,
gradient_options=None,
logging=DEFAULT_DATABASE_NAME,
log_options=None,
@@ -59,6 +62,9 @@
after the optimization(s) finish(es).
algo_options (dict or list of dicts): Algorithm specific configurations for the
optimization.
        gradient (callable): Gradient of the criterion function. Takes params as its
            first argument and returns the gradient as a numpy array or pandas Series.
gradient_kwargs (dict): Additional keyword arguments for the gradient.
gradient_options (dict): Options for the gradient function.
logging (str or pathlib.Path or list): Path(s) to (an) sqlite3 file(s) which
typically has the file extension ``.db``. If the file does not exist,
@@ -82,6 +88,11 @@
general_options = {} if general_options is None else general_options
general_options["_maximization"] = True

if isinstance(gradient, list):
gradient = [negative_gradient(grad) for grad in gradient]
else:
gradient = negative_gradient(gradient)

results = minimize(
criterion=criterion,
params=params,
@@ -90,6 +101,8 @@
constraints=constraints,
general_options=general_options,
algo_options=algo_options,
gradient=gradient,
gradient_kwargs=gradient_kwargs,
gradient_options=gradient_options,
logging=logging,
log_options=log_options,
@@ -99,15 +112,27 @@

    # Undo the sign switch in the results. ``results`` is either a tuple of
    # (info, params) or a list of such tuples.
-    if isinstance(results, list):
-        for result in results:
-            result[0]["fitness"] = -result[0]["fitness"]
-    else:
-        results[0]["fitness"] = -results[0]["fitness"]
+    if not isinstance(results, list):
+        results = [results]
+
+    results = [_undo_sign_switch(res) for res in results]
+
+    results = results[0] if len(results) == 1 else results

return results


def _undo_sign_switch(res):
info, params = res
info = info.copy()
info["fitness"] = -info["fitness"]
if "jacobian" in info and info["jacobian"] is not None:
info["jacobian"] = (-np.array(info["jacobian"])).tolist()
if "hessian" in info and info["hessian"] is not None:
info["hessian"] = (-np.array(info["hessian"])).tolist()
return info, params


def minimize(
criterion,
params,
@@ -116,6 +141,8 @@
constraints=None,
general_options=None,
algo_options=None,
gradient=None,
gradient_kwargs=None,
gradient_options=None,
logging=DEFAULT_DATABASE_NAME,
log_options=None,
@@ -152,6 +179,9 @@
after the optimization(s) finish(es).
algo_options (dict or list of dicts): Algorithm specific configurations for the
optimization.
        gradient (callable): Gradient of the criterion function. Takes params as its
            first argument and returns the gradient as a numpy array or pandas Series.
gradient_kwargs (dict): Additional keyword arguments for the gradient.
gradient_options (dict): Options for the gradient function.
logging (str or pathlib.Path or list): Path(s) to (an) sqlite3 file(s) which
typically has the file extension ``.db``. If the file does not exist,
@@ -172,9 +202,6 @@
        of the untransformed problem as specified by the user.
"""
-    # Gradients are currently not allowed to be passed to minimize.
-    gradient = None

arguments = broadcast_arguments(
criterion=criterion,
params=params,
Expand All @@ -184,6 +211,7 @@ def minimize(
general_options=general_options,
algo_options=algo_options,
gradient=gradient,
gradient_kwargs=gradient_kwargs,
gradient_options=gradient_options,
logging=logging,
log_options=log_options,
@@ -353,7 +381,7 @@ def _process_optimization_results(results, results_arguments):
res["x"] = list(res["x"])
start_params = args["params"]
params = reparametrize_from_internal(
-        internal=res["x"],
+        internal=np.array(res["x"]),
fixed_values=start_params["_internal_fixed_value"].to_numpy(),
pre_replacements=start_params["_pre_replacements"].to_numpy(dtype="int"),
processed_constraints=args["constraints"],
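
To see the new results processing in isolation, a toy run of _undo_sign_switch as defined in the hunk above (the info dict is made up):

import numpy as np

def _undo_sign_switch(res):
    info, params = res
    info = info.copy()
    info["fitness"] = -info["fitness"]
    if "jacobian" in info and info["jacobian"] is not None:
        info["jacobian"] = (-np.array(info["jacobian"])).tolist()
    if "hessian" in info and info["hessian"] is not None:
        info["hessian"] = (-np.array(info["hessian"])).tolist()
    return info, params

info = {"fitness": -6.25, "jacobian": [2.0, -5.0], "hessian": None}
new_info, _ = _undo_sign_switch((info, "params"))
print(new_info)
# {'fitness': 6.25, 'jacobian': [-2.0, 5.0], 'hessian': None}
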
(The remaining 5 changed files are not shown.)
