Added Newton_Raphson and simple_gd optimizers. #512

Open · wants to merge 1 commit into base: master
23 changes: 23 additions & 0 deletions autograd/misc/optimizers.py
@@ -29,6 +29,16 @@ def _optimize(grad, x0, callback=None, *args, **kwargs):

    return _optimize

@unflatten_optimizer
def simple_gd(grad, x, callback=None, num_iters=2000, step_size=0.01):
    """Plain gradient descent without momentum.
    grad() must have signature grad(x, i), where i is the iteration number."""
    for i in range(num_iters):
        g = grad(x, i)
        if callback: callback(x, i, g)
        x = x - step_size * g
    return x

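For context, a minimal usage sketch of simple_gd, assuming the gradient is produced with autograd; the quadratic objective, starting point, and step settings below are hypothetical, only the grad(x, i) signature comes from the code above.

import autograd.numpy as np
from autograd import grad as agrad
# Once this patch is merged: from autograd.misc.optimizers import simple_gd

# Hypothetical convex objective with its minimum at x = 3.
objective = lambda x: np.sum((x - 3.0) ** 2)
dobjective = agrad(objective)

# simple_gd only needs grad(x, i); the iteration index i is unused here.
x_opt = simple_gd(lambda x, i: dobjective(x), np.zeros(3),
                  num_iters=500, step_size=0.1)
# x_opt approaches [3., 3., 3.]
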
@unflatten_optimizer
def sgd(grad, x, callback=None, num_iters=200, step_size=0.1, mass=0.9):
"""Stochastic gradient descent with momentum.
Expand All @@ -41,6 +51,19 @@ def sgd(grad, x, callback=None, num_iters=200, step_size=0.1, mass=0.9):
x = x + step_size * velocity
return x

def Newton_Raphson(grad, hess, x, optimizerFunc=None, *args, **kwargs):
    """A second-order optimization method.
    `hess` must have the same signature as `grad` and return the Hessian at x.
    `optimizerFunc` is an optimizer from this module (simple_gd by default); it is
    driven with the Newton-Raphson step in place of the raw gradient."""
    if optimizerFunc is None:
        optimizerFunc = simple_gd

    def pseudograd(x, i):
        g = grad(x, i)
        h = hess(x, i)
        # Newton-Raphson step: solve h @ step = g rather than forming inv(h) explicitly.
        return np.linalg.solve(h, g)
    return optimizerFunc(pseudograd, x, *args, **kwargs)


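For context, a minimal usage sketch of Newton_Raphson, assuming autograd's grad and hessian supply the derivatives; the objective, starting point, and iteration settings below are hypothetical.

import autograd.numpy as np
from autograd import grad as agrad, hessian
# Once this patch is merged: from autograd.misc.optimizers import Newton_Raphson

# Hypothetical convex objective with its minimum at x = 3.
objective = lambda x: np.sum((x - 3.0) ** 2)
g = lambda x, i: agrad(objective)(x)    # gradient; the iteration index is unused
h = lambda x, i: hessian(objective)(x)  # Hessian, same (x, i) signature as g

# Remaining keyword arguments are forwarded to the inner optimizer (simple_gd here).
# With a full Newton step (step_size=1.0) a quadratic converges in one iteration.
x_opt = Newton_Raphson(g, h, np.zeros(3), num_iters=5, step_size=1.0)
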
@unflatten_optimizer
def rmsprop(grad, x, callback=None, num_iters=100,
            step_size=0.1, gamma=0.9, eps=10**-8):