From b20810a3d21f49760a0a2414f1c7ed32c184e3af Mon Sep 17 00:00:00 2001
From: KOLANICH
Date: Tue, 11 Jun 2019 12:34:14 +0300
Subject: [PATCH] Added Newton_Raphson and simple_gd optimizers.

---
 autograd/misc/optimizers.py | 23 +++++++++++++++++++++++
 1 file changed, 23 insertions(+)

diff --git a/autograd/misc/optimizers.py b/autograd/misc/optimizers.py
index 54ebc02ca..e16182e83 100644
--- a/autograd/misc/optimizers.py
+++ b/autograd/misc/optimizers.py
@@ -29,6 +29,16 @@ def _optimize(grad, x0, callback=None, *args, **kwargs):
 
     return _optimize
 
+@unflatten_optimizer
+def simple_gd(grad, x, callback=None, num_iters=2000, step_size=0.01):
+    """Plain gradient descent without momentum.
+    grad() must have signature grad(x, i), where i is the iteration number."""
+    for i in range(num_iters):
+        g = - grad(x, i)
+        if callback: callback(x, i, g)
+        x = x + step_size * g
+    return x
+
 @unflatten_optimizer
 def sgd(grad, x, callback=None, num_iters=200, step_size=0.1, mass=0.9):
     """Stochastic gradient descent with momentum.
@@ -41,6 +51,19 @@ def sgd(grad, x, callback=None, num_iters=200, step_size=0.1, mass=0.9):
         x = x + step_size * velocity
     return x
 
+def Newton_Raphson(grad, hess, x, optimizerFunc=None, *args, **kwargs):
+    """Second-order optimization via the Newton-Raphson method. `hess` must have the same signature as `grad` and return the Hessian of the objective. `optimizerFunc` is an optimizer from this module (defaults to `simple_gd`); it is driven with the Newton-Raphson step H^{-1} g instead of the raw gradient."""
+    if optimizerFunc is None:
+        optimizerFunc = simple_gd
+
+    def pseudograd(x, i):
+        g = grad(x, i)
+        h = hess(x, i)
+        # Newton-Raphson step: solve h @ step = g (assumes x and g are flat 1-D arrays and h is the (n, n) Hessian)
+        return np.linalg.solve(h, g)
+    return optimizerFunc(pseudograd, x, *args, **kwargs)
+
+
 @unflatten_optimizer
 def rmsprop(grad, x, callback=None, num_iters=100, step_size=0.1, gamma=0.9,
             eps=10**-8):
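
Note for reviewers (not part of the patch): a minimal usage sketch of the proposed Newton_Raphson entry point, assuming the patch above is applied. The quadratic objective loss and the arrays A and b are made up for illustration; autograd.grad and autograd.hessian are the existing autograd helpers used to build the grad/hess callbacks the optimizer expects.

import autograd.numpy as np
from autograd import grad, hessian
from autograd.misc.optimizers import Newton_Raphson

# Toy strictly convex quadratic: 0.5 * x^T A x - b^T x, minimized at A^{-1} b.
A = np.array([[3.0, 0.5],
              [0.5, 2.0]])
b = np.array([1.0, -1.0])

def loss(x, i):
    # i is the iteration index required by the optimizer interface; unused here.
    return 0.5 * np.dot(x, np.dot(A, x)) - np.dot(b, x)

x0 = np.zeros(2)
# Extra keyword arguments are forwarded to the inner optimizer (simple_gd here);
# step_size=1.0 gives undamped Newton steps.
x_opt = Newton_Raphson(grad(loss), hessian(loss), x0, num_iters=25, step_size=1.0)

print(x_opt)                  # Newton iterate
print(np.linalg.solve(A, b))  # analytic minimizer, for comparison

With full Newton steps a quadratic converges in a single iteration, so this mainly exercises the gradient/Hessian plumbing and the forwarding of keyword arguments to the inner optimizer.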