-
Notifications
You must be signed in to change notification settings - Fork 0
/
Unconstrained_steepest_descent_backtracking_line_search.py
67 lines (49 loc) · 1.37 KB
/
Unconstrained_steepest_descent_backtracking_line_search.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jul 9 11:51:23 2019
@author: fietekrutein
A script to solve an uncsontrained problem using the steepest descent gradient
method with backtracking line search
"""
import numpy as np
# functions as per example
def q10Fx(x):
    """Gradient of the objective q10F at x.

    The objective is f(x) = sum_k exp(a_k . x + b_k) with the a_k the
    columns of A, so component i of the gradient is
    sum_k A[i, k] * exp(a_k . x + b_k) — i.e. A @ exp(x A + b).
    Returns a length-2 ndarray.
    """
    A = np.array([[1, 1, -1],
                  [3, -3, 0]])
    b = np.array([-0.1, -0.1, -0.1])
    # One exponential per term of the sum, then weight by the coefficients.
    expTerms = np.exp(np.dot(x, A) + b)
    return np.dot(A, expTerms)
def q10F(x):
    """Evaluate the objective f(x) = sum_k exp(a_k . x + b_k).

    The a_k are the columns of A; returns a scalar.
    """
    A = np.array([[1, 1, -1],
                  [3, -3, 0]])
    b = np.array([-0.1, -0.1, -0.1])
    exponents = np.dot(x, A) + b
    return np.exp(exponents).sum()
# method
def GradDescentBack(x, tol=10e-5, alpha=0.1, beta=0.7, f=None, grad=None):
    """Minimize a smooth function by steepest descent with backtracking.

    Parameters
    ----------
    x : array_like
        Starting point.
    tol : float
        Stop when the 2-norm of the gradient drops to tol or below.
    alpha : float
        Armijo sufficient-decrease fraction, 0 < alpha < 0.5 typically.
        (Previously read from a module-level global; now a parameter with
        the same default, so existing callers are unaffected.)
    beta : float
        Step-size shrink factor, 0 < beta < 1.
    f, grad : callable or None
        Objective and its gradient. Default to the module's q10F / q10Fx,
        preserving the original behavior while allowing reuse on other
        problems.

    Returns
    -------
    (xNext, i) : the approximate minimizer and the number of completed
    outer iterations.
    """
    if f is None:
        f = q10F
    if grad is None:
        grad = q10Fx
    xCurrent = np.asarray(x, dtype=float)
    gxVal = grad(xCurrent)
    i = 0
    while True:
        t = 1.0
        xNext = xCurrent - t * gxVal
        # Backtracking line search: shrink t until the Armijo condition
        # f(x + t d) <= f(x) + alpha * t * grad.d holds, with d = -grad
        # so grad.d = -||grad||^2.
        while f(xNext) > f(xCurrent) - alpha * t * np.dot(gxVal, gxVal):
            t = beta * t
            xNext = xCurrent - t * gxVal
        gxNextVal = grad(xNext)
        # First-order stationarity test (was the unidiomatic `== True`).
        if np.linalg.norm(gxNextVal, 2) <= tol:
            break
        xCurrent = xNext
        gxVal = gxNextVal
        i += 1
    return xNext, i
# Backtracking parameters, kept at module level because GradDescentBack
# reads them as globals.
alpha = 0.1   # Armijo sufficient-decrease fraction
beta = 0.7    # step-size shrink factor

if __name__ == "__main__":
    # Guarded so importing this module does not trigger the solve;
    # the original also silently discarded the result, so print it.
    x0 = np.array([0, 0])
    solution, iterations = GradDescentBack(x0, tol=10e-5)
    print("minimizer:", solution)
    print("iterations:", iterations)