Hi, I am working on implementing gradient descent with backtracking line search. However, when I try to update f(x0), the value doesn't change. Could it be something going on with the lambda expressions? I am not too familiar with them.
import numpy as np
import math
alpha = 0.1
beta = 0.6
f = lambda x: math.exp(x[0] + 3*x[1] - 0.1) + math.exp(x[0] - 3*x[1] -0.1) + math.exp(-1*x[0] - 0.1)
dfx1 = lambda x: math.exp(x[0] + 3*x[1] - 0.1) + math.exp(x[0] - 3*x[1] -0.1) - math.exp(-x[0] - 0.1)
dfx2 = lambda x: 3*math.exp(x[0] + 3*x[1] - 0.1) - 3*math.exp(x[0] - 3*x[1] -0.1)
t = 1
count = 1
x0 = np.array([1.0,1.0])
dx0 = np.array([1e-3, 1e-3])
x = []
d = np.array([-1*dfx1(x0), -1*dfx2(x0)])
grad = np.array([1*dfx1(x0), 1*dfx2(x0)])
def backtrack(x0, dfx1, dfx2, t, alpha, beta, count):
    while (f(x0 + t*d) > f(x0) + alpha*t*np.dot(d, grad) or count < 50):
        d[0] = -1*dfx1(x0)
        d[1] = -1*dfx2(x0)
        grad[0] = dfx1(x0)
        grad[1] = dfx2(x0)
        x0[0] = x0[0] + t*d[0]
        x0[1] = x0[1] + t*d[1]
        t *= beta
        count += 1
        x.append(f(x0))
    return t
t = backtrack(x0, dfx1, dfx2, t, alpha, beta,count)
print("\nfinal step size :", t)
print(np.log(x))
print(f(x0))
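
For reference, this is the overall structure I was aiming for, based on my understanding of the standard Armijo (sufficient-decrease) backtracking rule: the inner while loop only shrinks t, the step x0 = x0 + t*d is taken once per outer iteration, and t is reset to 1 each time. The names grad_f and history are just placeholders I made up, not identifiers from my code above.

import numpy as np
import math

alpha = 0.1
beta = 0.6

f = lambda x: math.exp(x[0] + 3*x[1] - 0.1) + math.exp(x[0] - 3*x[1] - 0.1) + math.exp(-x[0] - 0.1)

def grad_f(x):
    # Gradient built from the same partial derivatives as dfx1/dfx2 above.
    g1 = math.exp(x[0] + 3*x[1] - 0.1) + math.exp(x[0] - 3*x[1] - 0.1) - math.exp(-x[0] - 0.1)
    g2 = 3*math.exp(x[0] + 3*x[1] - 0.1) - 3*math.exp(x[0] - 3*x[1] - 0.1)
    return np.array([g1, g2])

x0 = np.array([1.0, 1.0])
history = []

for _ in range(50):                 # outer gradient-descent iterations
    grad = grad_f(x0)
    d = -grad                       # steepest-descent direction
    t = 1.0                         # reset the step size every iteration
    # Backtracking: shrink t until the sufficient-decrease condition holds.
    while f(x0 + t*d) > f(x0) + alpha*t*np.dot(grad, d):
        t *= beta
    x0 = x0 + t*d                   # take the step with the accepted t
    history.append(f(x0))

print("final x:", x0, " f(x0):", f(x0))

Is the difference between this and my version (updating x0 inside the backtracking loop, and the "or count < 50" in the while condition) what is causing f(x0) to stay the same?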