simply _search
linjing-lab committed Dec 14, 2023
1 parent f664a9a commit 0d72acc
Showing 6 changed files with 27 additions and 19 deletions.
2 changes: 1 addition & 1 deletion optimtool/_drive.py
@@ -32,7 +32,7 @@ def C_k(funcs: SympyMutableDenseMatrix,
         eta: float,
         k: int) -> DataType:
     assert k >= 0
-    return np.array(funcs.subs(dict(zip(args, point[0])))).astype(DataType) if k == 0 else (1 / (Q_k(eta, k))) * (eta * Q_k(eta, k-1) * C_k(funcs, args, point, eta, k - 1) + np.array(funcs.subs(dict(zip(args, point[k])))).astype(DataType))
+    return np.array(funcs.subs(dict(zip(args, point[0])))).astype(DataType) if k == 0 else (1 / (Q_k(eta, k))) * (eta * Q_k(eta, k-1) * C_k(funcs, args, point, eta, k-1) + np.array(funcs.subs(dict(zip(args, point[k])))).astype(DataType))
 
 def get_f_delta_gradient(resv: NDArray,
                          argsv: NDArray,
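Reviewer note: the only change here is whitespace normalization (`k - 1` to `k-1`) in the recursive branch. For orientation, this one-liner implements the Zhang-Hager weighted average of past objective values that `ZhangHanger` in `_search.py` consumes:

```latex
C_0 = f(x_0), \qquad
C_k = \frac{\eta\, Q_{k-1}\, C_{k-1} + f(x_k)}{Q_k}, \quad k \ge 1
```

In the usual Zhang-Hager scheme, Q_0 = 1 and Q_k = \eta Q_{k-1} + 1; the body of `Q_k` lives elsewhere in `_drive.py` and is not shown in this diff, so that part is an assumption.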
23 changes: 15 additions & 8 deletions optimtool/_search.py
@@ -23,14 +23,16 @@

 __all__ = ["armijo", "goldstein", "wolfe", "Grippo", "ZhangHanger"]

-def armijo(funcs: SympyMutableDenseMatrix,
+def armijo(funcs: SympyMutableDenseMatrix,
+           res: SympyMutableDenseMatrix,
            args: SympyMutableDenseMatrix,
            x_0: IterPointType,
            d: NDArray,
            gamma: float=0.5,
            c: float=0.1) -> float:
     '''
     :param funcs: SympyMutableDenseMatrix, objective function with `convert` process used for search alpha.
+    :param res: SympyMutableDenseMatrix, gradient function computed by .jacobian function.
     :param args: SympyMutableDenseMatrix, symbolic set with order in a list to construct `dict(zip(args, x_0))`.
     :param x_0: IterPointType, numerical values in a `list` or `tuple` according to the order of `args`.
     :param d: NDArray, current gradient descent direction with format at `numpy.ndarray`.
@@ -44,7 +46,6 @@ def armijo(funcs: SympyMutableDenseMatrix,
     assert c > 0
     assert c < 1
     alpha = 1
-    res = funcs.jacobian(args)
     reps = dict(zip(args, x_0))
     f0 = np.array(funcs.subs(reps)).astype(DataType)
     res0 = np.array(res.subs(reps)).astype(DataType)
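Reviewer note: the backtracking loop that follows is unchanged and outside this hunk. What `armijo` enforces (shrinking `alpha` by `gamma` until acceptance) is the standard sufficient-decrease condition, stated here for orientation:

```latex
f(x_0 + \alpha d) \le f(x_0) + c\,\alpha\,\nabla f(x_0)^{\top} d
```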
@@ -58,6 +59,7 @@
     return alpha

 def goldstein(funcs: SympyMutableDenseMatrix,
+              res: SympyMutableDenseMatrix,
               args: SympyMutableDenseMatrix,
               x_0: IterPointType,
               d: NDArray,
@@ -68,6 +70,7 @@ def goldstein(funcs: SympyMutableDenseMatrix,
               eps: float=1e-3) -> float:
     '''
     :param funcs: SympyMutableDenseMatrix, objective function with `convert` process used for search alpha.
+    :param res: SympyMutableDenseMatrix, gradient function computed by .jacobian function.
     :param args: SympyMutableDenseMatrix, symbolic set with order in a list to construct `dict(zip(args, x_0))`.
     :param x_0: IterPointType, numerical values in a `list` or `tuple` according to the order of `args`.
     :param d: NDArray, current gradient descent direction with format at `numpy.ndarray`.
@@ -86,7 +89,6 @@ def goldstein(funcs: SympyMutableDenseMatrix,
     assert t > 0
     assert eps > 0
     alpha = 1
-    res = funcs.jacobian(args)
     reps = dict(zip(args, x_0))
     f0 = np.array(funcs.subs(reps)).astype(DataType)
     res0 = np.array(res.subs(reps)).astype(DataType)
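Reviewer note: `goldstein` additionally bounds the step from below. The bracketing loop over `[alphas, alphae]` with expansion factor `t` is outside this hunk; the accepted `alpha` satisfies the two standard Goldstein conditions:

```latex
f(x_0) + (1 - c)\,\alpha\,\nabla f(x_0)^{\top} d \;\le\; f(x_0 + \alpha d) \;\le\; f(x_0) + c\,\alpha\,\nabla f(x_0)^{\top} d
```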
@@ -110,7 +112,8 @@
             break
     return alpha

-def wolfe(funcs: SympyMutableDenseMatrix,
+def wolfe(funcs: SympyMutableDenseMatrix,
+          res: SympyMutableDenseMatrix,
           args: SympyMutableDenseMatrix,
           x_0: IterPointType,
           d: NDArray,
@@ -121,6 +124,7 @@ def wolfe(funcs: SympyMutableDenseMatrix,
           eps: float=1e-3) -> float:
     '''
     :param funcs: SympyMutableDenseMatrix, objective function with `convert` process used for search alpha.
+    :param res: SympyMutableDenseMatrix, gradient function computed by .jacobian function.
     :param args: SympyMutableDenseMatrix, symbolic set with order in a list to construct `dict(zip(args, x_0))`.
     :param x_0: IterPointType, numerical values in a `list` or `tuple` according to the order of `args`.
     :param d: NDArray, current gradient descent direction with format at `numpy.ndarray`.
@@ -141,7 +145,6 @@ def wolfe(funcs: SympyMutableDenseMatrix,
     assert alphas < alphae
     assert eps > 0
     alpha = 1
-    res = funcs.jacobian(args)
     reps = dict(zip(args, x_0))
     f0 = np.array(funcs.subs(reps)).astype(DataType)
     res0 = np.array(res.subs(reps)).astype(DataType)
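Reviewer note: `wolfe` accepts `alpha` under the standard weak Wolfe conditions; the bisection loop is outside this hunk, and the constant names c1, c2 below are the conventional ones rather than a quote of the truncated signature:

```latex
f(x_0 + \alpha d) \le f(x_0) + c_1\,\alpha\,\nabla f(x_0)^{\top} d, \qquad
\nabla f(x_0 + \alpha d)^{\top} d \ge c_2\,\nabla f(x_0)^{\top} d
```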
@@ -163,7 +166,8 @@
     return alpha

 # coordinate with `barzilar_borwein`.
-def Grippo(funcs: SympyMutableDenseMatrix,
+def Grippo(funcs: SympyMutableDenseMatrix,
+           res: SympyMutableDenseMatrix,
            args: SympyMutableDenseMatrix,
            x_0: IterPointType,
            d: NDArray,
@@ -175,10 +179,12 @@ def Grippo(funcs: SympyMutableDenseMatrix,
            M: int) -> float:
     '''
     :param funcs: SympyMutableDenseMatrix, objective function with `convert` process used for search alpha.
+    :param res: SympyMutableDenseMatrix, gradient function computed by .jacobian function.
     :param args: SympyMutableDenseMatrix, symbolic set with order in a list to construct `dict(zip(args, x_0))`.
     :param x_0: IterPointType, numerical values in a `list` or `tuple` according to the order of `args`.
     :param d: NDArray, current gradient descent direction with format at `numpy.ndarray`.
     :param k: int, current number of iterative process in `barzilar_borwein` method.
+    :param point: List[IterPointType], contains current iteration points in a list.
     :param c1: float, constant used to constrain alpha adjusted frequency with interval at (0, 1).
     :param beta: float, factor used to expand alpha for adapting to alphas interval.
     :param alpha: float, initial step size for nonmonotonic line search method with assert `> 0`.
@@ -193,7 +199,6 @@ def Grippo(funcs: SympyMutableDenseMatrix,
     assert beta > 0
     assert beta < 1
     reps = dict(zip(args, x_0))
-    res = funcs.jacobian(args)
     res0 = np.array(res.subs(reps)).astype(DataType)
     while 1:
         x = x_0 + (alpha*d)[0]
return alpha

def ZhangHanger(funcs: SympyMutableDenseMatrix,
res: SympyMutableDenseMatrix,
args: SympyMutableDenseMatrix,
x_0: IterPointType,
d: NDArray,
@@ -219,10 +225,12 @@ def ZhangHanger(funcs: SympyMutableDenseMatrix,
                 eta: float) -> float:
     '''
     :param funcs: SympyMutableDenseMatrix, objective function with `convert` process used for search alpha.
+    :param res: SympyMutableDenseMatrix, gradient function computed by .jacobian function.
     :param args: SympyMutableDenseMatrix, symbolic set with order in a list to construct `dict(zip(args, x_0))`.
     :param x_0: IterPointType, numerical values in a `list` or `tuple` according to the order of `args`.
     :param d: NDArray, current gradient descent direction with format at `numpy.ndarray`.
     :param k: int, current number of iterative process in `barzilar_borwein` method.
+    :param point: List[IterPointType], contains current iteration points in a list.
     :param c1: float, constant used to constrain alpha adjusted frequency with interval at (0, 1).
     :param beta: float, factor used to expand alpha for adapting to alphas interval.
     :param alpha: float, initial step size for nonmonotonic line search method with assert `> 0`.
@@ -239,7 +247,6 @@ def ZhangHanger(funcs: SympyMutableDenseMatrix,
     assert beta < 1
     from ._drive import C_k
     reps = dict(zip(args, x_0))
-    res = funcs.jacobian(args)
     res0 = np.array(res.subs(reps)).astype(DataType)
     while 1:
         x = x_0 + (alpha*d)[0]
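Reviewer note: `ZhangHanger` swaps that running maximum for the weighted average `C_k` imported from `_drive.py` (see the recursion noted under the first file); in standard form it accepts `alpha` once

```latex
f(x_k + \alpha d_k) \;\le\; C_k + c_1\,\alpha\,\nabla f(x_k)^{\top} d_k
```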
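Net effect of this file: all five line searches now take the symbolic gradient `res` as their second parameter instead of each call recomputing `funcs.jacobian(args)`, which is the simplification the commit title refers to. A minimal sketch of the new calling convention (the objective, start point, and direction below are illustrative, not from the repository):

```python
import numpy as np
import sympy as sp
from optimtool._search import armijo

x1, x2 = sp.symbols("x1 x2")
funcs = sp.Matrix([(x1 - 1)**2 + 4*(x2 - 2)**2])  # toy objective, assumed for illustration
args = sp.Matrix([x1, x2])
res = funcs.jacobian(args)  # built once by the caller, reused on every iteration

x_0 = (0.0, 0.0)
d = -np.array(res.subs(dict(zip(args, x_0)))).astype(float)  # steepest-descent direction at x_0
alpha = armijo(funcs, res, args, x_0, d)  # new signature: gradient passed in explicitly
```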
4 changes: 2 additions & 2 deletions optimtool/unconstrain/gradient_descent.py
@@ -102,7 +102,7 @@ def steepest(funcs: FuncArray,
             print("{}\t{}\t{}".format(x_0, f[-1], k))
         dk = -np.array(res.subs(reps)).astype(DataType)
         if np.linalg.norm(dk) >= epsilon:
-            alpha = search(funcs, args, x_0, dk)
+            alpha = search(funcs, res, args, x_0, dk)
             x_0 += alpha * dk[0]
             k += 1
         else:
@@ -163,7 +163,7 @@ def barzilar_borwein(funcs: FuncArray,
             print("{}\t{}\t{}".format(x_0, f[-1], k))
         dk = -np.array(res.subs(reps)).astype(DataType)
         if np.linalg.norm(dk) >= epsilon:
-            alpha = search(funcs, args, x_0, dk, k, point, c1, beta, alpha, constant)
+            alpha = search(funcs, res, args, x_0, dk, k, point, c1, beta, alpha, constant)
             delta = alpha * dk[0]
             x_0 = x_0 + delta
             yk = np.array(res.subs(dict(zip(args, x_0)))).astype(DataType) + dk
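Reviewer note: `barzilar_borwein` threads the extra nonmonotone arguments (`k`, `point`, `c1`, `beta`, `alpha`, `constant`) through to `Grippo`/`ZhangHanger`. Since `dk` holds the negative gradient at the old point, the `yk` line computes y_k = ∇f(x_{k+1}) - ∇f(x_k); with s_k = delta this is the secant pair behind the two standard Barzilai-Borwein stepsizes (which one `constant` selects is outside this diff):

```latex
\alpha_{k+1} = \frac{s_k^{\top} s_k}{s_k^{\top} y_k}
\qquad \text{or} \qquad
\alpha_{k+1} = \frac{s_k^{\top} y_k}{y_k^{\top} y_k}
```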
4 changes: 2 additions & 2 deletions optimtool/unconstrain/newton.py
@@ -104,7 +104,7 @@ def modified(funcs: FuncArray,
         hessian = h2h(hessian)
         dk = -np.linalg.inv(hessian).dot(gradient.T).reshape(1, -1)
         if np.linalg.norm(dk) >= epsilon:
-            alpha = search(funcs, args, x_0, dk)
+            alpha = search(funcs, res, args, x_0, dk)
             x_0 += alpha * dk[0]
             k += 1
         else:
@@ -152,7 +152,7 @@ def CG(funcs: FuncArray,
         hessian = np.array(hes.subs(reps)).astype(DataType)
         dk = conjugate(hessian, -gradient, dk0, eps).reshape(1, -1)
         if np.linalg.norm(dk) >= epsilon:
-            alpha = search(funcs, args, x_0, dk)
+            alpha = search(funcs, res, args, x_0, dk)
             x_0 += alpha * dk[0]
             k += 1
         else:
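Reviewer note: both Newton variants pass the same `res` they differentiate with to the line search. The direction they compute, directly (`modified`, after the positive-definite correction `h2h`) or iteratively (`CG`, via `conjugate`), is the Newton step:

```latex
\nabla^2 f(x_k)\, d_k = -\nabla f(x_k)
```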
6 changes: 3 additions & 3 deletions optimtool/unconstrain/newton_quasi.py
@@ -64,7 +64,7 @@ def bfgs(funcs: FuncArray,
         gradient = np.array(res.subs(reps)).astype(DataType)
         dk = -np.linalg.inv(hessian).dot(gradient.T).reshape(1, -1)
         if np.linalg.norm(dk) >= epsilon:
-            alpha = search(funcs, args, x_0, dk)
+            alpha = search(funcs, res, args, x_0, dk)
             delta = alpha * dk # sk
             x_0 += delta[0]
             yk = np.array(res.subs(dict(zip(args, x_0)))).astype(DataType) - np.array(res.subs(reps)).astype(DataType)
@@ -116,7 +116,7 @@ def dfp(funcs: FuncArray,
         gradient = np.array(res.subs(reps)).astype(DataType)
         dk = -hessiani.dot(gradient.T).reshape(1, -1)
         if np.linalg.norm(dk) >= epsilon:
-            alpha = search(funcs, args, x_0, dk)
+            alpha = search(funcs, res, args, x_0, dk)
             delta = alpha * dk # sk
             x_0 += delta[0]
             yk = np.array(res.subs(dict(zip(args, x_0)))).astype(DataType) - np.array(res.subs(reps)).astype(DataType)
@@ -174,7 +174,7 @@ def L_BFGS(funcs: FuncArray,
         grad = np.array(res.subs(reps)).astype(DataType)
         dk = -double_loop(grad, p, s, y, m, k, Hkm).reshape(1, -1)
         if np.linalg.norm(dk) >= epsilon:
-            alphak = search(funcs, args, x_0, dk)
+            alphak = search(funcs, res, args, x_0, dk)
             delta = alphak * dk # sk
             x_0 += delta[0]
             if k > m:
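Reviewer note: in these quasi-Newton callers the `# sk` comments mark s_k = delta, and `yk` is the gradient difference, i.e. the secant pair that drives the updates performed below these hunks. For `bfgs`, the textbook update of the Hessian approximation B_k (the update code itself is truncated here) is:

```latex
B_{k+1} = B_k + \frac{y_k y_k^{\top}}{y_k^{\top} s_k} - \frac{B_k s_k s_k^{\top} B_k}{s_k^{\top} B_k s_k}
```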
7 changes: 4 additions & 3 deletions optimtool/unconstrain/nonlinear_least_square.py
@@ -52,18 +52,19 @@ def gauss_newton(funcr: FuncArray,
     funcr, args, x_0 = f2m(funcr), a2m(args), p2t(x_0)
     assert funcr.shape[0] > 1 and funcr.shape[1] == 1 and args.shape[0] == len(x_0)
     search, f = linear_search(method), []
-    res, funcs = funcr.jacobian(args), sp.Matrix([(1/2)*funcr.T*funcr])
+    resr, funcs = funcr.jacobian(args), sp.Matrix([(1/2)*funcr.T*funcr])
+    res = funcs.jacobian(args)
     while 1:
         reps = dict(zip(args, x_0))
         rk = np.array(funcr.subs(reps)).astype(DataType)
         f.append(get_value(funcs, args, x_0))
         if verbose:
             print("{}\t{}\t{}".format(x_0, f[-1], k))
-        jk = np.array(res.subs(reps)).astype(DataType)
+        jk = np.array(resr.subs(reps)).astype(DataType)
         q, r = np.linalg.qr(jk)
         dk = np.linalg.inv(r).dot(-(q.T).dot(rk)).reshape(1, -1) # operate with x_0
         if np.linalg.norm(dk) > epsilon:
-            alpha = search(funcs, args, x_0, dk)
+            alpha = search(funcs, res, args, x_0, dk)
             x_0 += alpha * dk[0]
             k += 1
         else:
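Reviewer note: this file needed more than a pass-through. The residual Jacobian used to be named `res`, but under the new convention `search` expects the gradient of the scalar objective, so the commit renames the residual Jacobian to `resr` and builds `res = funcs.jacobian(args)` for the line search. The QR step solves the Gauss-Newton system for the residual vector r and its Jacobian J_r:

```latex
J_r = QR, \qquad d = R^{-1}\!\left(-Q^{\top} r\right), \qquad
f = \tfrac{1}{2}\, r^{\top} r, \quad \nabla f = J_r^{\top} r
```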
