-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathaugmentedLagrangianDescent.py
120 lines (99 loc) · 4.21 KB
/
augmentedLagrangianDescent.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
# Optimization for Engineers - Dr. Johannes Hild
# augmented Lagrangian descent
# Purpose: Find xmin to satisfy the stationarity condition for the augmented Lagrangian: norm(xmin - P(xmin - gradA(xmin)))<=eps
# And xmin also satisfies the feasibility condition norm(h(xmin))<=delta.
# Input Definition:
# f: objective class with methods .objective() and .gradient() and .hessian()
# P: box projection class with method .project() and .activeIndexSet()
# h: objective class with methods .objective() and .gradient() and .hessian(), equality constraint.
# x0: column vector in R ** n(domain point)
# alpha0: real value, starting guess for Lagrangian multiplier for h. Default value: 0.
# eps: tolerance for termination. Default value: 1.0e-3
# delta: positive value in (0,eps), tolerance for feasibility. Default value: 1.0e-6.
# verbose: bool, if set to true, verbose information is displayed.
# Output Definition:
# xmin: column vector in R ** n(domain point)
# alphamin: real value, approximates Lagrangian multiplier.
# Required files:
# xmin_sub = projectedNewtonDescent(f, P, x_sub, eps_sub)
# myAugmentedObjective = augmentedLagrangianObjective(f, h, alpha, gamma)
# Test cases:
# A = np.array([[4, 0], [0, 2]], dtype=float)
# B = np.array([[0], [0]], dtype=float)
# C = 1
# myObjective = quadraticObjective(A, B, C)
# a = np.array([[0], [0]])
# b = np.array([[2], [2]])
# myBox = projectionInBox(a, b)
# D = np.array([[2, 0], [0, 2]], dtype=float)
# E = np.array([[0], [0]], dtype=float)
# F = -1
# myConstraint = quadraticObjective(D, E, F)
# x0 = np.array([[1], [1]], dtype=float)
# alpha0 = 0
# eps = 1.0e-3
# delta = 1.0e-6
# [xmin, alphamin] = augmentedLagrangianDescent(myObjective, myBox, myConstraint, x0, alpha0, eps, delta, 1)
# should return xmin close to [[0], [1]] and alphamin close to -1
# p = np.array([[ 2.9999039 ], [ 1.99851503], [16.05570494]], dtype=float)
# myObjective = benchmarkObjective(p)
# a = np.array([[0], [-4], [-1]])
# b = np.array([[8], [4], [1]])
# myBox = projectionInBox(a, b)
# D = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 2]], dtype=float)
# E = np.array([[-8], [0], [0]], dtype=float)
# F = 7
# myConstraint = quadraticObjective(D, E, F)
# x0 = np.array([[-8], [-4], [-1]], dtype=float)
# alpha0 = 0
# eps = 1.0e-3
# delta = 1.0e-6
# [xmin, alphamin] = augmentedLagrangianDescent(myObjective, myBox, myConstraint, x0, alpha0, eps, delta, 1)
# should return xmin close to [[5.56], [-2.55], [-0.20]] and alphamin close to 16.4
import numpy as np
import projectedNewtonDescent as PD
import augmentedLagrangianObjective as AO
def matrnr():
    """Return the author's matriculation number."""
    return 23062789
def augmentedLagrangianDescent(f, P, h, x0: np.array, alpha0=0, eps=1.0e-3, delta=1.0e-6, verbose=0):
    """Minimize f over a box subject to the equality constraint h(x) = 0.

    Runs an augmented Lagrangian outer loop: each iteration approximately
    minimizes the augmented Lagrangian A(x; alpha, gamma) over the box with
    projectedNewtonDescent, then either updates the multiplier alpha (when
    the constraint violation is small enough) or increases the penalty gamma.

    Parameters:
    f: objective class with .objective(), .gradient(), .hessian()
    P: box projection class with .project() and .activeIndexSet()
    h: equality-constraint class with .objective(), .gradient(), .hessian()
    x0: column vector in R**n, starting point (projected into the box first)
    alpha0: real value, initial guess for the Lagrangian multiplier. Default: 0.
    eps: positive tolerance for the stationarity test
         norm(x - P(x - gradA(x))) <= eps. Default: 1.0e-3.
    delta: positive value in (0, eps), tolerance for feasibility
           norm(h(x)) <= delta. Default: 1.0e-6.
    verbose: if truthy, print progress information.

    Returns:
    [xmin, alphamin]: approximate minimizer and multiplier.

    Raises:
    TypeError: if eps <= 0, or if delta is not in (0, eps).
    """
    if eps <= 0:
        raise TypeError('range of eps is wrong!')
    if delta <= 0 or delta >= eps:
        # Fixed copy-paste bug: this message previously claimed eps was wrong.
        raise TypeError('range of delta is wrong!')

    if verbose:
        print('Start augmentedLagrangianDescent...')

    countIter = 0
    xk = P.project(x0)          # ensure the iterate starts inside the box
    hk = h.objective(xk)        # current constraint violation h(xk)
    alphak = alpha0
    gammak = 10                 # initial penalty parameter
    epsk = 1 / gammak           # inner-solve tolerance, tightened over time
    deltak = epsk**0.1          # feasibility tolerance for multiplier updates

    Ak = AO.augmentedLagrangianObjective(f, h, alphak, gammak)
    Ak_grad = Ak.gradient(xk)

    # Outer loop: stop when both stationarity and feasibility hold.
    while np.linalg.norm(xk - P.project(xk - Ak_grad)) > eps or np.linalg.norm(hk) > delta:
        # Approximately minimize the augmented Lagrangian over the box.
        xk = PD.projectedNewtonDescent(Ak, P, xk, epsk)
        Ak_grad = Ak.gradient(xk)
        hk = h.objective(xk)

        if np.linalg.norm(hk) <= deltak:
            # Feasible enough: first-order multiplier update, tighten tolerances
            # (reuse hk instead of re-evaluating h.objective(xk)).
            alphak = alphak + gammak * hk
            epsk = max(epsk / gammak, eps)
            deltak = max(deltak / gammak**0.9, delta)
        else:
            # Not feasible enough: increase the penalty and reset tolerances.
            gammak = max(10, np.sqrt(gammak)) * gammak
            epsk = 1 / gammak
            deltak = epsk**0.1

        # Rebuild the augmented Lagrangian with the updated alpha/gamma.
        Ak = AO.augmentedLagrangianObjective(f, h, alphak, gammak)
        Ak_grad = Ak.gradient(xk)
        countIter = countIter + 1

    xp = xk
    alphap = alphak

    if verbose:
        print('augmentedLagrangianDescent terminated after ', countIter, ' steps')

    return [xp, alphap]