plot_stochastic_dynamics.py
"""
Stochastic constrained optimization dynamics.
================================================
Sets up simple 2-d problems on Linf balls to visualize dynamics of various
stochastic constrained optimization algorithms.
"""
import matplotlib.pyplot as plt
import numpy as np
import torch

from chop.constraints import LinfBall
from chop.stochastic import PGD, PGDMadry, FrankWolfe, S3CM

torch.random.manual_seed(0)

OPTIMIZER_CLASSES = [PGD, PGDMadry, FrankWolfe, S3CM]

def setup_problem(make_nonconvex=False):
    radius = 1.
    # Target on the boundary of the constraint set: ||x_star||_inf == radius.
    x_star = torch.tensor([radius, radius / 2])
    x_0 = torch.zeros_like(x_star)

    def loss_func(x):
        # Quadratic loss centered at x_star.
        val = .5 * ((x - x_star) ** 2).sum()
        if make_nonconvex:
            # Optional oscillatory term that makes the landscape nonconvex.
            val += .1 * torch.sin(50 * torch.norm(x, p=1) + .1)
        return val

    constraint = LinfBall(radius)
    return x_0, x_star, loss_func, constraint
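
# Conceptually, the Linf ball's two constraint oracles are (a sketch of the
# math, not necessarily chop's exact calling convention):
#   prox: Euclidean projection onto the ball, i.e. clip each coordinate to
#         [-r, r]; for r = 1, torch.clamp(torch.tensor([1.7, -.3]), -1., 1.)
#         gives tensor([1.0000, -0.3000]).
#   lmo:  linear minimization oracle, returning the ball vertex most aligned
#         with the negative gradient, -r * torch.sign(grad).
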
def optimize(x_0, loss_func, constraint, optimizer_class, iterations=10):
    x = x_0.detach().clone()
    x.requires_grad = True

    # Use Madry's heuristic for step size.
    lr = {
        FrankWolfe: 2.5 / iterations,
        PGD: 2.5 * constraint.alpha / iterations * 2.,
        PGDMadry: 2.5 / iterations,
        S3CM: 2.5 / iterations
    }
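    # With radius 1 and iterations=10 this gives 2.5 * 1 / 10 * 2 = 0.5 for
    # PGD and 2.5 / 10 = 0.25 for the rest; 2.5 * radius / iterations is the
    # schedule popularized by Madry et al.'s PGD attack.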
    prox, lmo = constraint.prox, constraint.lmo
    # Each optimizer class takes the constraint oracles its update rule
    # needs: projection-based methods use the prox, Frank-Wolfe its lmo,
    # and the splitting method S3CM takes the prox as its second prox.
    constraint_oracles = {
        PGD: {'prox': [prox]},
        PGDMadry: {'prox': [prox], 'lmo': [lmo]},
        FrankWolfe: {'lmo': [lmo]},
        S3CM: {'prox2': [prox]}
    }
    optimizer = optimizer_class([x], **constraint_oracles[optimizer_class],
                                lr=lr[optimizer_class])
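    # With the defaults above (iterations=10) the Frank-Wolfe case, for
    # instance, expands to FrankWolfe([x], lmo=[lmo], lr=0.25).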
    iterates = [x.data.numpy().copy()]
    losses = []
    for _ in range(iterations):
        optimizer.zero_grad()
        loss = loss_func(x)
        loss.backward()
        optimizer.step()
        losses.append(loss.item())
        iterates.append(x.data.numpy().copy())

    # Also record the loss at the final iterate.
    loss = loss_func(x)
    losses.append(loss.item())
    return losses, iterates
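
# Note: losses and iterates each hold iterations + 1 entries (the starting
# point is recorded before the loop, the final loss after it), which is why
# the plot below uses np.arange(iterations + 1).
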
if __name__ == "__main__":
    x_0, x_star, loss_func, constraint = setup_problem(make_nonconvex=False)
    iterations = 10

    losses_all = {}
    iterates_all = {}
    for opt_class in OPTIMIZER_CLASSES:
        losses_, iterates_ = optimize(x_0, loss_func, constraint, opt_class,
                                      iterations)
        losses_all[opt_class.name] = losses_
        iterates_all[opt_class.name] = iterates_
    # Plot the loss curve of each optimizer.
    fig, ax = plt.subplots()
    for opt_class in OPTIMIZER_CLASSES:
        ax.plot(np.arange(iterations + 1), losses_all[opt_class.name],
                label=opt_class.name)
    fig.legend()
    plt.show()
    # Plot the trajectory of each optimizer's iterates in the 2-d domain.
    fig, axes = plt.subplots(nrows=2, ncols=2, sharex=True, sharey=True)
    for ax, opt_class in zip(axes.reshape(-1), OPTIMIZER_CLASSES):
        ax.plot(*zip(*iterates_all[opt_class.name]), '-o',
                label=opt_class.name, alpha=.6)
        ax.set_xlim(-1, 1)
        ax.set_ylim(-1, 1)
        ax.legend(loc='lower left')
    plt.show()
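
    # To watch the same dynamics on a rougher landscape, rerun the script
    # with setup_problem(make_nonconvex=True).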