"""
Constrained neural network training.
======================================
Trains a small convolutional network on MNIST, using constraints on the weights.
This example is inspired by the official PyTorch MNIST example, which
can be found [here](https://github.com/pytorch/examples/blob/master/mnist/main.py).
"""
from tqdm import tqdm
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from easydict import EasyDict
import chop
# Setup
torch.manual_seed(0)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
# Data Loaders
print("Loading data...")
dataset = chop.utils.data.MNIST("~/datasets/")
loaders = dataset.loaders()
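# `loaders` bundles the train and test DataLoaders; they are consumed below as
# `loaders.train` and `loaders.test`.
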
# Model setup
class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)
        x = self.dropout1(x)
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout2(x)
        x = self.fc2(x)
        output = F.log_softmax(x, dim=1)
        return output
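
# Shape note: with 28x28 MNIST inputs, two valid 3x3 convolutions give 26x26
# and then 24x24 feature maps; the 2x2 max-pool reduces this to 12x12, so the
# flattened features have 64 * 12 * 12 = 9216 entries, matching fc1's input.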
print("Initializing model.")
model = Net()
model.to(device)
# The model outputs log-probabilities (log_softmax), so pair it with NLLLoss
# rather than CrossEntropyLoss, which would apply log_softmax a second time.
criterion = nn.NLLLoss()
# Outer optimization parameters
nb_epochs = 20
momentum = .9
lr = 0.3
# Make constraints
print("Preparing constraints.")
constraints = chop.constraints.make_Lp_model_constraints(model, p=1, value=10000)
proxes = [constraint.prox if constraint else None for constraint in constraints]
lmos = [constraint.lmo if constraint else None for constraint in constraints]
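
# `constraints` is a list aligned with `model.parameters()`: each constrained
# tensor gets an L1-norm ball (p=1) of the requested size, while parameters
# without a constraint (here the biases, which get their own optimizer below)
# appear as None. Each constraint object exposes two oracles used in this
# script:
#   - `prox`: Euclidean projection onto the constraint set, used right below
#     by `make_feasible` to move the initial weights into their feasible sets;
#   - `lmo`: the linear minimization oracle that the Frank-Wolfe optimizer
#     calls at every step, so iterates stay feasible without projections.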
print("Projecting model parameters in their associated constraint sets.")
chop.constraints.make_feasible(model, proxes)
optimizer = chop.stochastic.FrankWolfe(model.parameters(), lmos,
                                       lr=lr, momentum=momentum,
                                       weight_decay=3e-4,
                                       normalization='gradient')
bias_params = [param for name, param in model.named_parameters() if 'bias' in name]
bias_opt = chop.stochastic.PGD(bias_params, lr=1e-2)
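
# Two optimizers run side by side: the stochastic Frank-Wolfe optimizer drives
# the constrained weights through their LMOs, while the biases, which have no
# constraint attached, are updated by a separate PGD optimizer. Both are
# stepped on every minibatch in the training loop below.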
print("Training...")
# Training loop
for epoch in range(nb_epochs):
    model.train()
    train_loss = 0.
    for data, target in tqdm(loaders.train, desc=f'Training epoch {epoch}/{nb_epochs - 1}'):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        bias_opt.zero_grad()
        loss = criterion(model(data), target)
        loss.backward()
        optimizer.step()
        bias_opt.step()
        train_loss += loss.item()
    train_loss /= len(loaders.train)
    print(f'Training loss: {train_loss:.3f}')
    # Evaluate on the clean test data
    model.eval()
    report = EasyDict(nb_test=0, correct=0)
    with torch.no_grad():
        for data, target in tqdm(loaders.test, desc=f'Val epoch {epoch}/{nb_epochs - 1}'):
            data, target = data.to(device), target.to(device)
            # Compute the predicted classes
            _, pred = model(data).max(1)
            # Accumulate clean accuracy
            report.nb_test += data.size(0)
            report.correct += pred.eq(target).sum().item()
    print(f'Val acc on clean examples (%): {report.correct / report.nb_test * 100.:.3f}')