Skip to content

Commit 7b19836

Browse files
committed
added 05 and 06
1 parent 8be6978 commit 7b19836

File tree

4 files changed

+231
-0
lines changed

4 files changed

+231
-0
lines changed

05_1_gradientdescent_manually.py

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
import numpy as np

# Linear regression with every training step computed manually (no autograd).
#
# Model:        f = w * x
# Ground truth: f = 2 * x, so training should drive w toward 2.

X = np.array([1, 2, 3, 4], dtype=np.float32)
Y = np.array([2, 4, 6, 8], dtype=np.float32)

w = 0.0  # single scalar weight, initialized at zero


# model output
def forward(x):
    """Return the model prediction w * x."""
    return w * x


# loss = MSE
def loss(y, y_pred):
    """Return the mean squared error between targets y and predictions y_pred."""
    return ((y_pred - y)**2).mean()


# J = MSE = 1/N * (w*x - y)**2
# dJ/dw = 1/N * 2x(w*x - y)
def gradient(x, y, y_pred):
    """Return dJ/dw for the MSE loss, averaged over the N samples.

    BUG FIX: the original `np.dot(2*x, y_pred - y).mean()` already summed
    over the samples (np.dot returns a scalar), so `.mean()` was a no-op
    and the result was N times the true gradient stated in the formula
    above. Use an element-wise product with a real mean instead.
    """
    return np.mean(2 * x * (y_pred - y))


print(f'Prediction before training: f(5) = {forward(5):.3f}')

# Training
learning_rate = 0.01
n_iters = 20

for epoch in range(n_iters):
    # predict = forward pass
    y_pred = forward(X)

    # loss
    l = loss(Y, y_pred)

    # calculate gradients
    dw = gradient(X, Y, y_pred)

    # update weights (plain gradient-descent step)
    w -= learning_rate * dw

    if epoch % 2 == 0:
        print(f'epoch {epoch+1}: w = {w:.3f}, loss = {l:.8f}')

print(f'Prediction after training: f(5) = {forward(5):.3f}')

05_2_gradientdescent_auto.py

Lines changed: 49 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,49 @@
1+
import torch

# Linear regression where autograd replaces the hand-written gradient.
#
# Model:        f = w * x
# Ground truth: f = 2 * x, so training should drive w toward 2.

X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)

# requires_grad=True makes autograd track every operation on w
w = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)


def forward(x):
    """Return the model prediction w * x."""
    return w * x


def loss(y, y_pred):
    """Return the mean squared error between targets and predictions."""
    return ((y_pred - y)**2).mean()


print(f'Prediction before training: f(5) = {forward(5).item():.3f}')

# Training
learning_rate = 0.01
n_iters = 100

for epoch in range(n_iters):
    prediction = forward(X)       # forward pass
    err = loss(Y, prediction)     # scalar MSE loss

    # backward pass: autograd fills in w.grad
    err.backward()

    # Plain SGD step; torch.no_grad() keeps the update itself
    # out of the computation graph.
    with torch.no_grad():
        w -= learning_rate * w.grad

    # Gradients accumulate by default, so reset them each iteration.
    w.grad.zero_()

    if epoch % 10 == 0:
        print(f'epoch {epoch+1}: w = {w.item():.3f}, loss = {err.item():.8f}')

print(f'Prediction after training: f(5) = {forward(5).item():.3f}')

06_1_loss_and_optimizer.py

Lines changed: 76 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,76 @@
1+
# 1) Design model (input, output, forward pass with different layers)
# 2) Construct loss and optimizer
# 3) Training loop
#    - Forward = compute prediction and loss
#    - Backward = compute gradients
#    - Update weights

import torch
import torch.nn as nn

# Linear regression
# f = w * x

# here : f = 2 * x

# 0) Training samples, watch the shape!
# nn.Linear expects 2-D input of shape (n_samples, n_features).
X = torch.tensor([[1], [2], [3], [4]], dtype=torch.float32)
Y = torch.tensor([[2], [4], [6], [8]], dtype=torch.float32)

n_samples, n_features = X.shape
print(f'#samples: {n_samples}, #features: {n_features}')
# 0) create a test sample
X_test = torch.tensor([5], dtype=torch.float32)

# 1) Design Model, the model has to implement the forward pass!
# Here we can use a built-in model from PyTorch
input_size = n_features
output_size = n_features

# we can call this model with samples X
model = nn.Linear(input_size, output_size)

'''
class LinearRegression(nn.Module):
    def __init__(self, input_dim, output_dim):
        super(LinearRegression, self).__init__()
        # define different layers
        self.lin = nn.Linear(input_dim, output_dim)

    def forward(self, x):
        return self.lin(x)

model = LinearRegression(input_size, output_size)
'''

print(f'Prediction before training: f(5) = {model(X_test).item():.3f}')

# 2) Define loss and optimizer
learning_rate = 0.01
n_iters = 100

loss = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)

# 3) Training loop
for epoch in range(n_iters):
    # predict = forward pass with our model
    y_predicted = model(X)

    # loss — nn.MSELoss is documented as loss(input, target);
    # numerically identical either way for MSE, but keep the convention
    l = loss(y_predicted, Y)

    # calculate gradients = backward pass
    l.backward()

    # update weights
    optimizer.step()

    # zero the gradients after updating
    optimizer.zero_grad()

    if epoch % 10 == 0:
        [w, b] = model.parameters()  # unpack parameters
        # BUG FIX: print the scalar loss via .item() — the original passed
        # the raw tensor, printing "tensor(..., grad_fn=...)"
        print('epoch ', epoch+1, ': w = ', w[0][0].item(), ' loss = ', l.item())

print(f'Prediction after training: f(5) = {model(X_test).item():.3f}')

06_2_model_loss_and_optimizer.py

Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
# 1) Design model (input, output, forward pass with different layers)
# 2) Construct loss and optimizer
# 3) Training loop
#    - Forward = compute prediction and loss
#    - Backward = compute gradients
#    - Update weights

import torch
import torch.nn as nn

# Linear regression
# f = w * x

# here : f = 2 * x

# 0) Training samples
X = torch.tensor([1, 2, 3, 4], dtype=torch.float32)
Y = torch.tensor([2, 4, 6, 8], dtype=torch.float32)

# 1) Design Model: Weights to optimize and forward function
w = torch.tensor(0.0, dtype=torch.float32, requires_grad=True)


def forward(x):
    """Return the model prediction w * x."""
    return w * x


print(f'Prediction before training: f(5) = {forward(5).item():.3f}')

# 2) Define loss and optimizer
learning_rate = 0.01
n_iters = 100

# callable function
loss = nn.MSELoss()

optimizer = torch.optim.SGD([w], lr=learning_rate)

# 3) Training loop
for epoch in range(n_iters):
    # predict = forward pass
    y_predicted = forward(X)

    # loss — nn.MSELoss is documented as loss(input, target);
    # numerically identical either way for MSE, but keep the convention
    l = loss(y_predicted, Y)

    # calculate gradients = backward pass
    l.backward()

    # update weights
    optimizer.step()

    # zero the gradients after updating
    optimizer.zero_grad()

    if epoch % 10 == 0:
        # BUG FIX: print scalars via .item() — the original passed the raw
        # tensors, printing "tensor(..., requires_grad=True)" / "grad_fn"
        print('epoch ', epoch+1, ': w = ', w.item(), ' loss = ', l.item())

print(f'Prediction after training: f(5) = {forward(5).item():.3f}')

0 commit comments

Comments
 (0)