
Commit 82bcfb2

fix position

1 parent 1e82b12 commit 82bcfb2

2 files changed (+13, -11)

beginner_source/examples_autograd/two_layer_net_autograd.py (+6, -5)

@@ -46,16 +46,12 @@
     # Tensors, but we do not need to keep references to intermediate values since
     # we are not implementing the backward pass by hand.
     y_pred = x.mm(w1).clamp(min=0).mm(w2)
-
+
     # Compute and print loss using operations on Variables.
     # Now loss is a Variable of shape (1,) and loss.data is a Tensor of shape
     # (1,); loss.data[0] is a scalar value holding the loss.
     loss = (y_pred - y).pow(2).sum()
     print(t, loss.data[0])
-
-    # Manually zero the gradients before running the backward pass
-    w1.grad.data.zero_()
-    w2.grad.data.zero_()

     # Use autograd to compute the backward pass. This call will compute the
     # gradient of loss with respect to all Variables with requires_grad=True.
@@ -68,3 +64,8 @@
     # Tensors.
     w1.data -= learning_rate * w1.grad.data
     w2.data -= learning_rate * w2.grad.data
+
+    # Manually zero the gradients after updating weights
+    w1.grad.data.zero_()
+    w2.grad.data.zero_()
+
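For context, a minimal standalone sketch of the reordered training loop is shown below. It uses the current tensor API (requires_grad=True, loss.item(), torch.no_grad()) instead of the legacy Variable calls in the diff, and the sizes and learning rate are illustrative assumptions, not values taken from the tutorial; the point is only the ordering this commit establishes: backward pass, weight update, then zero the gradients.

import torch

# Illustrative sizes and learning rate (assumed, not taken from the tutorial).
N, D_in, H, D_out = 64, 1000, 100, 10
learning_rate = 1e-6

x = torch.randn(N, D_in)
y = torch.randn(N, D_out)
w1 = torch.randn(D_in, H, requires_grad=True)
w2 = torch.randn(H, D_out, requires_grad=True)

for t in range(500):
    # Forward pass: two-layer net with a ReLU nonlinearity.
    y_pred = x.mm(w1).clamp(min=0).mm(w2)

    # Compute and print the scalar loss.
    loss = (y_pred - y).pow(2).sum()
    print(t, loss.item())

    # Backward pass populates w1.grad and w2.grad.
    loss.backward()

    # Update weights with plain gradient descent, outside autograd tracking.
    with torch.no_grad():
        w1 -= learning_rate * w1.grad
        w2 -= learning_rate * w2.grad

        # Manually zero the gradients after updating weights, matching the
        # position this commit moves them to; otherwise gradients would
        # accumulate across iterations.
        w1.grad.zero_()
        w2.grad.zero_()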

beginner_source/examples_autograd/two_layer_net_custom_function.py (+7, -6)

@@ -60,22 +60,23 @@ def backward(self, grad_output):
 for t in range(500):
     # Construct an instance of our MyReLU class to use in our network
     relu = MyReLU()
-
+
     # Forward pass: compute predicted y using operations on Variables; we compute
     # ReLU using our custom autograd operation.
     y_pred = relu(x.mm(w1)).mm(w2)
-
+
     # Compute and print loss
     loss = (y_pred - y).pow(2).sum()
    print(t, loss.data[0])
-
-    # Manually zero the gradients before running the backward pass
-    w1.grad.data.zero_()
-    w2.grad.data.zero_()

     # Use autograd to compute the backward pass.
     loss.backward()

     # Update weights using gradient descent
     w1.data -= learning_rate * w1.grad.data
     w2.data -= learning_rate * w2.grad.data
+
+    # Manually zero the gradients after updating weights
+    w1.grad.data.zero_()
+    w2.grad.data.zero_()
+
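The same reordering applies to the custom-Function variant of the example. As a sketch only, the MyReLU operation used in this file could be written against the current staticmethod torch.autograd.Function API, shown below; the diff itself uses the older style in which the Function is instantiated per iteration (relu = MyReLU()), so the class body and the usage line here are modernized assumptions rather than the tutorial's exact code.

import torch

class MyReLU(torch.autograd.Function):
    # Sketch of a custom ReLU using the current staticmethod Function API;
    # the tutorial's version subclasses the same base class but is
    # instantiated and called directly inside the loop.

    @staticmethod
    def forward(ctx, input):
        # Save the input so backward can mask gradients where input < 0.
        ctx.save_for_backward(input)
        return input.clamp(min=0)

    @staticmethod
    def backward(ctx, grad_output):
        input, = ctx.saved_tensors
        grad_input = grad_output.clone()
        grad_input[input < 0] = 0
        return grad_input

# Modern usage inside the loop (replaces relu = MyReLU(); y_pred = relu(...)):
# y_pred = MyReLU.apply(x.mm(w1)).mm(w2)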
