
Commit b8488d9

Author: euler16 (committed)
Commit message: made beginner_source pep8 compliant
1 parent f71137c, commit b8488d9

19 files changed: +323 additions, -288 deletions
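Every hunk in this commit is a style-only edit: blank lines are added so top-level definitions are preceded by two blank lines (E302), lines longer than 79 characters are wrapped (E501), spaces are added around arithmetic operators (E226), and two-space blocks are re-indented to four spaces (E111). As a hedged illustration of the recurring before/after pattern, the snippet below is invented for this page and does not come from any of the changed files:

# Hypothetical snippet, for illustration only, formatted the way this commit
# reformats the tutorials: two blank lines before a top-level def (E302),
# four-space indentation (E111), spaces around arithmetic operators (E226),
# and long statements wrapped so no line exceeds 79 characters (E501).
running_loss = 12.345
epoch, i = 0, 1999


def report(epoch, i, running_loss):
    # One print call wrapped across two lines instead of a single long line.
    print('[%d, %5d] loss: %.3f' %
          (epoch + 1, i + 1, running_loss / 2000))


report(epoch, i, running_loss)

The reformatted code behaves exactly as before; only the layout changes.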

beginner_source/blitz/autograd_tutorial.py

Lines changed: 2 additions & 1 deletion

@@ -117,4 +117,5 @@
 ###############################################################
 # **Read Later:**
 #
-# Documentation of ``Variable`` and ``Function`` is at http://pytorch.org/docs/autograd
+# Documentation of ``Variable`` and ``Function`` is at
+# http://pytorch.org/docs/autograd

beginner_source/blitz/cifar10_tutorial.py

Lines changed: 11 additions & 5 deletions

@@ -87,11 +87,14 @@
 import numpy as np
 
 # functions to show an image
+
+
 def imshow(img):
     img = img / 2 + 0.5  # unnormalize
     npimg = img.numpy()
     plt.imshow(np.transpose(npimg, (1, 2, 0)))
 
+
 # get some random training images
 dataiter = iter(trainloader)
 images, labels = dataiter.next()

@@ -112,15 +115,16 @@ def imshow(img):
 import torch.nn as nn
 import torch.nn.functional as F
 
+
 class Net(nn.Module):
     def __init__(self):
         super(Net, self).__init__()
         self.conv1 = nn.Conv2d(3, 6, 5)
-        self.pool  = nn.MaxPool2d(2, 2)
+        self.pool = nn.MaxPool2d(2, 2)
         self.conv2 = nn.Conv2d(6, 16, 5)
-        self.fc1   = nn.Linear(16 * 5 * 5, 120)
-        self.fc2   = nn.Linear(120, 84)
-        self.fc3   = nn.Linear(84, 10)
+        self.fc1 = nn.Linear(16 * 5 * 5, 120)
+        self.fc2 = nn.Linear(120, 84)
+        self.fc3 = nn.Linear(84, 10)
 
     def forward(self, x):
         x = self.pool(F.relu(self.conv1(x)))

@@ -131,6 +135,7 @@ def forward(self, x):
         x = self.fc3(x)
         return x
 
+
 net = Net()
 
 ########################################################################

@@ -173,7 +178,8 @@ def forward(self, x):
         # print statistics
         running_loss += loss.data[0]
         if i % 2000 == 1999:  # print every 2000 mini-batches
-            print('[%d, %5d] loss: %.3f' % (epoch+1, i+1, running_loss / 2000))
+            print('[%d, %5d] loss: %.3f' %
+                  (epoch + 1, i + 1, running_loss / 2000))
             running_loss = 0.0
 
 print('Finished Training')

beginner_source/blitz/neural_networks_tutorial.py

Lines changed: 15 additions & 9 deletions

@@ -47,16 +47,20 @@ class Net(nn.Module):
 
     def __init__(self):
         super(Net, self).__init__()
-        # 1 input image channel, 6 output channels, 5x5 square convolution kernel
+        # 1 input image channel, 6 output channels, 5x5 square convolution
+        # kernel
         self.conv1 = nn.Conv2d(1, 6, 5)
         self.conv2 = nn.Conv2d(6, 16, 5)
-        self.fc1   = nn.Linear(16 * 5 * 5, 120)  # an affine operation: y = Wx + b
-        self.fc2   = nn.Linear(120, 84)
-        self.fc3   = nn.Linear(84, 10)
+        # an affine operation: y = Wx + b
+        self.fc1 = nn.Linear(16 * 5 * 5, 120)
+        self.fc2 = nn.Linear(120, 84)
+        self.fc3 = nn.Linear(84, 10)
 
     def forward(self, x):
-        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))  # Max pooling over a (2, 2) window
-        x = F.max_pool2d(F.relu(self.conv2(x)), 2)  # If the size is a square you can only specify a single number
+        # Max pooling over a (2, 2) window
+        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
+        # If the size is a square you can only specify a single number
+        x = F.max_pool2d(F.relu(self.conv2(x)), 2)
         x = x.view(-1, self.num_flat_features(x))
         x = F.relu(self.fc1(x))
         x = F.relu(self.fc2(x))

@@ -70,6 +74,7 @@ def num_flat_features(self, x):
             num_features *= s
         return num_features
 
+
 net = Net()
 print(net)
 

@@ -93,7 +98,8 @@ def num_flat_features(self, x):
 print(out)
 
 ########################################################################
-# Zero the gradient buffers of all parameters and backprops with random gradients:
+# Zero the gradient buffers of all parameters and backprops with random
+# gradients:
 net.zero_grad()
 out.backward(torch.randn(1, 10))
 

@@ -111,7 +117,7 @@ def num_flat_features(self, x):
 # a fake batch dimension.
 #
 # Before proceeding further, let's recap all the classes you’ve seen so far.
-# 
+#
 # **Recap:**
 #   - ``torch.Tensor`` - A *multi-dimensional array*.
 #   - ``autograd.Variable`` - *Wraps a Tensor and records the history of

@@ -145,7 +151,7 @@ def num_flat_features(self, x):
 # There are several different
 # `loss functions <http://pytorch.org/docs/nn.html#loss-functions>`_ under the
 # nn package .
-# A simple loss is: ``nn.MSELoss`` which computes the mean-squared error 
+# A simple loss is: ``nn.MSELoss`` which computes the mean-squared error
 # between the input and the target.
 #
 # For example:

beginner_source/examples_autograd/tf_two_layer_net.py

Lines changed: 15 additions & 14 deletions

@@ -60,18 +60,19 @@
 # Now we have built our computational graph, so we enter a TensorFlow session to
 # actually execute the graph.
 with tf.Session() as sess:
-  # Run the graph once to initialize the Variables w1 and w2.
-  sess.run(tf.global_variables_initializer())
+    # Run the graph once to initialize the Variables w1 and w2.
+    sess.run(tf.global_variables_initializer())
 
-  # Create numpy arrays holding the actual data for the inputs x and targets y
-  x_value = np.random.randn(N, D_in)
-  y_value = np.random.randn(N, D_out)
-  for _ in range(500):
-    # Execute the graph many times. Each time it executes we want to bind
-    # x_value to x and y_value to y, specified with the feed_dict argument.
-    # Each time we execute the graph we want to compute the values for loss,
-    # new_w1, and new_w2; the values of these Tensors are returned as numpy
-    # arrays.
-    loss_value, _, _ = sess.run([loss, new_w1, new_w2],
-                                feed_dict={x: x_value, y: y_value})
-    print(loss_value)
+    # Create numpy arrays holding the actual data for the inputs x and targets
+    # y
+    x_value = np.random.randn(N, D_in)
+    y_value = np.random.randn(N, D_out)
+    for _ in range(500):
+        # Execute the graph many times. Each time it executes we want to bind
+        # x_value to x and y_value to y, specified with the feed_dict argument.
+        # Each time we execute the graph we want to compute the values for loss,
+        # new_w1, and new_w2; the values of these Tensors are returned as numpy
+        # arrays.
+        loss_value, _, _ = sess.run([loss, new_w1, new_w2],
+                                    feed_dict={x: x_value, y: y_value})
+        print(loss_value)
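The removed and re-added lines in this hunk, and in the three examples_* files below, render as identical text: the change is whitespace, re-indenting two-space blocks to the four-space blocks PEP 8 expects (the two-space original indent is inferred here, since the rendered diff drops leading whitespace). A minimal sketch of performing that kind of re-indent programmatically, assuming autopep8 is installed; the commit never states which tool, if any, was actually used:

# Hedged sketch: re-indent a two-space-indented source string with autopep8's
# programmatic API. autopep8 is assumed to be installed; whether it produced
# this commit is not stated anywhere in the diff.
import autopep8

two_space_source = (
    "for t in range(500):\n"
    "  y_pred = t * 2\n"
    "  print(t, y_pred)\n"
)

# fix_code returns the reformatted source; E111 re-indents the loop body
# to four spaces.
print(autopep8.fix_code(two_space_source))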

beginner_source/examples_autograd/two_layer_net_autograd.py

Lines changed: 23 additions & 24 deletions

@@ -42,31 +42,30 @@
 
 learning_rate = 1e-6
 for t in range(500):
-  # Forward pass: compute predicted y using operations on Variables; these
-  # are exactly the same operations we used to compute the forward pass using
-  # Tensors, but we do not need to keep references to intermediate values since
-  # we are not implementing the backward pass by hand.
-  y_pred = x.mm(w1).clamp(min=0).mm(w2)
+    # Forward pass: compute predicted y using operations on Variables; these
+    # are exactly the same operations we used to compute the forward pass using
+    # Tensors, but we do not need to keep references to intermediate values since
+    # we are not implementing the backward pass by hand.
+    y_pred = x.mm(w1).clamp(min=0).mm(w2)
 
-  # Compute and print loss using operations on Variables.
-  # Now loss is a Variable of shape (1,) and loss.data is a Tensor of shape
-  # (1,); loss.data[0] is a scalar value holding the loss.
-  loss = (y_pred - y).pow(2).sum()
-  print(t, loss.data[0])
+    # Compute and print loss using operations on Variables.
+    # Now loss is a Variable of shape (1,) and loss.data is a Tensor of shape
+    # (1,); loss.data[0] is a scalar value holding the loss.
+    loss = (y_pred - y).pow(2).sum()
+    print(t, loss.data[0])
 
-  # Use autograd to compute the backward pass. This call will compute the
-  # gradient of loss with respect to all Variables with requires_grad=True.
-  # After this call w1.grad and w2.grad will be Variables holding the gradient
-  # of the loss with respect to w1 and w2 respectively.
-  loss.backward()
+    # Use autograd to compute the backward pass. This call will compute the
+    # gradient of loss with respect to all Variables with requires_grad=True.
+    # After this call w1.grad and w2.grad will be Variables holding the gradient
+    # of the loss with respect to w1 and w2 respectively.
+    loss.backward()
 
-  # Update weights using gradient descent; w1.data and w2.data are Tensors,
-  # w1.grad and w2.grad are Variables and w1.grad.data and w2.grad.data are
-  # Tensors.
-  w1.data -= learning_rate * w1.grad.data
-  w2.data -= learning_rate * w2.grad.data
-
-  # Manually zero the gradients after updating weights
-  w1.grad.data.zero_()
-  w2.grad.data.zero_()
+    # Update weights using gradient descent; w1.data and w2.data are Tensors,
+    # w1.grad and w2.grad are Variables and w1.grad.data and w2.grad.data are
+    # Tensors.
+    w1.data -= learning_rate * w1.grad.data
+    w2.data -= learning_rate * w2.grad.data
 
+    # Manually zero the gradients after updating weights
+    w1.grad.data.zero_()
+    w2.grad.data.zero_()

beginner_source/examples_autograd/two_layer_net_custom_function.py

Lines changed: 39 additions & 38 deletions

@@ -15,31 +15,33 @@
 import torch
 from torch.autograd import Variable
 
+
 class MyReLU(torch.autograd.Function):
-  """
-  We can implement our own custom autograd Functions by subclassing
-  torch.autograd.Function and implementing the forward and backward passes
-  which operate on Tensors.
-  """
-  def forward(self, input):
     """
-    In the forward pass we receive a Tensor containing the input and return a
-    Tensor containing the output. You can cache arbitrary Tensors for use in the
-    backward pass using the save_for_backward method.
+    We can implement our own custom autograd Functions by subclassing
+    torch.autograd.Function and implementing the forward and backward passes
+    which operate on Tensors.
     """
-    self.save_for_backward(input)
-    return input.clamp(min=0)
 
-  def backward(self, grad_output):
-    """
-    In the backward pass we receive a Tensor containing the gradient of the loss
-    with respect to the output, and we need to compute the gradient of the loss
-    with respect to the input.
-    """
-    input, = self.saved_tensors
-    grad_input = grad_output.clone()
-    grad_input[input < 0] = 0
-    return grad_input
+    def forward(self, input):
+        """
+        In the forward pass we receive a Tensor containing the input and return a
+        Tensor containing the output. You can cache arbitrary Tensors for use in the
+        backward pass using the save_for_backward method.
+        """
+        self.save_for_backward(input)
+        return input.clamp(min=0)
+
+    def backward(self, grad_output):
+        """
+        In the backward pass we receive a Tensor containing the gradient of the loss
+        with respect to the output, and we need to compute the gradient of the loss
+        with respect to the input.
+        """
+        input, = self.saved_tensors
+        grad_input = grad_output.clone()
+        grad_input[input < 0] = 0
+        return grad_input
 
 
 dtype = torch.FloatTensor

@@ -59,25 +61,24 @@ def backward(self, grad_output):
 
 learning_rate = 1e-6
 for t in range(500):
-  # Construct an instance of our MyReLU class to use in our network
-  relu = MyReLU()
-
-  # Forward pass: compute predicted y using operations on Variables; we compute
-  # ReLU using our custom autograd operation.
-  y_pred = relu(x.mm(w1)).mm(w2)
+    # Construct an instance of our MyReLU class to use in our network
+    relu = MyReLU()
 
-  # Compute and print loss
-  loss = (y_pred - y).pow(2).sum()
-  print(t, loss.data[0])
+    # Forward pass: compute predicted y using operations on Variables; we compute
+    # ReLU using our custom autograd operation.
+    y_pred = relu(x.mm(w1)).mm(w2)
 
-  # Use autograd to compute the backward pass.
-  loss.backward()
+    # Compute and print loss
+    loss = (y_pred - y).pow(2).sum()
+    print(t, loss.data[0])
 
-  # Update weights using gradient descent
-  w1.data -= learning_rate * w1.grad.data
-  w2.data -= learning_rate * w2.grad.data
+    # Use autograd to compute the backward pass.
+    loss.backward()
 
-  # Manually zero the gradients after updating weights
-  w1.grad.data.zero_()
-  w2.grad.data.zero_()
+    # Update weights using gradient descent
+    w1.data -= learning_rate * w1.grad.data
+    w2.data -= learning_rate * w2.grad.data
 
+    # Manually zero the gradients after updating weights
+    w1.grad.data.zero_()
+    w2.grad.data.zero_()

beginner_source/examples_nn/dynamic_net.py

Lines changed: 36 additions & 35 deletions

@@ -12,36 +12,37 @@
 import torch
 from torch.autograd import Variable
 
+
 class DynamicNet(torch.nn.Module):
-  def __init__(self, D_in, H, D_out):
-    """
-    In the constructor we construct three nn.Linear instances that we will use
-    in the forward pass.
-    """
-    super(DynamicNet, self).__init__()
-    self.input_linear = torch.nn.Linear(D_in, H)
-    self.middle_linear = torch.nn.Linear(H, H)
-    self.output_linear = torch.nn.Linear(H, D_out)
+    def __init__(self, D_in, H, D_out):
+        """
+        In the constructor we construct three nn.Linear instances that we will use
+        in the forward pass.
+        """
+        super(DynamicNet, self).__init__()
+        self.input_linear = torch.nn.Linear(D_in, H)
+        self.middle_linear = torch.nn.Linear(H, H)
+        self.output_linear = torch.nn.Linear(H, D_out)
 
-  def forward(self, x):
-    """
-    For the forward pass of the model, we randomly choose either 0, 1, 2, or 3
-    and reuse the middle_linear Module that many times to compute hidden layer
-    representations.
+    def forward(self, x):
+        """
+        For the forward pass of the model, we randomly choose either 0, 1, 2, or 3
+        and reuse the middle_linear Module that many times to compute hidden layer
+        representations.
 
-    Since each forward pass builds a dynamic computation graph, we can use normal
-    Python control-flow operators like loops or conditional statements when
-    defining the forward pass of the model.
+        Since each forward pass builds a dynamic computation graph, we can use normal
+        Python control-flow operators like loops or conditional statements when
+        defining the forward pass of the model.
 
-    Here we also see that it is perfectly safe to reuse the same Module many
-    times when defining a computational graph. This is a big improvement from Lua
-    Torch, where each Module could be used only once.
-    """
-    h_relu = self.input_linear(x).clamp(min=0)
-    for _ in range(random.randint(0, 3)):
-      h_relu = self.middle_linear(h_relu).clamp(min=0)
-    y_pred = self.output_linear(h_relu)
-    return y_pred
+        Here we also see that it is perfectly safe to reuse the same Module many
+        times when defining a computational graph. This is a big improvement from Lua
+        Torch, where each Module could be used only once.
+        """
+        h_relu = self.input_linear(x).clamp(min=0)
+        for _ in range(random.randint(0, 3)):
+            h_relu = self.middle_linear(h_relu).clamp(min=0)
+        y_pred = self.output_linear(h_relu)
+        return y_pred
 
 
 # N is batch size; D_in is input dimension;

@@ -60,14 +61,14 @@ def forward(self, x):
 criterion = torch.nn.MSELoss(size_average=False)
 optimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)
 for t in range(500):
-  # Forward pass: Compute predicted y by passing x to the model
-  y_pred = model(x)
+    # Forward pass: Compute predicted y by passing x to the model
+    y_pred = model(x)
 
-  # Compute and print loss
-  loss = criterion(y_pred, y)
-  print(t, loss.data[0])
+    # Compute and print loss
+    loss = criterion(y_pred, y)
+    print(t, loss.data[0])
 
-  # Zero gradients, perform a backward pass, and update the weights.
-  optimizer.zero_grad()
-  loss.backward()
-  optimizer.step()
+    # Zero gradients, perform a backward pass, and update the weights.
+    optimizer.zero_grad()
+    loss.backward()
+    optimizer.step()
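None of the hunks above change runtime behavior; they only reformat. One way to confirm that the edited files are now PEP 8 clean is to run a checker over the beginner_source tree. The sketch below uses pycodestyle's programmatic API; it assumes the package is installed and the working directory is the repository root, and the commit itself does not name a checker:

# Hedged sketch: count remaining PEP 8 violations in the edited directory.
# Assumes `pip install pycodestyle` and that the working directory is the
# repository root containing beginner_source/.
import glob

import pycodestyle

style = pycodestyle.StyleGuide(quiet=True)
report = style.check_files(
    glob.glob('beginner_source/**/*.py', recursive=True))
print('PEP 8 violations found:', report.total_errors)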
