Skip to content

Commit 3d15a1e

Browse files
committed
Merge branch 'master' of github.com:SherlockLiao/pytorch-beginner
2 parents 6fc931d + 5ecddcf commit 3d15a1e

File tree

3 files changed

+70
-2
lines changed

3 files changed

+70
-2
lines changed

.gitignore

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,5 @@
11
data
22
.ipynb_checkpoints
33
*.pth
4+
__pycache__
5+
.vscode

03-Neural Network/neural_network.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22

33
import torch
44
from torch import nn, optim
5-
import torch.nn.functional as F
5+
66
from torch.autograd import Variable
77
from torch.utils.data import DataLoader
88
from torchvision import transforms
@@ -17,7 +17,7 @@
1717
transform=transforms.ToTensor(),
1818
download=True)
1919

20-
test_dataset = datasets.MNIST(root='./data', train=False,
20+
test_dataset = datasets.MNIST(root='./data', train=False,
2121
transform=transforms.ToTensor())
2222

2323
train_loader = DataLoader(train_dataset, batch_size=batch_size,

11-backward/backward.py

Lines changed: 66 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,66 @@
"""Autograd demonstrations: a simple scalar gradient, backward through a
non-scalar output, and two hand-built Jacobian matrices.

Modernized from Variable-era PyTorch:
- ``Variable`` wrappers removed (deprecated since torch 0.4; plain tensors
  with ``requires_grad=True`` are equivalent).
- ``retain_variables=True`` renamed to ``retain_graph=True``.
- ``out.data[0]`` replaced by ``out.item()`` (indexing a 0-dim tensor fails
  in modern torch).
- ``grad.data.zero_()`` replaced by ``grad.zero_()``.
"""
import torch as t

# --- simple gradient -----------------------------------------------------
# out = mean(3 * (a + 3)^2), so d(out)/d(a_i) = 3 * (a_i + 3) -> [15, 18]
a = t.tensor([2.0, 3.0], requires_grad=True)
b = a + 3
c = b * b * 3
out = c.mean()
out.backward()
print('*' * 10)
print('=====simple gradient======')
print('input')
print(a.data)
print('compute result is')
print(out.item())
print('input gradients are')
print(a.grad.data)

# --- backward on a non-scalar output ------------------------------------
# n = [m1^2, m2^3]; backward with weight [1, 1] gives
# m.grad = [2*m1, 3*m2^2] = [4, 27]
m = t.tensor([[2.0, 3.0]], requires_grad=True)
n = t.zeros(1, 2)
n[0, 0] = m[0, 0] ** 2
n[0, 1] = m[0, 1] ** 3
n.backward(t.tensor([[1.0, 1.0]]))
print('*' * 10)
print('=====non scalar output======')
print('input')
print(m.data)
print('input gradients are')
print(m.grad.data)

# --- jacobian of k(m) = [m1^2 + 3*m2, m2^2 + 2*m1] ----------------------
# One backward per output component, each with a one-hot weight vector;
# column i of j collects dk_i/dm.
j = t.zeros(2, 2)
k = t.zeros(1, 2)
m.grad.zero_()  # clear the gradient accumulated by the previous backward
k[0, 0] = m[0, 0] ** 2 + 3 * m[0, 1]
k[0, 1] = m[0, 1] ** 2 + 2 * m[0, 0]
# retain_graph (formerly retain_variables) keeps the graph for a 2nd backward
k.backward(t.tensor([[1.0, 0.0]]), retain_graph=True)
j[:, 0] = m.grad.flatten()
m.grad.zero_()
k.backward(t.tensor([[0.0, 1.0]]))
j[:, 1] = m.grad.flatten()
print('jacobian matrix is')
print(j)

# --- jacobian of z = x @ y ----------------------------------------------
# For a linear map the Jacobian dz/dx is just y itself (column i = dz_i/dx).
x = t.tensor([[2.0, 1.0]], requires_grad=True)
y = t.tensor([[1.0, 2.0], [3.0, 4.0]])

z = t.mm(x, y)
jacobian = t.zeros(2, 2)
z.backward(t.tensor([[1.0, 0.0]]), retain_graph=True)  # dz1/dx1, dz1/dx2
jacobian[:, 0] = x.grad.flatten()
x.grad.zero_()
z.backward(t.tensor([[0.0, 1.0]]))  # dz2/dx1, dz2/dx2
jacobian[:, 1] = x.grad.flatten()
print('=========jacobian========')
print('x')
print(x.data)
print('y')
print(y.data)
print('compute result')
print(z.data)
print('jacobian matrix is')
print(jacobian)

0 commit comments

Comments
 (0)