Skip to content

Commit 88ab096

Browse files
author
sayli2212
committed
Code Files Added
Code Files Added
1 parent dcf4ff7 commit 88ab096

File tree

48 files changed

+30276
-0
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

48 files changed

+30276
-0
lines changed

Chapter02/2. Building Blocks Of Neural Networks.ipynb

Lines changed: 672 additions & 0 deletions
Large diffs are not rendered by default.
Lines changed: 314 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,314 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "markdown",
5+
"metadata": {},
6+
"source": [
7+
"### Layers : Fundamental blocks of Neural Network"
8+
]
9+
},
10+
{
11+
"cell_type": "code",
12+
"execution_count": 20,
13+
"metadata": {},
14+
"outputs": [],
15+
"source": [
16+
"import torch\n",
17+
"from torch.nn import Linear, ReLU\n",
18+
"import torch.nn as nn\n",
19+
"import numpy as np\n",
20+
"from torch.autograd import Variable\n",
21+
"\n"
22+
]
23+
},
24+
{
25+
"cell_type": "code",
26+
"execution_count": 2,
27+
"metadata": {},
28+
"outputs": [
29+
{
30+
"data": {
31+
"text/plain": [
32+
"Variable containing:\n",
33+
" 0.6173 0.1456 -0.3605 -0.0123 -0.9308\n",
34+
"[torch.FloatTensor of size 1x5]"
35+
]
36+
},
37+
"execution_count": 2,
38+
"metadata": {},
39+
"output_type": "execute_result"
40+
}
41+
],
42+
"source": [
    "# A Linear layer: y = x @ W.T + b, mapping 10 features to 5.\n",
    "# (Construct it once — rebuilding it would discard the initialized weights.)\n",
    "myLayer = Linear(in_features=10,out_features=5,bias=True)\n",
    "inp = Variable(torch.randn(1,10))\n",
    "myLayer(inp)"
]
48+
},
49+
{
50+
"cell_type": "code",
51+
"execution_count": 3,
52+
"metadata": {},
53+
"outputs": [
54+
{
55+
"data": {
56+
"text/plain": [
57+
"Parameter containing:\n",
58+
"-0.1077 0.2764 -0.0719 -0.1307 0.1424 -0.2288 -0.1666 0.1439 0.3042 -0.2801\n",
59+
"-0.1775 0.2681 -0.0646 -0.1185 -0.3148 -0.1193 0.0045 0.1840 -0.2018 0.0299\n",
60+
" 0.1641 0.0864 -0.0993 0.0208 0.0890 0.1002 -0.1926 -0.0080 -0.1631 -0.0869\n",
61+
" 0.2058 0.1669 0.1589 -0.2068 -0.2168 0.1223 -0.2303 -0.1564 -0.1564 -0.2926\n",
62+
"-0.3048 -0.2558 -0.2374 -0.0940 -0.0869 0.1781 -0.0560 -0.2655 0.1365 0.1312\n",
63+
"[torch.FloatTensor of size 5x10]"
64+
]
65+
},
66+
"execution_count": 3,
67+
"metadata": {},
68+
"output_type": "execute_result"
69+
}
70+
],
71+
"source": [
72+
"myLayer.weight"
73+
]
74+
},
75+
{
76+
"cell_type": "code",
77+
"execution_count": 4,
78+
"metadata": {},
79+
"outputs": [
80+
{
81+
"data": {
82+
"text/plain": [
83+
"Parameter containing:\n",
84+
" 0.3093\n",
85+
"-0.1218\n",
86+
"-0.2188\n",
87+
" 0.0249\n",
88+
"-0.1592\n",
89+
"[torch.FloatTensor of size 5]"
90+
]
91+
},
92+
"execution_count": 4,
93+
"metadata": {},
94+
"output_type": "execute_result"
95+
}
96+
],
97+
"source": [
98+
"myLayer.bias"
99+
]
100+
},
101+
{
102+
"cell_type": "markdown",
103+
"metadata": {},
104+
"source": [
105+
"### Stacking Linear layers"
106+
]
107+
},
108+
{
109+
"cell_type": "code",
110+
"execution_count": 5,
111+
"metadata": {},
112+
"outputs": [
113+
{
114+
"data": {
115+
"text/plain": [
116+
"Variable containing:\n",
117+
" 0.4065 -0.7448\n",
118+
"[torch.FloatTensor of size 1x2]"
119+
]
120+
},
121+
"execution_count": 5,
122+
"metadata": {},
123+
"output_type": "execute_result"
124+
}
125+
],
126+
"source": [
127+
"myLayer1 = Linear(10,5)\n",
128+
"myLayer2 = Linear(5,2)\n",
129+
"myLayer2(myLayer1(inp))"
130+
]
131+
},
132+
{
133+
"cell_type": "markdown",
134+
"metadata": {},
135+
"source": [
136+
"### PyTorch Non-linear Activations"
137+
]
138+
},
139+
{
140+
"cell_type": "code",
141+
"execution_count": 21,
142+
"metadata": {},
143+
"outputs": [
144+
{
145+
"data": {
146+
"text/plain": [
147+
"Variable containing:\n",
148+
" 1 2 0 0\n",
149+
"[torch.FloatTensor of size 1x4]"
150+
]
151+
},
152+
"execution_count": 21,
153+
"metadata": {},
154+
"output_type": "execute_result"
155+
}
156+
],
157+
"source": [
158+
"sample_data = Variable(torch.Tensor([[1,2,-1,-1]])) \n",
159+
"myRelu = ReLU()\n",
160+
"myRelu(sample_data)\n"
161+
]
162+
},
163+
{
164+
"cell_type": "code",
165+
"execution_count": 22,
166+
"metadata": {},
167+
"outputs": [
168+
{
169+
"data": {
170+
"text/plain": [
171+
"Variable containing:\n",
172+
" 1 2 0 0\n",
173+
"[torch.FloatTensor of size 1x4]"
174+
]
175+
},
176+
"execution_count": 22,
177+
"metadata": {},
178+
"output_type": "execute_result"
179+
}
180+
],
181+
"source": [
182+
"import torch.nn as nn\n",
183+
"import torch.nn.functional as F\n",
184+
"sample_data = Variable(torch.Tensor([[1,2,-1,-1]])) \n",
185+
"f = F.relu(sample_data) # Much simpler.\n",
186+
"f"
187+
]
188+
},
189+
{
190+
"cell_type": "markdown",
191+
"metadata": {},
192+
"source": [
193+
"### Neural Network "
194+
]
195+
},
196+
{
197+
"cell_type": "code",
198+
"execution_count": 24,
199+
"metadata": {
200+
"collapsed": true
201+
},
202+
"outputs": [],
203+
"source": [
    "class MyFirstNetwork(nn.Module):\n",
    "    \"\"\"Two fully connected layers with a ReLU non-linearity in between.\"\"\"\n",
    "    def __init__(self,input_size,hidden_size,output_size):\n",
    "        super(MyFirstNetwork,self).__init__()\n",
    "        self.layer1 = nn.Linear(input_size,hidden_size)\n",
    "        self.layer2 = nn.Linear(hidden_size,output_size)\n",
    "    # nn.Module.__call__ dispatches to forward(); the method must be named\n",
    "    # forward (not __forward__), or calling the network raises NotImplementedError.\n",
    "    def forward(self,input):\n",
    "        out = self.layer1(input)\n",
    "        # nn.ReLU is a Module class — nn.ReLU(out) would construct a module,\n",
    "        # not apply the activation. Use the functional form instead.\n",
    "        out = F.relu(out)\n",
    "        out = self.layer2(out)\n",
    "        return out"
]
215+
},
216+
{
217+
"cell_type": "markdown",
218+
"metadata": {},
219+
"source": [
220+
"### Loss"
221+
]
222+
},
223+
{
224+
"cell_type": "code",
225+
"execution_count": 27,
226+
"metadata": {
227+
"collapsed": true
228+
},
229+
"outputs": [],
230+
"source": [
231+
"loss = nn.MSELoss()\n",
232+
"input = Variable(torch.randn(3, 5), requires_grad=True) \n",
233+
"target = Variable(torch.randn(3, 5))\n",
234+
"output = loss(input, target)\n",
235+
"output.backward()"
236+
]
237+
},
238+
{
239+
"cell_type": "code",
240+
"execution_count": 29,
241+
"metadata": {
242+
"collapsed": true
243+
},
244+
"outputs": [],
245+
"source": [
    "def cross_entropy(true_label, prediction):\n",
    "    \"\"\"Binary cross-entropy for a single probability prediction in (0, 1).\"\"\"\n",
    "    # Use np.log — a bare `log` is undefined here (only numpy is imported).\n",
    "    if true_label == 1:\n",
    "        return -np.log(prediction)\n",
    "    else:\n",
    "        return -np.log(1 - prediction)"
]
252+
},
253+
{
254+
"cell_type": "code",
255+
"execution_count": 30,
256+
"metadata": {
257+
"collapsed": true
258+
},
259+
"outputs": [],
260+
"source": [
261+
"loss = nn.CrossEntropyLoss()\n",
262+
"input = Variable(torch.randn(3, 5), requires_grad=True) \n",
263+
"target = Variable(torch.LongTensor(3).random_(5)) \n",
264+
"output = loss(input, target)\n",
265+
"output.backward()"
266+
]
267+
},
268+
{
269+
"cell_type": "markdown",
270+
"metadata": {},
271+
"source": [
272+
"### Optimizer"
273+
]
274+
},
275+
{
276+
"cell_type": "code",
277+
"execution_count": null,
278+
"metadata": {},
279+
"outputs": [],
280+
"source": [
281+
"# for demo\n",
282+
"import torch.optim as optim\n",
283+
"optimizer = optim.SGD(model.parameters(), lr = 0.01)\n",
284+
"for input, target in dataset:\n",
285+
" optimizer.zero_grad()\n",
286+
" output = model(input)\n",
287+
" loss = loss_fn(output, target)\n",
288+
" loss.backward()\n",
289+
" optimizer.step()"
290+
]
291+
}
292+
],
293+
"metadata": {
294+
"kernelspec": {
295+
"display_name": "Python 3",
296+
"language": "python",
297+
"name": "python3"
298+
},
299+
"language_info": {
300+
"codemirror_mode": {
301+
"name": "ipython",
302+
"version": 3
303+
},
304+
"file_extension": ".py",
305+
"mimetype": "text/x-python",
306+
"name": "python",
307+
"nbconvert_exporter": "python",
308+
"pygments_lexer": "ipython3",
309+
"version": "3.6.3"
310+
}
311+
},
312+
"nbformat": 4,
313+
"nbformat_minor": 2
314+
}

0 commit comments

Comments
 (0)