-
# SmallPebble

[![](https://github.com/sradc/smallpebble/workflows/Python%20package/badge.svg)](https://github.com/sradc/smallpebble/commits/)
@@ -79,7 +78,9 @@ plt.show()
```

- ![png](https://raw.githubusercontent.com/sradc/SmallPebble/master/readme_files/readme_4_0.png)
+
+ ![png](https://raw.githubusercontent.com/sradc/SmallPebble/master/readme_files/README_4_0.png)
+

@@ -113,7 +114,7 @@ BATCH_SIZE = 200
eval_batch = sp.batch(X_eval, y_eval, BATCH_SIZE)
adam = sp.Adam()  # Adam optimization

- for i, (xbatch, ybatch) in tqdm(enumerate(sp.batch(X, y, BATCH_SIZE)), total=NUM_EPOCHS):
+ for i, (xbatch, ybatch) in tqdm(enumerate(sp.batch(X, y, BATCH_SIZE)), total=NUM_ITERS):
    if i >= NUM_ITERS: break

    X_in.assign_value(sp.Variable(xbatch))
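The `total=` fix above matters because `sp.batch(X, y, BATCH_SIZE)` is consumed as a generator, so tqdm cannot infer its length and the progress bar only knows whatever `total` claims; the explicit `break` suggests the generator yields more batches than the loop uses. A minimal, SmallPebble-free sketch of the same pattern (the `batches` generator below is a hypothetical stand-in for `sp.batch`):

```python
from itertools import count
from tqdm import tqdm

NUM_ITERS = 300

def batches():
    # Hypothetical stand-in for sp.batch(X, y, BATCH_SIZE):
    # yields indefinitely, hence the explicit break in the loop.
    for step in count():
        yield step

for i, _ in tqdm(enumerate(batches()), total=NUM_ITERS):
    if i >= NUM_ITERS:
        break
```

With `total=NUM_EPOCHS` the bar reports progress against the wrong denominator; `NUM_ITERS` matches the number of iterations the loop actually runs.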
@@ -138,7 +139,7 @@ for i, (xbatch, ybatch) in tqdm(enumerate(sp.batch(X, y, BATCH_SIZE)), total=NUM
        validation_acc.append(accuracy)

# Plot results:
- print(f'Final validation accuracy: {validation_acc[-10].mean()}')
+ print(f'Final validation accuracy: {np.mean(validation_acc[-10:])}')
plt.figure(figsize=(14, 4))
plt.subplot(1, 2, 1)
plt.ylabel('Loss')
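The accuracy fix above is worth spelling out: each `accuracy` appended to `validation_acc` is a NumPy scalar (the result of `(y_eval_batch == predictions).mean()`), so `validation_acc[-10].mean()` just returns the single reading from ten evaluations ago rather than an average. The replacement averages the last ten readings. A small illustration with made-up values:

```python
import numpy as np

# Hypothetical validation accuracies; in the notebook each entry comes from
# (y_eval_batch == predictions).mean(), i.e. a NumPy scalar.
validation_acc = [np.float64(v) for v in (0.90, 0.92, 0.93, 0.95)]

# Old style: a single reading, since .mean() on a NumPy scalar is a no-op
# (index -1 used here because the toy list is short; the notebook used -10).
single_reading = validation_acc[-1].mean()   # 0.95, not an average

# New expression: the mean of the last (up to) ten readings.
print(np.mean(validation_acc[-10:]))         # 0.925
```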
@@ -153,15 +154,21 @@ plt.plot(validation_acc)
plt.show()
```

- Final validation accuracy: 0.935
+
+ HBox(children=(FloatProgress(value=0.0, max=300.0), HTML(value='')))


+ Final validation accuracy: 0.9400000000000001

- ![png](https://raw.githubusercontent.com/sradc/SmallPebble/master/readme_files/readme_6_2.png)
+
+
+
+ ![png](https://raw.githubusercontent.com/sradc/SmallPebble/master/readme_files/README_6_2.png)
+


## Training a convolutional neural network on CIFAR-10, using CuPy
- This was run on [Google Colab](https://colab.research.google.com/), with a GPU (for ~10-15 mins).
+ This was run on [Google Colab](https://colab.research.google.com/), with a GPU.


```python
@@ -194,7 +201,9 @@ plt.show()
```

- ![png](https://raw.githubusercontent.com/sradc/SmallPebble/master/readme_files/readme_9_0.png)
+
+ ![png](https://raw.githubusercontent.com/sradc/SmallPebble/master/readme_files/README_9_0.png)
+

@@ -208,6 +217,7 @@ sp.use(cupy)
print(sp.array_library.library.__name__)  # should be 'cupy'
```

+
cupy

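For context on the hunk above: `sp.use(cupy)` swaps SmallPebble's array backend, and the `print` confirms which library is active. A hedged sketch of how this is typically wrapped so the same notebook also runs without a GPU; the NumPy fallback via `sp.use(numpy)` is an assumption, since only the CuPy call appears in this diff:

```python
import numpy
import smallpebble as sp

try:
    import cupy
    sp.use(cupy)      # run SmallPebble ops on the GPU
except ImportError:
    sp.use(numpy)     # assumed fallback: keep the default NumPy backend

print(sp.array_library.library.__name__)  # 'cupy' if available, otherwise 'numpy'
```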
@@ -227,7 +237,7 @@ y_eval = y_train[45_000:50_000]


```python
- "Define a model."
+ """Define a model."""

X_in = sp.Placeholder()
y_true = sp.Placeholder()
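The model definition in this hunk builds the graph from `sp.Placeholder()` inputs; their values are only supplied later, in the training loop (see `X_in.assign_value(sp.Variable(xbatch))` in an earlier hunk). A minimal sketch of that pattern, using only calls that appear in this diff, with a hypothetical batch shape:

```python
import numpy as np
import smallpebble as sp

X_in = sp.Placeholder()    # graph input; gets a value later
y_true = sp.Placeholder()  # likewise for the labels

# Per batch, the training loop fills the input in, as in the earlier hunk:
X_in.assign_value(sp.Variable(np.random.random([4, 28 * 28])))  # hypothetical batch of 4 flattened images
```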
@@ -278,7 +288,7 @@ BATCH_SIZE = 128
eval_batch = sp.batch(X_eval, y_eval, BATCH_SIZE)
adam = sp.Adam()

- for i, (xbatch, ybatch) in tqdm(enumerate(sp.batch(X, y, BATCH_SIZE)), total=NUM_EPOCHS):
+ for i, (xbatch, ybatch) in tqdm(enumerate(sp.batch(X, y, BATCH_SIZE)), total=NUM_ITERS):
    if i >= NUM_ITERS: break

    xbatch_images = xbatch.reshape([-1, 32, 32, 3])
@@ -303,7 +313,7 @@ for i, (xbatch, ybatch) in tqdm(enumerate(sp.batch(X, y, BATCH_SIZE)), total=NUM
        accuracy = (y_eval_batch == predictions).mean()
        validation_acc.append(accuracy)

- print(f'Final validation accuracy: {validation_acc[-10].mean()}')
+ print(f'Final validation accuracy: {np.mean(validation_acc[-10:])}')
plt.figure(figsize=(14, 4))
plt.subplot(1, 2, 1)
plt.ylabel('Loss')
@@ -318,14 +328,20 @@ plt.plot(validation_acc)
plt.show()
```

- Final validation accuracy: 0.6640625
+
+ HBox(children=(FloatProgress(value=0.0, max=3000.0), HTML(value='')))


+ Final validation accuracy: 0.63828125

- ![png](https://raw.githubusercontent.com/sradc/SmallPebble/master/readme_files/readme_14_2.png)


- It looks like we could improve our results by training for longer (and of course we could improve our model architecture).
+
+ ![png](https://raw.githubusercontent.com/sradc/SmallPebble/master/readme_files/README_14_2.png)
+
+
+
+ It looks like we could improve our results by training for longer (and we could improve our model architecture).


---
@@ -393,14 +409,14 @@ print('grad_c:\n', grad_c)
```

y.array:
- [[0.83571629 1.04060209]
-  [0.83590755 0.76613642]]
+ [[1.32697776 1.24689392]
+  [1.25317932 1.05037433]]
grad_a:
- [[0.29385811 0.41138988]
-  [0.28457185 0.00655705]]
+ [[0.50232192 0.99209074]
+  [0.42936606 0.19027664]]
grad_b:
- [[0.15671755 0.67454729]
-  [0.16250373 0.46305269]]
+ [[0.95442445 0.34679685]
+  [0.94471809 0.7753676 ]]
grad_c:
[2. 2.]

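The hunk above only changes the printed numbers (the notebook was re-run, so the random values differ); the code that produces them sits outside this diff. For orientation, a hedged sketch of the kind of call that yields `y.array` and per-variable gradients. The names `sp.mul`, `sp.add` and `sp.get_gradients` are assumptions, not taken from this diff:

```python
import numpy as np
import smallpebble as sp

a = sp.Variable(np.random.random([2, 2]))
b = sp.Variable(np.random.random([2, 2]))
c = sp.Variable(np.random.random([2]))

# y = a * b + c, built from SmallPebble ops (assumed API).
y = sp.add(sp.mul(a, b), c)

gradients = sp.get_gradients(y)  # assumed helper returning a mapping keyed by variable
print('y.array:\n', y.array)
print('grad_a:\n', gradients[a])
print('grad_c:\n', gradients[c])  # c broadcasts over two rows, so its gradient is [2. 2.]
```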
@@ -420,7 +436,7 @@ print(lazy_node)
print(lazy_node.run())
```

- <smallpebble.smallpebble.Lazy object at 0x7fbb90b9db50>
+ <smallpebble.smallpebble.Lazy object at 0x7f15db527550>
3

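This hunk and the next few only swap the memory addresses and values in the printed output, a side effect of re-running the notebook. For readers skimming the diff, a hedged sketch of what such a lazy node might look like; the `sp.Lazy(...)(...)` construction is an assumption consistent with the printed result of `3`, not code shown in this diff:

```python
import smallpebble as sp

# Build a lazy graph node: nothing is computed until .run() is called.
lazy_node = sp.Lazy(lambda a, b: a + b)(1, 2)  # assumed construction
print(lazy_node)        # e.g. <smallpebble.smallpebble.Lazy object at 0x...>
print(lazy_node.run())  # 3
```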
@@ -432,7 +448,7 @@ print(y)
print(y.run())
```

- <smallpebble.smallpebble.Lazy object at 0x7fbb90b6b910>
+ <smallpebble.smallpebble.Lazy object at 0x7f15db26ea50>
10

@@ -451,8 +467,8 @@ print('result.array:\n', result.array)
```

result.array:
- [[0.85771129 1.28521573]
-  [1.95373653 2.6991665 ]]
+ [[1.96367495 2.26668698]
+  [3.94895132 5.3053362 ]]


You can use .run() as many times as you like.
@@ -467,8 +483,8 @@ print('result.array:\n', result.array)
```

result.array:
- [[ 8.57711288 12.85215729]
-  [19.53736528 26.99166502]]
+ [[19.63674952 22.6668698 ]
+  [39.48951324 53.053362  ]]


Finally, let's compute gradients:
@@ -502,6 +518,6 @@ for learnable in learnables:
    print(learnable)
```

- <smallpebble.smallpebble.Variable object at 0x7fbb3028c090>
- <smallpebble.smallpebble.Variable object at 0x7fbb90ba8b10>
+ <smallpebble.smallpebble.Variable object at 0x7f157a263b10>
+ <smallpebble.smallpebble.Variable object at 0x7f15d2a4ccd0>