
Commit 89ae992

fixed typo when averaging (wasn't slicing) [reran]
1 parent 2df636d commit 89ae992

File tree

4 files changed: +1370 -1353 lines


README.ipynb

+1,326 -1,325 (large diff not rendered by default)

README.md

+44 -28
@@ -1,4 +1,3 @@
-
# SmallPebble

[![](https://github.com/sradc/smallpebble/workflows/Python%20package/badge.svg)](https://github.com/sradc/smallpebble/commits/)
@@ -79,7 +78,9 @@ plt.show()
```


-![png](https://raw.githubusercontent.com/sradc/SmallPebble/master/readme_files/readme_4_0.png)
+
+![png](https://raw.githubusercontent.com/sradc/SmallPebble/master/readme_files/README_4_0.png)
+



@@ -113,7 +114,7 @@ BATCH_SIZE = 200
eval_batch = sp.batch(X_eval, y_eval, BATCH_SIZE)
adam = sp.Adam() # Adam optimization

-for i, (xbatch, ybatch) in tqdm(enumerate(sp.batch(X, y, BATCH_SIZE)), total=NUM_EPOCHS):
+for i, (xbatch, ybatch) in tqdm(enumerate(sp.batch(X, y, BATCH_SIZE)), total=NUM_ITERS):
    if i >= NUM_ITERS: break

    X_in.assign_value(sp.Variable(xbatch))
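
Note: the `total=` change above only affects the progress bar. The loop breaks after `NUM_ITERS` batches, so passing `NUM_EPOCHS` made tqdm report the wrong length. A minimal standalone sketch of the corrected pattern (the values here are illustrative, not from the commit):

```python
from tqdm import tqdm

NUM_ITERS = 300  # illustrative value, not taken from this commit

# `total` only sets the length of the progress bar; the loop itself
# stops via the explicit break after NUM_ITERS iterations.
for i in tqdm(range(1_000_000), total=NUM_ITERS):
    if i >= NUM_ITERS:
        break
```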
@@ -138,7 +139,7 @@ for i, (xbatch, ybatch) in tqdm(enumerate(sp.batch(X, y, BATCH_SIZE)), total=NUM
    validation_acc.append(accuracy)

# Plot results:
-print(f'Final validation accuracy: {validation_acc[-10].mean()}')
+print(f'Final validation accuracy: {np.mean(validation_acc[-10:])}')
plt.figure(figsize=(14, 4))
plt.subplot(1, 2, 1)
plt.ylabel('Loss')
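
This hunk is the typo the commit message refers to: each entry of `validation_acc` is a NumPy scalar (from `(y_eval_batch == predictions).mean()`), so `validation_acc[-10].mean()` silently returned the tenth-from-last accuracy rather than an average. Slicing with `[-10:]` and using `np.mean` averages the last ten evaluations. A small sketch of the difference, with illustrative values:

```python
import numpy as np

# Illustrative accuracies; in the README these come from the validation loop.
validation_acc = [0.90, 0.91, 0.92, 0.93, 0.94, 0.95, 0.94, 0.95, 0.96, 0.95]

single = validation_acc[-10]              # one element (0.90), not the last ten
average = np.mean(validation_acc[-10:])   # mean of the last ten entries

print(single, average)
```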
@@ -153,15 +154,21 @@ plt.plot(validation_acc)
plt.show()
```

-Final validation accuracy: 0.935
+
+HBox(children=(FloatProgress(value=0.0, max=300.0), HTML(value='')))


+Final validation accuracy: 0.9400000000000001

-![png](https://raw.githubusercontent.com/sradc/SmallPebble/master/readme_files/readme_6_2.png)
+
+
+
+![png](https://raw.githubusercontent.com/sradc/SmallPebble/master/readme_files/README_6_2.png)
+


## Training a convolutional neural network on CIFAR-10, using CuPy
-This was run on [Google Colab](https://colab.research.google.com/), with a GPU (for ~10-15 mins).
+This was run on [Google Colab](https://colab.research.google.com/), with a GPU.


```python
@@ -194,7 +201,9 @@ plt.show()
```


-![png](https://raw.githubusercontent.com/sradc/SmallPebble/master/readme_files/readme_9_0.png)
+
+![png](https://raw.githubusercontent.com/sradc/SmallPebble/master/readme_files/README_9_0.png)
+



@@ -208,6 +217,7 @@ sp.use(cupy)
print(sp.array_library.library.__name__) # should be 'cupy'
```

+
cupy


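For reference, `sp.use` swaps the array backend for the whole library, as in the README code around this hunk. A short sketch; only `sp.use(cupy)` and `sp.array_library.library.__name__` appear in the diff above, and the NumPy fallback is an assumption for machines without a GPU:

```python
import numpy
import smallpebble as sp

# SmallPebble runs on NumPy by default; on a CUDA machine it can use CuPy instead.
try:
    import cupy
    sp.use(cupy)    # switch the backend, as in the README
except ImportError:
    sp.use(numpy)   # assumed fallback; NumPy is the default backend anyway

print(sp.array_library.library.__name__)  # 'cupy' on a GPU box, otherwise 'numpy'
```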
@@ -227,7 +237,7 @@ y_eval = y_train[45_000:50_000]


```python
-"Define a model."
+"""Define a model."""

X_in = sp.Placeholder()
y_true = sp.Placeholder()
@@ -278,7 +288,7 @@ BATCH_SIZE = 128
eval_batch = sp.batch(X_eval, y_eval, BATCH_SIZE)
adam = sp.Adam()

-for i, (xbatch, ybatch) in tqdm(enumerate(sp.batch(X, y, BATCH_SIZE)), total=NUM_EPOCHS):
+for i, (xbatch, ybatch) in tqdm(enumerate(sp.batch(X, y, BATCH_SIZE)), total=NUM_ITERS):
    if i >= NUM_ITERS: break

    xbatch_images = xbatch.reshape([-1, 32, 32, 3])
@@ -303,7 +313,7 @@ for i, (xbatch, ybatch) in tqdm(enumerate(sp.batch(X, y, BATCH_SIZE)), total=NUM
    accuracy = (y_eval_batch == predictions).mean()
    validation_acc.append(accuracy)

-print(f'Final validation accuracy: {validation_acc[-10].mean()}')
+print(f'Final validation accuracy: {np.mean(validation_acc[-10:])}')
plt.figure(figsize=(14, 4))
plt.subplot(1, 2, 1)
plt.ylabel('Loss')
@@ -318,14 +328,20 @@ plt.plot(validation_acc)
plt.show()
```

-Final validation accuracy: 0.6640625
+
+HBox(children=(FloatProgress(value=0.0, max=3000.0), HTML(value='')))


+Final validation accuracy: 0.63828125

-![png](https://raw.githubusercontent.com/sradc/SmallPebble/master/readme_files/readme_14_2.png)


-It looks like we could improve our results by training for longer (and of course we could improve our model architecture).
+
+![png](https://raw.githubusercontent.com/sradc/SmallPebble/master/readme_files/README_14_2.png)
+
+
+
+It looks like we could improve our results by training for longer (and we could improve our model architecture).

---

@@ -393,14 +409,14 @@ print('grad_c:\n', grad_c)
```

y.array:
-[[0.83571629 1.04060209]
- [0.83590755 0.76613642]]
+[[1.32697776 1.24689392]
+ [1.25317932 1.05037433]]
grad_a:
-[[0.29385811 0.41138988]
- [0.28457185 0.00655705]]
+[[0.50232192 0.99209074]
+ [0.42936606 0.19027664]]
grad_b:
-[[0.15671755 0.67454729]
- [0.16250373 0.46305269]]
+[[0.95442445 0.34679685]
+ [0.94471809 0.7753676 ]]
grad_c:
[2. 2.]

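The new numbers above only reflect the rerun (fresh random inputs); the autodiff example itself is unchanged. A rough sketch of that example, assuming the `sp.Variable` / `sp.get_gradients` API used elsewhere in the README (the exact ops may differ from the original notebook cell):

```python
import numpy as np
import smallpebble as sp

# Random inputs, so the printed values change on every rerun, as in this commit.
a = sp.Variable(np.random.random([2, 2]))
b = sp.Variable(np.random.random([2, 2]))
c = sp.Variable(np.random.random([2]))

y = sp.add(sp.mul(a, b), c)  # elementwise multiply, then broadcast add

gradients = sp.get_gradients(y)  # maps each variable to its gradient array
print('y.array:\n', y.array)
print('grad_a:\n', gradients[a])
print('grad_b:\n', gradients[b])
print('grad_c:\n', gradients[c])  # c broadcasts over two rows, hence [2. 2.]
```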
@@ -420,7 +436,7 @@ print(lazy_node)
print(lazy_node.run())
```

-<smallpebble.smallpebble.Lazy object at 0x7fbb90b9db50>
+<smallpebble.smallpebble.Lazy object at 0x7f15db527550>
3


@@ -432,7 +448,7 @@ print(y)
print(y.run())
```

-<smallpebble.smallpebble.Lazy object at 0x7fbb90b6b910>
+<smallpebble.smallpebble.Lazy object at 0x7f15db26ea50>
10


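Only the object addresses changed in these two hunks; the lazy-graph behaviour is the same. A hedged sketch of what the surrounding (unchanged) README cells do, assuming the `sp.Lazy` API reflected in the output above:

```python
import smallpebble as sp

# Wrapping a function in sp.Lazy defers it; calling the wrapper records the
# arguments, and .run() evaluates the node (printing shows a Lazy object, as above).
lazy_node = sp.Lazy(lambda a, b: a + b)(1, 2)
print(lazy_node)        # <smallpebble.smallpebble.Lazy object at 0x...>
print(lazy_node.run())  # 3

# Lazy nodes can feed other lazy nodes, building a graph that is only
# evaluated when .run() is called.
a = sp.Lazy(lambda x: x)(2)
y = sp.Lazy(lambda a, b, c: a * b + c)(a, 3, 4)
print(y.run())          # 10
```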
@@ -451,8 +467,8 @@ print('result.array:\n', result.array)
```

result.array:
-[[0.85771129 1.28521573]
- [1.95373653 2.6991665 ]]
+[[1.96367495 2.26668698]
+ [3.94895132 5.3053362 ]]


You can use .run() as many times as you like.
@@ -467,8 +483,8 @@ print('result.array:\n', result.array)
```

result.array:
-[[ 8.57711288 12.85215729]
- [19.53736528 26.99166502]]
+[[19.63674952 22.6668698 ]
+ [39.48951324 53.053362 ]]


Finally, let's compute gradients:
@@ -502,6 +518,6 @@ for learnable in learnables:
    print(learnable)
```

-<smallpebble.smallpebble.Variable object at 0x7fbb3028c090>
-<smallpebble.smallpebble.Variable object at 0x7fbb90ba8b10>
+<smallpebble.smallpebble.Variable object at 0x7f157a263b10>
+<smallpebble.smallpebble.Variable object at 0x7f15d2a4ccd0>

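The learnables output likewise differs only in memory addresses. A rough sketch of the pattern, with the caveat that `sp.learnable` and `sp.get_learnables` are assumed here from their use elsewhere in the README rather than shown in this diff:

```python
import numpy as np
import smallpebble as sp

# Assumed API: sp.learnable flags a Variable as trainable, and sp.get_learnables
# walks a lazy graph and collects the flagged variables.
a = sp.learnable(sp.Variable(np.ones([2])))
b = sp.learnable(sp.Variable(np.ones([2])))
y = sp.Lazy(sp.add)(a, b)

learnables = sp.get_learnables(y)
for learnable in learnables:
    print(learnable)  # prints two Variable objects, as in the output above
```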
readme_files/readme_14_2.png

-338 Bytes

readme_files/readme_6_2.png

-143 Bytes
