
Commit

Bump version
- include batch norm bsum in deterministic backend setting
- modify ArrayIterator to remove duplicated code
apark263 committed Feb 24, 2016
1 parent 4baa314 commit 8ac7b71
Showing 5 changed files with 41 additions and 48 deletions.
6 changes: 2 additions & 4 deletions examples/fast_rcnn_alexnet.py
@@ -78,12 +78,10 @@ def load_imagenet_weights(model, path):

     param_layers = [l for l in model.layers.layers[0].layers[0].layers]
     param_dict_list = pdict['model']['config']['layers']
-    i = 0
     for layer, ps in zip(param_layers, param_dict_list):
-        i = i+1
-        print i, layer.name
+        print layer.name, ps['config']['name']
         layer.load_weights(ps, load_states=True)
-        if i == 17:
+        if ps['config']['name'] == 'Pooling_2':
             print 'Only load the pre-trained weights up to conv5 layer of Alexnet'
             break

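Note: the change above swaps a hard-coded layer index (i == 17) for a match on the serialized layer name, so the truncated load no longer breaks if the layer count shifts. A minimal sketch of the same idea as a standalone helper (the helper name is hypothetical; layer.load_weights and the 'Pooling_2' boundary come from the diff):

def load_pretrained_prefix(param_layers, param_dict_list, stop_name='Pooling_2'):
    # Load weights layer by layer and stop once the named boundary layer is reached,
    # leaving everything after conv5/pool5 at its fresh initialization.
    for layer, ps in zip(param_layers, param_dict_list):
        layer.load_weights(ps, load_states=True)
        if ps['config']['name'] == stop_name:
            break
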
2 changes: 2 additions & 0 deletions neon/backends/nervanagpu.py
@@ -2240,6 +2240,8 @@ def copy_transpose(self, a, out, axes=None, repeat=1):
         assert a.size == out.size
         assert a.gpudata != out.gpudata

+        assert out.is_contiguous, "Output array in copy_transpose() must be contiguous"
+
         if axes is None:
             axes = tuple(range(len(a.shape)-1,-1,-1))
         elif type(axes) is not tuple:
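Note: the added assertion rejects a non-contiguous destination before the transpose kernel runs. A rough host-side analogue of the same pre-condition using NumPy (copy_transpose_host is a made-up stand-in, not the neon backend call):

import numpy as np

def copy_transpose_host(a, out, axes=None):
    # Mirror the guard added above: the destination must be contiguous and sized to match.
    # `out` is expected to already have the transposed shape.
    assert a.size == out.size
    assert out.flags['C_CONTIGUOUS'], 'Output array in copy_transpose_host() must be contiguous'
    if axes is None:
        axes = tuple(range(a.ndim - 1, -1, -1))
    out[...] = np.transpose(a, axes)
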
76 changes: 34 additions & 42 deletions neon/data/dataiterator.py
@@ -75,37 +75,40 @@ def __init__(self, X, y=None, nclass=None, lshape=None, make_onehot=True, name=None):
         super(ArrayIterator, self).__init__(name=name)
         X = X if isinstance(X, list) else [X]
         self.ndata = len(X[0])
-        assert self.ndata >= self.be.bsz
         self.start = 0
         self.nclass = nclass
-        self.ybuf = None
-
-        # on device tensor with full dataset
-        self.Xdev = [self.be.array(x) for x in X]
-        # mini-batch sized buffer
-        self.Xbuf = [self.be.iobuf(x.shape[1]) for x in X]
-
-        if lshape is not None:
-            self.shape = [lshape for x in X]
-        else:
-            self.shape = [x.shape[1] for x in X]
+        if make_onehot and nclass is None and y is not None:
+            raise AttributeError('Must provide number of classes when creating onehot labels')

+        # store shape of the input data
+        self.shape = [x.shape[1] if lshape is None else lshape for x in X]
         if len(self.shape) == 1:
             self.shape = self.shape[0]

-        # store shape of the input data
         self.lshape = lshape

+        assert self.ndata >= self.be.bsz
+        # Helpers to make dataset, minibatch, unpacking function for transpose and onehot
+        def transpose_gen(z):
+            return (self.be.array(z), self.be.iobuf(z.shape[1]),
+                    lambda _in, _out: _in.transpose(_out))
+
+        def onehot_gen(z):
+            return (self.be.array(z.reshape((-1, 1)), dtype=np.int32), self.be.iobuf(nclass),
+                    lambda _in, _out: self.be.onehot(_in, axis=0, out=_out))
+
+        self.Xdev, self.Xbuf, self.unpack_func = zip(*[transpose_gen(x) for x in X])
+
+        # Shallow copies for appending, iterating
+        self.dbuf, self.hbuf = list(self.Xdev), list(self.Xbuf)
+        self.unpack_func = list(self.unpack_func)
+
+        self.ybuf = None
         self.make_onehot = make_onehot
         if y is not None:
-            if make_onehot:
-                assert nclass is not None
-                self.ydev = self.be.array(y.reshape((-1, 1)), dtype=np.int32)
-                self.ybuf = self.be.iobuf(nclass)
-            else:
-                self.ydev = self.be.array(y)
-                self.ybuf = self.be.iobuf(y.shape[1])
+            self.ydev, self.ybuf, yfunc = onehot_gen(y) if make_onehot else transpose_gen(y)
+            self.dbuf.append(self.ydev)
+            self.hbuf.append(self.ybuf)
+            self.unpack_func.append(yfunc)

     @property
     def nbatches(self):
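
Note: the rewrite above removes the duplicated X/y handling by building, for every array, a (device array, minibatch buffer, unpack function) triple — transpose_gen for plain features or targets, onehot_gen for class labels — and collecting them in dbuf/hbuf/unpack_func so one loop can service all of them. A stripped-down, CPU-only sketch of the same pattern (HostIterator, bsz and the NumPy buffers are stand-ins, not neon code):

import numpy as np

class HostIterator(object):
    def __init__(self, X, y=None, nclass=None, bsz=128, make_onehot=True):
        X = X if isinstance(X, list) else [X]

        def transpose_gen(z):
            # (full array, feature-major batch buffer, copy-in function)
            return (z, np.empty((z.shape[1], bsz)),
                    lambda src, dst: np.copyto(dst, src.T))

        def onehot_gen(z):
            def onehot(src, dst):
                dst[...] = 0
                dst[src.ravel().astype(int), np.arange(src.shape[0])] = 1
            return (z.reshape(-1, 1), np.empty((nclass, bsz)), onehot)

        self.dbuf, self.hbuf, self.unpack_func = (list(t) for t in
                                                  zip(*[transpose_gen(x) for x in X]))
        if y is not None:
            dev, buf, fn = onehot_gen(y) if make_onehot else transpose_gen(y)
            self.dbuf.append(dev)
            self.hbuf.append(buf)
            self.unpack_func.append(fn)

Iteration then reduces to a single loop over zip(self.hbuf, self.dbuf, self.unpack_func), which is exactly the duplication the commit message calls out.
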
@@ -125,31 +128,20 @@ def __iter__(self):
         Defines a generator that can be used to iterate over this dataset.
         Yields:
-            tuple: The next minibatch. A minibatch includes both features and
-                labels.
+            tuple: The next minibatch which includes both features and labels.
         """
         for i1 in range(self.start, self.ndata, self.be.bsz):
-            i2 = min(i1 + self.be.bsz, self.ndata)
-            bsz = i2 - i1
-            if i2 == self.ndata:
+            bsz = min(self.be.bsz, self.ndata - i1)
+            islice1, oslice1 = slice(0, bsz), slice(i1, i1 + bsz)
+            islice2, oslice2 = None, None
+            if self.be.bsz > bsz:
+                islice2, oslice2 = slice(bsz, None), slice(0, self.be.bsz - bsz)
                 self.start = self.be.bsz - bsz

-            for xbuf, xdev in zip(self.Xbuf, self.Xdev):
-                self.be.copy_transpose(xdev[i1:i2], xbuf[:, :bsz])
-                if self.be.bsz > bsz:
-                    self.be.copy_transpose(xdev[:(self.be.bsz - bsz)], xbuf[:, bsz:])
-
-            if self.ybuf is not None:
-                if self.make_onehot:
-                    self.ybuf[:, :bsz] = self.be.onehot(
-                        self.ydev[i1:i2], axis=0)
-                    if self.be.bsz > bsz:
-                        self.ybuf[:, bsz:] = self.be.onehot(
-                            self.ydev[:(self.be.bsz - bsz)], axis=0)
-                else:
-                    self.be.copy_transpose(self.ydev[i1:i2], self.ybuf[:, :bsz])
-                    if self.be.bsz > bsz:
-                        self.be.copy_transpose(self.ydev[:(self.be.bsz - bsz)], self.ybuf[:, bsz:])
+            for buf, dev, unpack_func in zip(self.hbuf, self.dbuf, self.unpack_func):
+                unpack_func(dev[oslice1], buf[:, islice1])
+                if oslice2:
+                    unpack_func(dev[oslice2], buf[:, islice2])

             inputs = self.Xbuf[0] if len(self.Xbuf) == 1 else self.Xbuf
             targets = self.ybuf if self.ybuf else inputs
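Note: the new __iter__ precomputes one or two (buffer, data) slice pairs per minibatch — the second pair only exists when the batch runs past the end of the data and wraps around to index 0 — and then applies each array's unpack function uniformly instead of special-casing features and labels. A self-contained sketch of just the slice arithmetic (plain Python, hypothetical function name):

def wraparound_slices(start, ndata, bsz):
    # Yields (buf_slice_1, data_slice_1, buf_slice_2, data_slice_2) per minibatch;
    # the second pair is (None, None) unless the batch wraps past the end of the data.
    for i1 in range(start, ndata, bsz):
        n = min(bsz, ndata - i1)
        islice1, oslice1 = slice(0, n), slice(i1, i1 + n)
        islice2, oslice2 = None, None
        if n < bsz:
            islice2, oslice2 = slice(n, None), slice(0, bsz - n)
        yield islice1, oslice1, islice2, oslice2

Each buffer range buf[:, islice1] is then filled from dev[oslice1] and, when present, buf[:, islice2] from dev[oslice2], matching the loop in the diff.
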
3 changes: 2 additions & 1 deletion neon/layers/layer.py
@@ -580,6 +580,7 @@ def __init__(self, fshape, strides={}, padding={}, init=None, bsum=False,
                  name=None, parallelism="Data"):
         super(Convolution, self).__init__(init, name, parallelism)
         self.nglayer = None
+        bsum = bsum and not self.be.deterministic
         self.convparams = {'str_h': 1, 'str_w': 1, 'str_d': 1,
                            'pad_h': 0, 'pad_w': 0, 'pad_d': 0,
                            'T': 1, 'D': 1, 'bsum': bsum}  # 3D paramaters
@@ -746,7 +747,7 @@ def __init__(self, nout, init, bsum=False, name=None):
         super(Linear, self).__init__(init, name, "Disabled")
         self.nout = nout
         self.inputs = None
-        self.bsum = bsum
+        self.bsum = bsum and not self.be.deterministic

     def __str__(self):
         return "Linear Layer '%s': %d inputs, %d outputs" % (
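Note: both layer changes force the fused batch-sum (bsum) path off whenever the backend is in its deterministic setting, which is what the first bullet of the commit message refers to; presumably the fused reduction is skipped there because it is not bitwise reproducible. A minimal sketch of the guard pattern with stand-in classes (Backend and ToyLayer are hypothetical, not neon's):

class Backend(object):
    def __init__(self, deterministic=False):
        self.deterministic = deterministic

class ToyLayer(object):
    def __init__(self, be, bsum=False):
        # keep the fused batch-sum optimization only when determinism is not requested
        self.bsum = bsum and not be.deterministic

be = Backend(deterministic=True)
assert ToyLayer(be, bsum=True).bsum is False
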
2 changes: 1 addition & 1 deletion setup.py
@@ -19,7 +19,7 @@
 import subprocess

 # Define version information
-VERSION = '1.2.1'
+VERSION = '1.2.2'
 FULLVERSION = VERSION
 write_version = True

