Skip to content

Commit

Permalink
Merge branch 'master' into apibreak
Browse files Browse the repository at this point in the history
Conflicts:
	neo/core/analogsignal.py
	neo/core/analogsignalarray.py
	neo/core/segment.py
	neo/io/blackrockio.py
	neo/io/neuroexplorerio.py
	neo/io/spike2io.py
  • Loading branch information
apdavison committed Jun 23, 2016
2 parents 35d82c2 + b2a6944 commit 963db9b
Show file tree
Hide file tree
Showing 4 changed files with 124 additions and 25 deletions.
32 changes: 32 additions & 0 deletions neo/core/segment.py
Original file line number Diff line number Diff line change
Expand Up @@ -97,6 +97,38 @@ def __init__(self, name=None, description=None, file_origin=None,
self.rec_datetime = rec_datetime
self.index = index

# t_start attribute is handled as a property so type checking can be done
@property
def t_start(self):
    '''
    Time when first signal begins.

    Returns None when the Segment has no children carrying times.
    '''
    candidates = [sig.t_start
                  for sig in (self.analogsignals
                              + self.spiketrains
                              + self.irregularlysampledsignals)]
    for child in self.epochs + self.events:
        if len(child.times) > 0:
            candidates.append(child.times[0])

    # t_start is not defined if no children are present
    return min(candidates) if candidates else None

# t_stop attribute is handled as a property so type checking can be done
@property
def t_stop(self):
    '''
    Time when last signal ends.

    Returns None when the Segment has no children carrying times.
    '''
    candidates = [sig.t_stop
                  for sig in (self.analogsignals
                              + self.spiketrains
                              + self.irregularlysampledsignals)]
    for child in self.epochs + self.events:
        if len(child.times) > 0:
            candidates.append(child.times[-1])

    # t_stop is not defined if no children are present
    return max(candidates) if candidates else None

def take_spiketrains_by_unit(self, unit_list=None):
'''
Return :class:`SpikeTrains` in the :class:`Segment` that are also in a
Expand Down
66 changes: 43 additions & 23 deletions neo/io/spike2io.py
Original file line number Diff line number Diff line change
Expand Up @@ -66,15 +66,22 @@ class Spike2IO(BaseIO):

mode = 'file'

def __init__(self, filename=None):
ced_units = False

def __init__(self, filename=None, ced_units=False):
    """
    Read a Spike2 .smr file.

    Arguments:
        filename : path of the .smr file to read
        ced_units: if True, one SpikeTrain is created per unit as
            determined by Spike2's spike sorting; if False (default),
            each spike channel is treated as a single unit and
            Spike2's spike sorting is ignored.
    """
    BaseIO.__init__(self)
    self.ced_units = ced_units
    self.filename = filename

def read_segment(self, take_ideal_sampling_rate=False,
lazy=False, cascade=True):
Expand Down Expand Up @@ -132,11 +139,12 @@ def addannotations(ob, channelHeader):
seg.events.append(ea)

elif channelHeader.kind in [6, 7]:
sptr = self.read_one_channel_event_or_spike(
sptrs = self.read_one_channel_event_or_spike(
fid, i, header, lazy=lazy)
if sptr is not None:
addannotations(sptr, channelHeader)
seg.spiketrains.append(sptr)
if sptrs is not None:
for sptr in sptrs:
addannotations(sptr, channelHeader)
seg.spiketrains.append(sptr)

fid.close()

Expand Down Expand Up @@ -165,23 +173,21 @@ def read_header(self, filename=''):
('offset', 'f4'),
('unit', 'S6'), ]
channelHeader += HeaderReader(fid, np.dtype(dt))

if header.system_id < 6:
channelHeader += HeaderReader(fid, np.dtype(
[('divide', 'i4')])) # i8
else:
channelHeader += HeaderReader(fid, np.dtype(
[('interleave', 'i4')])) # i8
channelHeader += HeaderReader(fid, np.dtype([ ('divide' , 'i2')]) )
else :
channelHeader +=HeaderReader(fid, np.dtype([ ('interleave' , 'i2')]) )

if channelHeader.kind in [7, 9]:
dt = [('min', 'f4'),
('max', 'f4'),
('unit', 'S6'), ]
channelHeader += HeaderReader(fid, np.dtype(dt))
if header.system_id < 6:
channelHeader += HeaderReader(fid, np.dtype(
[('divide', 'i4')])) # i8
else:
channelHeader += HeaderReader(fid, np.dtype(
[('interleave', 'i4')])) # i8
channelHeader += HeaderReader(fid, np.dtype([ ('divide' , 'i2')]))
else :
channelHeader += HeaderReader(fid, np.dtype([ ('interleave' , 'i2')]) )
if channelHeader.kind in [4]:
dt = [('init_low', 'u1'),
('next_low', 'u1'), ]
Expand Down Expand Up @@ -352,7 +358,7 @@ def read_one_channel_event_or_spike(self, fid, channel_num, header,
elif channelHeader.kind in [6, 7]:
# correct value for t_stop to be put in later
sptr = SpikeTrain([] * pq.s, t_stop=1e99)
sptr.annotate(channel_index=channel_num)
sptr.annotate(channel_index=channel_num, ced_unit = 0)
sptr.lazy_shape = totalitems
return sptr
else:
Expand Down Expand Up @@ -431,13 +437,27 @@ def read_one_channel_event_or_spike(self, fid, channel_num, header,
t_stop = alltimes.max()
else:
t_stop = 0.0
sptr = SpikeTrain(alltimes,
waveforms=waveforms * unit,
sampling_rate=(1. / sample_interval) * pq.Hz,
t_stop=t_stop)
sptr.annotate(channel_index=channel_num)

return sptr
if not self.ced_units:
sptr = SpikeTrain(alltimes,
waveforms = waveforms*unit,
sampling_rate = (1./sample_interval)*pq.Hz,
t_stop = t_stop
)
sptr.annotate(channel_index = channel_num, ced_unit = 0)
return [sptr]

sptrs = []
for i in set(alltrigs['marker'] & 255):
sptr = SpikeTrain(alltimes[alltrigs['marker'] == i],
waveforms = waveforms[alltrigs['marker'] == i]*unit,
sampling_rate = (1./sample_interval)*pq.Hz,
t_stop = t_stop
)
sptr.annotate(channel_index = channel_num, ced_unit = i)
sptrs.append(sptr)

return sptrs


class HeaderReader(object):
Expand Down
24 changes: 22 additions & 2 deletions neo/io/tdtio.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,8 @@ def read_segment(self, blockname=None, lazy=False, cascade=True):

if blockname == 'TempBlk': return None

if not self.is_tdtblock(blockname): return None # if not a tdt block

subdir = os.path.join(self.dirname, blockname)
if not os.path.isdir(subdir): return None

Expand Down Expand Up @@ -237,13 +239,31 @@ def read_block(self, lazy=False, cascade=True):
if not cascade : return bl

for blockname in os.listdir(self.dirname):
seg = self.read_segment(blockname, lazy, cascade)
bl.segments.append(seg)
if self.is_tdtblock(blockname): # if the folder is a tdt block
seg = self.read_segment(blockname, lazy, cascade)
bl.segments.append(seg)

bl.create_many_to_one_relationship()
return bl


# to determine if this folder is a TDT block, based on the extension of the files inside it
# to deal with unexpected files in the tank, e.g. .DS_Store on Mac machines
def is_tdtblock(self, blockname):
    """
    Return True if the folder *blockname* inside ``self.dirname`` looks
    like a TDT block, i.e. contains files with all four required
    extensions (.tbk, .tdx, .tev, .tsq); return False otherwise
    (including when the path is missing or not a directory).
    """
    blockpath = os.path.join(self.dirname, blockname)   # get block path
    if not os.path.isdir(blockpath):
        return False

    # gather the lowercased extension of every file in the folder;
    # extra files (e.g. .DS_Store) are harmless — we only require the
    # four TDT extensions to be present
    found_ext = set(os.path.splitext(fname)[1].lower()
                    for fname in os.listdir(blockpath))
    tdt_ext = set(['.tbk', '.tdx', '.tev', '.tsq'])
    return tdt_ext.issubset(found_ext)

tdt_event_type = [
#(0x0,'EVTYPE_UNKNOWN'),
(0x101, 'EVTYPE_STRON'),
Expand Down
27 changes: 27 additions & 0 deletions neo/test/coretest/test_segment.py
Original file line number Diff line number Diff line change
Expand Up @@ -205,6 +205,33 @@ def test__creation(self):
self.check_creation(self.seg1)
self.check_creation(self.seg2)

def test_times(self):
    # Verify that Segment.t_start / t_stop equal the extrema of the
    # times carried by all child objects.
    for seg in (self.seg1, self.seg2):
        starts, stops = [], []
        child_containers = (seg.analogsignals, seg.epochs, seg.events,
                            seg.irregularlysampledsignals,
                            seg.spiketrains)
        for container in child_containers:
            for child in container:
                if hasattr(child, 't_start'):
                    starts.append(child.t_start)
                if hasattr(child, 't_stop'):
                    stops.append(child.t_stop)
                if hasattr(child, 'time'):
                    starts.append(child.time)
                    stops.append(child.time)
                if hasattr(child, 'times'):
                    starts.append(child.times[0])
                    stops.append(child.times[-1])

        self.assertEqual(seg.t_start, min(starts))
        self.assertEqual(seg.t_stop, max(stops))

def test__merge(self):
seg1a = fake_neo(Block, seed=self.seed1, n=self.nchildren).segments[0]
assert_same_sub_schema(self.seg1, seg1a)
Expand Down

0 comments on commit 963db9b

Please sign in to comment.