Skip to content

Commit 963db9b

Browse files
committed
Merge branch 'master' into apibreak
Conflicts: neo/core/analogsignal.py neo/core/analogsignalarray.py neo/core/segment.py neo/io/blackrockio.py neo/io/neuroexplorerio.py neo/io/spike2io.py
2 parents 35d82c2 + b2a6944 commit 963db9b

File tree

4 files changed

+124
-25
lines changed

4 files changed

+124
-25
lines changed

neo/core/segment.py

Lines changed: 32 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -97,6 +97,38 @@ def __init__(self, name=None, description=None, file_origin=None,
9797
self.rec_datetime = rec_datetime
9898
self.index = index
9999

100+
# t_start is exposed as a read-only property so it is always computed
# from the segment's children rather than stored
@property
def t_start(self):
    '''
    Time when the first signal in this :class:`Segment` begins.

    Returns the minimum start time over all child data objects
    (analog signals, spike trains, irregularly sampled signals, and
    non-empty epochs/events), or None when no timed children exist.
    '''
    timed_children = (self.analogsignals + self.spiketrains +
                      self.irregularlysampledsignals)
    starts = [sig.t_start for sig in timed_children]
    starts.extend(e.times[0]
                  for e in self.epochs + self.events if len(e.times) > 0)

    # t_start is undefined when the segment has no timed children
    if not starts:
        return None
    return min(starts)
115+
116+
# t_stop is exposed as a read-only property so it is always computed
# from the segment's children rather than stored
@property
def t_stop(self):
    '''
    Time when the last signal in this :class:`Segment` ends.

    Returns the maximum stop time over all child data objects
    (analog signals, spike trains, irregularly sampled signals, and
    non-empty epochs/events), or None when no timed children exist.
    '''
    timed_children = (self.analogsignals + self.spiketrains +
                      self.irregularlysampledsignals)
    stops = [sig.t_stop for sig in timed_children]
    stops.extend(e.times[-1]
                 for e in self.epochs + self.events if len(e.times) > 0)

    # t_stop is undefined when the segment has no timed children
    if not stops:
        return None
    return max(stops)
131+
100132
def take_spiketrains_by_unit(self, unit_list=None):
101133
'''
102134
Return :class:`SpikeTrains` in the :class:`Segment` that are also in a

neo/io/spike2io.py

Lines changed: 43 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -66,15 +66,22 @@ class Spike2IO(BaseIO):
6666

6767
mode = 'file'
6868

69-
# Class-level default: treat each spike channel as a single unit unless
# the reader is told to honour Spike2's own spike sorting.
ced_units = False

def __init__(self, filename=None, ced_units=False):
    """
    Initialise a reader for a CED Spike2 .smr file.

    Arguments:
        filename : path of the .smr file to read
        ced_units : whether spike trains should be added for each unit
            as determined by Spike2's spike sorting (True), or whether a
            spike channel should be considered a single unit, ignoring
            Spike2's spike sorting (False). Defaults to False.
    """
    BaseIO.__init__(self)
    self.filename = filename
    self.ced_units = ced_units
7885

7986
def read_segment(self, take_ideal_sampling_rate=False,
8087
lazy=False, cascade=True):
@@ -132,11 +139,12 @@ def addannotations(ob, channelHeader):
132139
seg.events.append(ea)
133140

134141
elif channelHeader.kind in [6, 7]:
135-
sptr = self.read_one_channel_event_or_spike(
142+
sptrs = self.read_one_channel_event_or_spike(
136143
fid, i, header, lazy=lazy)
137-
if sptr is not None:
138-
addannotations(sptr, channelHeader)
139-
seg.spiketrains.append(sptr)
144+
if sptrs is not None:
145+
for sptr in sptrs:
146+
addannotations(sptr, channelHeader)
147+
seg.spiketrains.append(sptr)
140148

141149
fid.close()
142150

@@ -165,23 +173,21 @@ def read_header(self, filename=''):
165173
('offset', 'f4'),
166174
('unit', 'S6'), ]
167175
channelHeader += HeaderReader(fid, np.dtype(dt))
176+
168177
if header.system_id < 6:
169-
channelHeader += HeaderReader(fid, np.dtype(
170-
[('divide', 'i4')])) # i8
171-
else:
172-
channelHeader += HeaderReader(fid, np.dtype(
173-
[('interleave', 'i4')])) # i8
178+
channelHeader += HeaderReader(fid, np.dtype([ ('divide' , 'i2')]) )
179+
else :
180+
channelHeader +=HeaderReader(fid, np.dtype([ ('interleave' , 'i2')]) )
181+
174182
if channelHeader.kind in [7, 9]:
175183
dt = [('min', 'f4'),
176184
('max', 'f4'),
177185
('unit', 'S6'), ]
178186
channelHeader += HeaderReader(fid, np.dtype(dt))
179187
if header.system_id < 6:
180-
channelHeader += HeaderReader(fid, np.dtype(
181-
[('divide', 'i4')])) # i8
182-
else:
183-
channelHeader += HeaderReader(fid, np.dtype(
184-
[('interleave', 'i4')])) # i8
188+
channelHeader += HeaderReader(fid, np.dtype([ ('divide' , 'i2')]))
189+
else :
190+
channelHeader += HeaderReader(fid, np.dtype([ ('interleave' , 'i2')]) )
185191
if channelHeader.kind in [4]:
186192
dt = [('init_low', 'u1'),
187193
('next_low', 'u1'), ]
@@ -352,7 +358,7 @@ def read_one_channel_event_or_spike(self, fid, channel_num, header,
352358
elif channelHeader.kind in [6, 7]:
353359
# correct value for t_stop to be put in later
354360
sptr = SpikeTrain([] * pq.s, t_stop=1e99)
355-
sptr.annotate(channel_index=channel_num)
361+
sptr.annotate(channel_index=channel_num, ced_unit = 0)
356362
sptr.lazy_shape = totalitems
357363
return sptr
358364
else:
@@ -431,13 +437,27 @@ def read_one_channel_event_or_spike(self, fid, channel_num, header,
431437
t_stop = alltimes.max()
432438
else:
433439
t_stop = 0.0
434-
sptr = SpikeTrain(alltimes,
435-
waveforms=waveforms * unit,
436-
sampling_rate=(1. / sample_interval) * pq.Hz,
437-
t_stop=t_stop)
438-
sptr.annotate(channel_index=channel_num)
439440

440-
return sptr
441+
if not self.ced_units:
442+
sptr = SpikeTrain(alltimes,
443+
waveforms = waveforms*unit,
444+
sampling_rate = (1./sample_interval)*pq.Hz,
445+
t_stop = t_stop
446+
)
447+
sptr.annotate(channel_index = channel_num, ced_unit = 0)
448+
return [sptr]
449+
450+
sptrs = []
451+
for i in set(alltrigs['marker'] & 255):
452+
sptr = SpikeTrain(alltimes[alltrigs['marker'] == i],
453+
waveforms = waveforms[alltrigs['marker'] == i]*unit,
454+
sampling_rate = (1./sample_interval)*pq.Hz,
455+
t_stop = t_stop
456+
)
457+
sptr.annotate(channel_index = channel_num, ced_unit = i)
458+
sptrs.append(sptr)
459+
460+
return sptrs
441461

442462

443463
class HeaderReader(object):

neo/io/tdtio.py

Lines changed: 22 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -104,6 +104,8 @@ def read_segment(self, blockname=None, lazy=False, cascade=True):
104104

105105
if blockname == 'TempBlk': return None
106106

107+
if not self.is_tdtblock(blockname): return None # if not a tdt block
108+
107109
subdir = os.path.join(self.dirname, blockname)
108110
if not os.path.isdir(subdir): return None
109111

@@ -237,13 +239,31 @@ def read_block(self, lazy=False, cascade=True):
237239
if not cascade : return bl
238240

239241
for blockname in os.listdir(self.dirname):
240-
seg = self.read_segment(blockname, lazy, cascade)
241-
bl.segments.append(seg)
242+
if self.is_tdtblock(blockname): # if the folder is a tdt block
243+
seg = self.read_segment(blockname, lazy, cascade)
244+
bl.segments.append(seg)
242245

243246
bl.create_many_to_one_relationship()
244247
return bl
245248

246249

250+
# A folder in the tank counts as a TDT block only if it contains files of
# every required TDT extension; this filters out unexpected entries in the
# tank, e.g. .DS_Store on Mac machines.
def is_tdtblock(self, blockname):
    '''
    Return True if sub-folder *blockname* of ``self.dirname`` is a TDT
    block, i.e. it is a directory containing at least one file of each
    required TDT extension (.tbk, .tdx, .tev, .tsq); False otherwise.
    '''
    tdt_ext = frozenset(['.tbk', '.tdx', '.tev', '.tsq'])
    blockpath = os.path.join(self.dirname, blockname)
    # non-directories (e.g. stray files in the tank) are never blocks
    if not os.path.isdir(blockpath):
        return False
    # collect the lower-cased extension of every file in the folder
    found_ext = set(os.path.splitext(fname)[1].lower()
                    for fname in os.listdir(blockpath))
    # a block must contain all of the required file types
    return found_ext >= tdt_ext
266+
247267
tdt_event_type = [
248268
#(0x0,'EVTYPE_UNKNOWN'),
249269
(0x101, 'EVTYPE_STRON'),

neo/test/coretest/test_segment.py

Lines changed: 27 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -205,6 +205,33 @@ def test__creation(self):
205205
self.check_creation(self.seg1)
206206
self.check_creation(self.seg2)
207207

208+
def test_times(self):
    # Segment.t_start / t_stop must equal the extremes of the timing
    # information carried by every child data object.
    for seg in (self.seg1, self.seg2):
        targ_starts = []
        targ_stops = []
        all_children = (seg.analogsignals + seg.epochs + seg.events +
                        seg.irregularlysampledsignals + seg.spiketrains)
        for child in all_children:
            # children expose timing through one or more of these
            # attributes, depending on their type
            if hasattr(child, 't_start'):
                targ_starts.append(child.t_start)
            if hasattr(child, 't_stop'):
                targ_stops.append(child.t_stop)
            if hasattr(child, 'time'):
                targ_starts.append(child.time)
                targ_stops.append(child.time)
            if hasattr(child, 'times'):
                targ_starts.append(child.times[0])
                targ_stops.append(child.times[-1])

        self.assertEqual(seg.t_start, min(targ_starts))
        self.assertEqual(seg.t_stop, max(targ_stops))
208235
def test__merge(self):
209236
seg1a = fake_neo(Block, seed=self.seed1, n=self.nchildren).segments[0]
210237
assert_same_sub_schema(self.seg1, seg1a)

0 commit comments

Comments
 (0)