Skip to content

Commit

Permalink
unit_channels > spike_channels
Browse files Browse the repository at this point in the history
  • Loading branch information
samuelgarcia committed Feb 19, 2021
1 parent 0274b40 commit 8b5265f
Show file tree
Hide file tree
Showing 31 changed files with 185 additions and 178 deletions.
8 changes: 4 additions & 4 deletions doc/source/rawio.rst
Original file line number Diff line number Diff line change
Expand Up @@ -77,11 +77,11 @@ Then browse the internal header and display information::
nb_block: 1
nb_segment: [1]
signal_channels: [V1]
unit_channels: [Wspk1u, Wspk2u, Wspk4u, Wspk5u ... Wspk29u Wspk30u Wspk31u Wspk32u]
spike_channels: [Wspk1u, Wspk2u, Wspk4u, Wspk5u ... Wspk29u Wspk30u Wspk31u Wspk32u]
event_channels: []

You get the number of blocks and segments per block. You have information
about channels: **signal_channels**, **unit_channels**, **event_channels**.
about channels: **signal_channels**, **spike_channels**, **event_channels**.

All this information is internally available in the *header* dict::

Expand All @@ -91,7 +91,7 @@ All this information is internally available in the *header* dict::
event_channels []
nb_segment [1]
nb_block 1
unit_channels [('Wspk1u', 'ch1#0', '', 0.00146484, 0., 0, 30000.)
spike_channels [('Wspk1u', 'ch1#0', '', 0.00146484, 0., 0, 30000.)
('Wspk2u', 'ch2#0', '', 0.00146484, 0., 0, 30000.)
...

Expand Down Expand Up @@ -141,7 +141,7 @@ Inspect units channel. Each channel gives a SpikeTrain for each Segment.
Note that for many formats a physical channel can have several units after spike
sorting. So nb_unit can be greater than the number of physical channels or signal channels.

>>> nb_unit = reader.unit_channels_count()
>>> nb_unit = reader.spike_channels_count()
>>> print('nb_unit', nb_unit)
nb_unit 30
>>> for unit_index in range(nb_unit):
Expand Down
2 changes: 1 addition & 1 deletion examples/read_files_neo_rawio.py
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@
print(sampling_rate, t_start, units)

# Count unit and spike per units
nb_unit = reader.unit_channels_count()
nb_unit = reader.spike_channels_count()
print('nb_unit', nb_unit)
for unit_index in range(nb_unit):
nb_spike = reader.spike_count(block_index=0, seg_index=0, unit_index=unit_index)
Expand Down
14 changes: 7 additions & 7 deletions neo/io/basefromrawio.py
Original file line number Diff line number Diff line change
Expand Up @@ -150,13 +150,13 @@ def read_block(self, block_index=0, lazy=False,
sig_groups.append(group)

if create_group_across_segment['SpikeTrain']:
unit_channels = self.header['unit_channels']
spike_channels = self.header['spike_channels']
st_groups = []
for c in range(unit_channels.size):
for c in range(spike_channels.size):
group = Group(name='SpikeTrain group {}'.format(c))
group.annotate(unit_name=unit_channels[c]['name'])
group.annotate(unit_id=unit_channels[c]['id'])
unit_annotations = self.raw_annotations['unit_channels'][c]
group.annotate(unit_name=spike_channels[c]['name'])
group.annotate(unit_id=spike_channels[c]['id'])
unit_annotations = self.raw_annotations['spike_channels'][c]
unit_annotations = check_annotations(unit_annotations)
group.annotate(**unit_annotations)
bl.groups.append(group)
Expand Down Expand Up @@ -259,8 +259,8 @@ def read_segment(self, block_index=0, seg_index=0, lazy=False,
seg.analogsignals.append(anasig)

# SpikeTrain and waveforms (optional)
unit_channels = self.header['unit_channels']
for unit_index in range(len(unit_channels)):
spike_channels = self.header['spike_channels']
for unit_index in range(len(spike_channels)):
# make a proxy...
sptr = SpikeTrainProxy(rawio=self, unit_index=unit_index,
block_index=block_index, seg_index=seg_index)
Expand Down
4 changes: 2 additions & 2 deletions neo/io/proxyobjects.py
Original file line number Diff line number Diff line change
Expand Up @@ -311,11 +311,11 @@ def __init__(self, rawio=None, unit_index=None, block_index=0, seg_index=0):
# both necessary attr and annotations
annotations = {}
for k in ('name', 'id'):
annotations[k] = self._rawio.header['unit_channels'][unit_index][k]
annotations[k] = self._rawio.header['spike_channels'][unit_index][k]
ann = self._rawio.raw_annotations['blocks'][block_index]['segments'][seg_index]['units'][unit_index]
annotations.update(ann)

h = self._rawio.header['unit_channels'][unit_index]
h = self._rawio.header['spike_channels'][unit_index]
wf_sampling_rate = h['wf_sampling_rate']
if not np.isnan(wf_sampling_rate) and wf_sampling_rate > 0:
self.sampling_rate = wf_sampling_rate * pq.Hz
Expand Down
6 changes: 3 additions & 3 deletions neo/rawio/axographrawio.py
Original file line number Diff line number Diff line change
Expand Up @@ -154,7 +154,7 @@
Intervals".
"""

from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
from .baserawio import (BaseRawIO, _signal_channel_dtype, _spike_channel_dtype,
_event_channel_dtype)

import os
Expand Down Expand Up @@ -1316,8 +1316,8 @@ def _scan_axograph_file(self):
np.array(sig_channels, dtype=_signal_channel_dtype)
self.header['event_channels'] = \
np.array(event_channels, dtype=_event_channel_dtype)
self.header['unit_channels'] = \
np.array([], dtype=_unit_channel_dtype)
self.header['spike_channels'] = \
np.array([], dtype=_spike_channel_dtype)

##############################################
# DATA OBJECTS
Expand Down
8 changes: 4 additions & 4 deletions neo/rawio/axonrawio.py
Original file line number Diff line number Diff line change
Expand Up @@ -33,7 +33,7 @@
reads abf files - would be good to cross-check
"""
from .baserawio import (BaseRawIO, _signal_channel_dtype, _unit_channel_dtype,
from .baserawio import (BaseRawIO, _signal_channel_dtype, _spike_channel_dtype,
_event_channel_dtype)

import numpy as np
Expand Down Expand Up @@ -215,15 +215,15 @@ def _parse_header(self):
event_channels = np.array(event_channels, dtype=_event_channel_dtype)

# No spikes
unit_channels = []
unit_channels = np.array(unit_channels, dtype=_unit_channel_dtype)
spike_channels = []
spike_channels = np.array(spike_channels, dtype=_spike_channel_dtype)

# fill into header dict
self.header = {}
self.header['nb_block'] = 1
self.header['nb_segment'] = [nb_segment]
self.header['signal_channels'] = sig_channels
self.header['unit_channels'] = unit_channels
self.header['spike_channels'] = spike_channels
self.header['event_channels'] = event_channels

# insert some annotation at some place
Expand Down
65 changes: 36 additions & 29 deletions neo/rawio/baserawio.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,7 @@
BaseRawIO
abstract class which should be overridden to write a RawIO.
RawIO is a new API in neo that is supposed to acces as fast as possible
RawIO is a low level API in neo that is supposed to access as fast as possible
raw data. All IO with these characteristics should/could be rewritten:
* internally use of memmap (or hdf5)
* reading header is quite cheap (not read all the file)
Expand All @@ -21,20 +21,24 @@
* Only one channel set for SpikeTrain (aka Unit) stable along Segment
* AnalogSignal have all the same sampling_rate across all Segments
* t_start/t_stop are the same for many object (SpikeTrain, Event) inside a Segment
* AnalogSignal should all have the same sampling_rate, otherwise they won't be read
at the same time. So signal_group_mode=='split-all' in BaseFromRaw
Signal channels are handled by groups called "streams".
One stream corresponds, at the neo.io level, to one multi-channel AnalogSignal.
A helper class `neo.io.basefromrawio.BaseFromRaw` transform a RawIO to
neo legacy IO. In short all "neo.rawio" classes are also "neo.io"
with lazy reading capability.
A helper class `neo.io.basefromrawio.BaseFromRaw` should transform a RawIO into
a neo legacy IO for free.
With this API the IO has an attribute `header` with the necessary keys.
This `header` attribute is filled in the `_parse_header(...)` method.
See ExampleRawIO as example.
BaseRawIO implements a possible persistent cache system that can be used
by some IOs to avoid very long parse_header(). The idea is that some variable
or vector can be store somewhere (near the fiel, /tmp, any path)
or vector can be stored somewhere (near the file, /tmp, any path)
"""
Expand All @@ -60,19 +64,20 @@
error_header = 'Header is not read yet, do parse_header() first'

_signal_channel_dtype = [
('name', 'U64'),
('id', 'int64'),
('name', 'U64'), # not necessary unique
('id', 'U64'), # must be unique
('sampling_rate', 'float64'),
('dtype', 'U16'),
('units', 'U64'),
('gain', 'float64'),
('offset', 'float64'),
('group_id', 'int64'),
('local_index', 'int64'),
]

_common_sig_characteristics = ['sampling_rate', 'dtype', 'group_id']
_common_sig_characteristics = ['sampling_rate', 'dtype', 'stream_id']

_unit_channel_dtype = [
_spike_channel_dtype = [
('name', 'U64'),
('id', 'U64'),
# for waveform
Expand All @@ -83,10 +88,12 @@
('wf_sampling_rate', 'float64'),
]

# in rawio, event and epoch are handled the same way
# duration is None for event
_event_channel_dtype = [
('name', 'U64'),
('id', 'U64'),
('type', 'S5'), # epoch ot event
('type', 'S5'), # epoch or event
]


Expand Down Expand Up @@ -140,7 +147,7 @@ def parse_header(self):
self.header['nb_block']
self.header['nb_segment']
self.header['signal_channels']
self.header['units_channels']
self.header['spike_channels']
self.header['event_channels']
Expand All @@ -161,7 +168,7 @@ def __repr__(self):
nb_seg = [self.segment_count(i) for i in range(nb_block)]
txt += 'nb_segment: {}\n'.format(nb_seg)

for k in ('signal_channels', 'unit_channels', 'event_channels'):
for k in ('signal_channels', 'spike_channels', 'event_channels'):
ch = self.header[k]
if len(ch) > 8:
chantxt = "[{} ... {}]".format(', '.join(e for e in ch['name'][:4]),
Expand All @@ -180,7 +187,7 @@ def _generate_minimal_annotations(self):
* block_count()
* segment_count()
* signal_channels_count()
* unit_channels_count()
* spike_channels_count()
* event_channels_count()
Usage:
Expand All @@ -195,10 +202,10 @@ def _generate_minimal_annotations(self):
Standard annotation like name/id/file_origin are already generated here.
"""
signal_channels = self.header['signal_channels']
unit_channels = self.header['unit_channels']
spike_channels = self.header['spike_channels']
event_channels = self.header['event_channels']

a = {'blocks': [], 'signal_channels': [], 'unit_channels': [], 'event_channels': []}
a = {'blocks': [], 'signal_channels': [], 'spike_channels': [], 'event_channels': []}
for block_index in range(self.block_count()):
d = {'segments': []}
d['file_origin'] = self.source_name()
Expand All @@ -215,11 +222,11 @@ def _generate_minimal_annotations(self):
d['channel_id'] = signal_channels['id'][c]
a['blocks'][block_index]['segments'][seg_index]['signals'].append(d)

for c in range(unit_channels.size):
for c in range(spike_channels.size):
# use for SpikeTrain.annotations
d = {}
d['name'] = unit_channels['name'][c]
d['id'] = unit_channels['id'][c]
d['name'] = spike_channels['name'][c]
d['id'] = spike_channels['id'][c]
a['blocks'][block_index]['segments'][seg_index]['units'].append(d)

for c in range(event_channels.size):
Expand All @@ -238,13 +245,13 @@ def _generate_minimal_annotations(self):
d['file_origin'] = self._source_name()
a['signal_channels'].append(d)

for c in range(unit_channels.size):
for c in range(spike_channels.size):
# use for Unit.annotations
d = {}
d['name'] = unit_channels['name'][c]
d['id'] = unit_channels['id'][c]
d['name'] = spike_channels['name'][c]
d['id'] = spike_channels['id'][c]
d['file_origin'] = self._source_name()
a['unit_channels'].append(d)
a['spike_channels'].append(d)

for c in range(event_channels.size):
# not used in neo.io at the moment; could be useful one day
Expand All @@ -269,7 +276,7 @@ def _raw_annotate(self, obj_name, chan_index=0, block_index=0, seg_index=0, **ka
elif obj_name in ['signals', 'events', 'units']:
obj_annotations = seg_annotations[obj_name][chan_index]
obj_annotations.update(kargs)
elif obj_name in ['signal_channels', 'unit_channels', 'event_channel']:
elif obj_name in ['signal_channels', 'spike_channels', 'event_channel']:
obj_annotations = self.raw_annotations[obj_name][chan_index]
obj_annotations.update(kargs)

Expand Down Expand Up @@ -320,11 +327,11 @@ def signal_channels_count(self):
"""
return len(self.header['signal_channels'])

def unit_channels_count(self):
def spike_channels_count(self):
"""Return the number of unit (aka spike) channels.
Same along all Blocks and Segment.
"""
return len(self.header['unit_channels'])
return len(self.header['spike_channels'])

def event_channels_count(self):
"""Return the number of event/epoch channels.
Expand Down Expand Up @@ -353,7 +360,7 @@ def _group_signal_channel_characteristics(self):
Group signals channels by same characteristics:
* sampling_rate (global along block and segment)
* group_id (explicite channel group)
* stream_id (explicit channel group)
If all channels have the same characteristics then
`get_analogsignal_chunk` can be called without restriction.
Expand Down Expand Up @@ -534,8 +541,8 @@ def get_spike_raw_waveforms(self, block_index=0, seg_index=0, unit_index=0,
return wf

def rescale_waveforms_to_float(self, raw_waveforms, dtype='float32', unit_index=0):
wf_gain = self.header['unit_channels']['wf_gain'][unit_index]
wf_offset = self.header['unit_channels']['wf_offset'][unit_index]
wf_gain = self.header['spike_channels']['wf_gain'][unit_index]
wf_offset = self.header['spike_channels']['wf_offset'][unit_index]

float_waveforms = raw_waveforms.astype(dtype)

Expand Down
4 changes: 2 additions & 2 deletions neo/rawio/bci2000rawio.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
https://www.bci2000.org/mediawiki/index.php/Technical_Reference:BCI2000_File_Format
"""

from .baserawio import BaseRawIO, _signal_channel_dtype, _unit_channel_dtype, _event_channel_dtype
from .baserawio import BaseRawIO, _signal_channel_dtype, _spike_channel_dtype, _event_channel_dtype

import numpy as np
import re
Expand Down Expand Up @@ -62,7 +62,7 @@ def _parse_header(self):
sig_channels.append((ch_name, chan_id, sr, dtype, units, gain, offset, group_id))
self.header['signal_channels'] = np.array(sig_channels, dtype=_signal_channel_dtype)

self.header['unit_channels'] = np.array([], dtype=_unit_channel_dtype)
self.header['spike_channels'] = np.array([], dtype=_spike_channel_dtype)

# creating event channel for each state variable
event_channels = []
Expand Down
Loading

0 comments on commit 8b5265f

Please sign in to comment.