Open
Description
Hi,
I am currently using the SpikeGLX extractor for subsequent Kilosort3 processing. The spikeinterface version is 0.98.1.
The read_spikeglx function throws the following error.
In [6]: ls /mnt/boninlab/boninlabwip2024/data/ephys/shahriar/Raw/Chronic/SH050/230911/trof/
sh0123br200bB_g0_tcat.imec0.ap.bin* sh0123br200_bigopenfield_dim_g0_tcat.imec0.ap.bin*
sh0123br200bB_g0_tcat.imec0.ap.meta* sh0123br200_bigopenfield_dim_g0_tcat.imec0.ap.meta*
In [7]: spikeinterface.__version__
Out[7]: '0.98.1'
In [8]: recordings = se.read_spikeglx( Path("/mnt/boninlab/boninlabwip2024/data/ephys/shahriar/Raw/Chronic/SH050/230911/trof/") , stream_id = "imec0.ap")
---------------------------------------------------------------------------
AssertionError Traceback (most recent call last)
Cell In[8], line 1
----> 1 recordings = se.read_spikeglx( Path("/mnt/boninlab/boninlabwip2024/data/ephys/shahriar/Raw/Chronic/SH050/230911/trof/") , stream_id = "imec0.ap")
File /opt/anaconda/anaconda3/envs/spikeinterface_0.98.1/lib/python3.10/site-packages/spikeinterface/extractors/neoextractors/spikeglx.py:51, in SpikeGLXRecordingExtractor.__init__(self, folder_path, load_sync_channel, stream_id, stream_name, all_annotations)
49 def __init__(self, folder_path, load_sync_channel=False, stream_id=None, stream_name=None, all_annotations=False):
50 neo_kwargs = self.map_to_neo_kwargs(folder_path, load_sync_channel=load_sync_channel)
---> 51 NeoBaseRecordingExtractor.__init__(
52 self, stream_id=stream_id, stream_name=stream_name, all_annotations=all_annotations, **neo_kwargs
53 )
55 # open the corresponding stream probe for LF and AP
56 # if load_sync_channel=False
57 if "nidq" not in self.stream_id and not load_sync_channel:
File /opt/anaconda/anaconda3/envs/spikeinterface_0.98.1/lib/python3.10/site-packages/spikeinterface/extractors/neoextractors/neobaseextractor.py:185, in NeoBaseRecordingExtractor.__init__(self, stream_id, stream_name, block_index, all_annotations, use_names_as_ids, **neo_kwargs)
156 def __init__(
157 self,
158 stream_id: Optional[str] = None,
(...)
163 **neo_kwargs: Dict[str, Any],
164 ) -> None:
165 """
166 Initialize a NeoBaseRecordingExtractor instance.
167
(...)
182
183 """
--> 185 _NeoBaseExtractor.__init__(self, block_index, **neo_kwargs)
187 kwargs = dict(all_annotations=all_annotations)
188 if block_index is not None:
File /opt/anaconda/anaconda3/envs/spikeinterface_0.98.1/lib/python3.10/site-packages/spikeinterface/extractors/neoextractors/neobaseextractor.py:25, in _NeoBaseExtractor.__init__(self, block_index, **neo_kwargs)
23 def __init__(self, block_index, **neo_kwargs):
24 if not hasattr(self, "neo_reader"): # Avoid double initialization
---> 25 self.neo_reader = self.get_neo_io_reader(self.NeoRawIOClass, **neo_kwargs)
27 if self.neo_reader.block_count() > 1 and block_index is None:
28 raise Exception(
29 "This dataset is multi-block. Spikeinterface can load one block at a time. "
30 "Use 'block_index' to select the block to be loaded."
31 )
File /opt/anaconda/anaconda3/envs/spikeinterface_0.98.1/lib/python3.10/site-packages/spikeinterface/extractors/neoextractors/neobaseextractor.py:64, in _NeoBaseExtractor.get_neo_io_reader(cls, raw_class, **neo_kwargs)
62 neoIOclass = getattr(rawio_module, raw_class)
63 neo_reader = neoIOclass(**neo_kwargs)
---> 64 neo_reader.parse_header()
66 return neo_reader
File /opt/anaconda/anaconda3/envs/spikeinterface_0.98.1/lib/python3.10/site-packages/neo/rawio/baserawio.py:179, in BaseRawIO.parse_header(self)
166 def parse_header(self):
167 """
168 This must parse the file header to get all stuff for fast use later on.
169
(...)
177
178 """
--> 179 self._parse_header()
180 self._check_stream_signal_channel_characteristics()
File /opt/anaconda/anaconda3/envs/spikeinterface_0.98.1/lib/python3.10/site-packages/neo/rawio/spikeglxrawio.py:94, in SpikeGLXRawIO._parse_header(self)
91 for info in self.signals_info_list:
92 # key is (seg_index, stream_name)
93 key = (info['seg_index'], info['stream_name'])
---> 94 assert key not in self.signals_info_dict
95 self.signals_info_dict[key] = info
97 # create memmap
AssertionError:
In [9]:
As shown in the ls
output above, there are two data files and two meta files in the input directory, which I intend to concatenate afterwards via spikeinterface.concatenate_recordings.
Could you please help me understand what the problem is?
I’ve also attached the meta file.
Your assistance is greatly appreciated.
Thank you,
Shahriar.
sh0123br200bB_g0_tcat.imec0.ap.meta.txt
sh0123br200_bigopenfield_dim_g0_tcat.imec0.ap.meta.txt