Skip to content

Commit 5c49aa9

Browse files
authored
Merge branch 'master' into update-ci
2 parents f18e440 + 873dcb5 commit 5c49aa9

9 files changed

Lines changed: 232 additions & 41 deletions

File tree

.github/workflows/io-test.yml

Lines changed: 0 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -119,13 +119,6 @@ jobs:
119119
# run: |
120120
# pip install --no-dependencies -e .
121121

122-
- name: Install wine
123-
run: |
124-
sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
125-
sudo dpkg --add-architecture i386
126-
sudo apt-get update -qq
127-
sudo apt-get install -yqq --allow-downgrades libc6:i386 libgcc-s1:i386 libstdc++6:i386 wine
128-
129122
- name: Pip list
130123
run: |
131124
pip list
Lines changed: 109 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,109 @@
1+
name: NeoPlexon2Test
2+
3+
on:
4+
workflow_dispatch:
5+
schedule:
6+
- cron: "0 12 * * 0" # weekly at noon UTC on Sundays
7+
8+
9+
jobs:
10+
build-and-test:
11+
name: Test on (${{ inputs.os }}) (${{ matrix.python-version}}) (${{ matrix.numpy-version }})
12+
runs-on: ${{ inputs.os }}
13+
strategy:
14+
fail-fast: true
15+
matrix:
16+
python-version: ['3.9', '3.12']
17+
numpy-version: ['1.26', '2.0']
18+
defaults:
19+
# by default run in bash mode (required for conda usage)
20+
run:
21+
shell: bash -l {0}
22+
steps:
23+
24+
- name: Checkout repository
25+
uses: actions/checkout@v4
26+
27+
- name: Get current year-month
28+
id: date
29+
run: echo "date=$(date +'%Y-%m')" >> $GITHUB_OUTPUT
30+
31+
- name: Get ephy_testing_data current head hash
32+
# the key depends on the last commit of the repo https://gin.g-node.org/NeuralEnsemble/ephy_testing_data.git
33+
id: ephy_testing_data
34+
run: |
35+
echo "dataset_hash=$(git ls-remote https://gin.g-node.org/NeuralEnsemble/ephy_testing_data.git HEAD | cut -f1)" >> $GITHUB_OUTPUT
36+
37+
- uses: actions/cache/restore@v4
38+
# Loading cache of ephys_testing_dataset
39+
id: cache-datasets
40+
with:
41+
path: ~/ephy_testing_data
42+
key: ${{ runner.os }}-datasets-${{ steps.ephy_testing_data.outputs.dataset_hash }}
43+
restore-keys: ${{ runner.os }}-datasets-
44+
45+
- uses: conda-incubator/setup-miniconda@v3
46+
with:
47+
activate-environment: neo-test-env-${{ matrix.python-version }}
48+
python-version: "${{ matrix.python-version }}"
49+
50+
- name: Install testing dependencies
51+
# testing environment is only created from yml if no cache was found
52+
# restore-key hits should result in `cache-hit` == 'false'
53+
#if: steps.cache-conda-env.outputs.cache-hit != 'true'
54+
run: |
55+
conda install pip numpy=${{ matrix.numpy-version }} -c conda-forge
56+
# this command is for updating the cache. We are testing its removal.
57+
# conda env update --name neo-test-env-${{ matrix.python-version }} --file environment_testing.yml --prune
58+
59+
- name: Install git-annex
60+
# this is the trick from the spikeinterface repo for getting git-annex to work with datalad
61+
# see https://github.com/SpikeInterface/spikeinterface/pull/3877 for more info
62+
shell: bash
63+
run: |
64+
pip install datalad-installer
65+
datalad-installer --sudo ok git-annex --method datalad/packages
66+
git config --global filter.annex.process "git-annex filter-process" # recommended for efficiency
67+
68+
- name: Configure git
69+
run: |
70+
git config --global user.email "neo_ci@fake_mail.com"
71+
git config --global user.name "neo CI"
72+
73+
- name: Python version
74+
run: |
75+
which python
76+
python --version
77+
78+
- name: Install neo including dependencies
79+
# installation with dependencies is only required if no cache was found
80+
# restore-key hits should result in `cache-hit` == 'false'
81+
# if: steps.cache-conda-env.outputs.cache-hit != 'true'
82+
run: |
83+
pip install --upgrade -e .
84+
pip install .[test]
85+
86+
87+
- name: Install wine
88+
run: |
89+
sudo rm -f /etc/apt/sources.list.d/microsoft-prod.list
90+
sudo dpkg --add-architecture i386
91+
sudo apt-get update -qq
92+
sudo apt-get install -yqq --allow-downgrades libc6:i386 libgcc-s1:i386 libstdc++6:i386 wine
93+
94+
- name: Pip list
95+
run: |
96+
pip list
97+
98+
- name: Conda list
99+
run: |
100+
conda list
101+
102+
- name: Test with pytest
103+
env:
104+
HDF5_PLUGIN_PATH: ${{ github.workspace }}/hdf5_local_plugin_path
105+
PLEXON2_TEST: true
106+
run: |
107+
# only neo.rawio and neo.io
108+
pytest --cov=neo neo/test/rawiotest
109+
pytest --cov=neo neo/test/iotest

doc/source/authors.rst

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -71,7 +71,7 @@ and may not be the current affiliation of a contributor.
7171
* Daniel P. Crepeau [30]
7272
* Divyansh Gupta [31]
7373
* Nate Dolensek [3]
74-
* Philipp Hornauer [32]
74+
* Philipp Hornauer [32, 45]
7575
* Robert Wolff [42]
7676
* Jules Lebert [33]
7777
* Benjamin Heasly
@@ -140,6 +140,7 @@ and may not be the current affiliation of a contributor.
140140
42. Istituto Italiano di Tecnologia (IIT), Genoa, Italy
141141
43. University of Genoa, Italy
142142
44. AquiNeuro, SAS
143+
45. Maxwell Biosystems AG
143144

144145

145146

neo/rawio/blackrockrawio.py

Lines changed: 10 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -597,10 +597,17 @@ def _parse_header(self):
597597
for c in range(spike_channels.size):
598598
st_ann = seg_ann["spikes"][c]
599599
channel_id, unit_id = self.internal_unit_ids[c]
600-
unit_tag = {0: "unclassified", 255: "noise"}.get(unit_id, str(unit_id))
601600
st_ann["channel_id"] = channel_id
602601
st_ann["unit_id"] = unit_id
603-
st_ann["unit_tag"] = unit_tag
602+
if unit_id == 0:
603+
st_ann["unit_classification"] = "unclassified"
604+
elif 1 <= unit_id <= 16:
605+
st_ann["unit_classification"] = "sorted"
606+
elif unit_id == 255:
607+
st_ann["unit_classification"] = "noise"
608+
else: # 17-254 are reserved
609+
st_ann["unit_classification"] = "reserved"
610+
st_ann["unit_tag"] = st_ann["unit_classification"]
604611
st_ann["description"] = f"SpikeTrain channel_id: {channel_id}, unit_id: {unit_id}"
605612
st_ann["file_origin"] = self._filenames["nev"] + ".nev"
606613

@@ -1258,9 +1265,7 @@ def __read_nev_data(self, nev_data_masks, nev_data_types):
12581265
# based on blackrock's own code this is okay so applying an int to round down is necessary to obtain the
12591266
# memory map of full packets and toss the partial packet.
12601267
# See reference: https://github.com/BlackrockNeurotech/Python-Utilities/blob/fa75aa671680306788e10d3d8dd625f9da4ea4f6/brpylib/brpylib.py#L580-L587
1261-
n_packets = int(
1262-
(self.__get_file_size(filename) - header_size) / data_size
1263-
)
1268+
n_packets = int((self.__get_file_size(filename) - header_size) / data_size)
12641269

12651270
raw_data = np.memmap(
12661271
filename,

neo/rawio/maxwellrawio.py

Lines changed: 20 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -242,18 +242,20 @@ def _get_analogsignal_chunk(self, block_index, seg_index, i_start, i_stop, strea
242242
raise (e)
243243

244244

245-
_hdf_maxwell_error = """Maxwell file format is based on HDF5.
246-
The internal compression requires a custom plugin!!!
247-
This is a big pain for the end user.
248-
You, as a end user, should ask Maxwell company to change this.
249-
Please visit this page and install the missing decompression libraries:
250-
https://share.mxwbio.com/d/4742248b2e674a85be97/
251-
Then, link the decompression library by setting the `HDF5_PLUGIN_PATH` to your
252-
installation location, e.g. via
245+
_hdf_maxwell_error = """The MaxWell file compression requires a custom plugin.
246+
You can use the auto_install_maxwell_hdf5_compression_plugin() function or
247+
(if it fails) install it manually:
248+
Download the missing decompression library:
249+
https://share.mxwbio.com/d/7f2d1e98a1724a1b8b35
250+
Then, link the decompression library by setting the `HDF5_PLUGIN_PATH` to its location,
251+
e.g. directly in Python via:
253252
os.environ['HDF5_PLUGIN_PATH'] = '/path/to/custom/hdf5/plugin/'
254-
255-
Alternatively, you can use the auto_install_maxwell_hdf5_compression_plugin() below
256-
function that do it automagically.
253+
or in your shell via:
254+
export HDF5_PLUGIN_PATH=/path/to/custom/hdf5/plugin/
255+
You can also set the `HDF5_PLUGIN_PATH` environment variable in your shell
256+
configuration file (e.g. .bashrc, .bash_profile, .zshrc, etc.) to make it
257+
permanent.
258+
See https://mxw.bio/MxW_Doc_Installing_Decompression_Library_to_load_MaxLab_Live_Recordings for more details.
257259
"""
258260

259261

@@ -267,13 +269,17 @@ def auto_install_maxwell_hdf5_compression_plugin(hdf5_plugin_path=None, force_do
267269
hdf5_plugin_path.mkdir(exist_ok=True)
268270

269271
if platform.system() == "Linux":
270-
remote_lib = "https://share.mxwbio.com/d/4742248b2e674a85be97/files/?p=%2FLinux%2Flibcompression.so&dl=1"
272+
remote_lib = "https://share.mxwbio.com/d/7f2d1e98a1724a1b8b35/files/?p=%2FLinux%2Flibcompression.so&dl=1"
271273
local_lib = hdf5_plugin_path / "libcompression.so"
272274
elif platform.system() == "Darwin":
273-
remote_lib = "https://share.mxwbio.com/d/4742248b2e674a85be97/files/?p=%2FMacOS%2Flibcompression.dylib&dl=1"
275+
if platform.machine() == "arm64":
276+
remote_lib = "https://share.mxwbio.com/d/7f2d1e98a1724a1b8b35/files/?p=%2FMacOS%2FMac_arm64%2Flibcompression.dylib&dl=1"
277+
else:
278+
# Assuming x86_64 for MacOS
279+
remote_lib = "https://share.mxwbio.com/d/7f2d1e98a1724a1b8b35/files/?p=%2FMacOS%2FMac_x86_64%2Flibcompression.dylib&dl=1"
274280
local_lib = hdf5_plugin_path / "libcompression.dylib"
275281
elif platform.system() == "Windows":
276-
remote_lib = "https://share.mxwbio.com/d/4742248b2e674a85be97/files/?p=%2FWindows%2Fcompression.dll&dl=1"
282+
remote_lib = "https://share.mxwbio.com/d/7f2d1e98a1724a1b8b35/files/?p=%2FWindows%2Fcompression.dll&dl=1"
277283
local_lib = hdf5_plugin_path / "compression.dll"
278284

279285
if not force_download and local_lib.is_file():

neo/rawio/spikeglxrawio.py

Lines changed: 55 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -53,6 +53,7 @@
5353
from pathlib import Path
5454
import os
5555
import re
56+
from warnings import warn
5657

5758
import numpy as np
5859

@@ -76,7 +77,9 @@ class SpikeGLXRawIO(BaseRawWithBufferApiIO):
7677
dirname: str, default: ''
7778
The spikeglx folder containing meta/bin files
7879
load_sync_channel: bool, default: False
79-
The last channel (SY0) of each stream is a fake channel used for synchronisation
80+
Can be used to load the synch stream as the last channel of the neural data.
81+
This option is deprecated and will be removed in version 0.15.
82+
From versions higher than 0.14.1 the sync channel is always loaded as a separate stream.
8083
load_channel_location: bool, default: False
8184
If True probeinterface is used to load the channel locations from the directory
8285
@@ -109,6 +112,13 @@ def __init__(self, dirname="", load_sync_channel=False, load_channel_location=Fa
109112
BaseRawWithBufferApiIO.__init__(self)
110113
self.dirname = dirname
111114
self.load_sync_channel = load_sync_channel
115+
if load_sync_channel:
116+
warn(
117+
"The load_sync_channel=True option is deprecated and will be removed in version 0.15 \n"
118+
"The sync channel is now loaded as a separate stream by default and should be accessed as such. ",
119+
DeprecationWarning,
120+
stacklevel=2,
121+
)
112122
self.load_channel_location = load_channel_location
113123

114124
def _source_name(self):
@@ -152,6 +162,8 @@ def _parse_header(self):
152162
signal_buffers = []
153163
signal_streams = []
154164
signal_channels = []
165+
sync_stream_id_to_buffer_id = {}
166+
155167
for stream_name in stream_names:
156168
# take first segment
157169
info = self.signals_info_dict[0, stream_name]
@@ -168,6 +180,21 @@ def _parse_header(self):
168180
for local_chan in range(info["num_chan"]):
169181
chan_name = info["channel_names"][local_chan]
170182
chan_id = f"{stream_name}#{chan_name}"
183+
184+
# Sync channel
185+
if (
186+
"nidq" not in stream_name
187+
and "SY0" in chan_name
188+
and not self.load_sync_channel
189+
and local_chan == info["num_chan"] - 1
190+
):
191+
# This is a sync channel and should be added as its own stream
192+
sync_stream_id = f"{stream_name}-SYNC"
193+
sync_stream_id_to_buffer_id[sync_stream_id] = buffer_id
194+
stream_id_for_chan = sync_stream_id
195+
else:
196+
stream_id_for_chan = stream_id
197+
171198
signal_channels.append(
172199
(
173200
chan_name,
@@ -177,25 +204,33 @@ def _parse_header(self):
177204
info["units"],
178205
info["channel_gains"][local_chan],
179206
info["channel_offsets"][local_chan],
180-
stream_id,
207+
stream_id_for_chan,
181208
buffer_id,
182209
)
183210
)
184211

185-
# all channel by dafult unless load_sync_channel=False
212+
# all channel by default unless load_sync_channel=False
186213
self._stream_buffer_slice[stream_id] = None
214+
187215
# check sync channel validity
188216
if "nidq" not in stream_name:
189217
if not self.load_sync_channel and info["has_sync_trace"]:
190-
# the last channel is remove from the stream but not from the buffer
191-
last_chan = signal_channels[-1]
192-
last_chan = last_chan[:-2] + ("", buffer_id)
193-
signal_channels = signal_channels[:-1] + [last_chan]
218+
# the last channel is removed from the stream but not from the buffer
194219
self._stream_buffer_slice[stream_id] = slice(0, -1)
220+
221+
# Add a buffer slice for the sync channel
222+
sync_stream_id = f"{stream_name}-SYNC"
223+
self._stream_buffer_slice[sync_stream_id] = slice(-1, None)
224+
195225
if self.load_sync_channel and not info["has_sync_trace"]:
196226
raise ValueError("SYNC channel is not present in the recording. " "Set load_sync_channel to False")
197227

198228
signal_buffers = np.array(signal_buffers, dtype=_signal_buffer_dtype)
229+
230+
# Add sync channels as their own streams
231+
for sync_stream_id, buffer_id in sync_stream_id_to_buffer_id.items():
232+
signal_streams.append((sync_stream_id, sync_stream_id, buffer_id))
233+
199234
signal_streams = np.array(signal_streams, dtype=_signal_stream_dtype)
200235
signal_channels = np.array(signal_channels, dtype=_signal_channel_dtype)
201236

@@ -237,6 +272,14 @@ def _parse_header(self):
237272
t_start = frame_start / sampling_frequency
238273

239274
self._t_starts[stream_name][seg_index] = t_start
275+
276+
# This needs special logic because sync streams are not present in stream_names
277+
if f"{stream_name}-SYNC" in signal_streams["name"]:
278+
sync_stream_name = f"{stream_name}-SYNC"
279+
if sync_stream_name not in self._t_starts:
280+
self._t_starts[sync_stream_name] = {}
281+
self._t_starts[sync_stream_name][seg_index] = t_start
282+
240283
t_stop = info["sample_length"] / info["sampling_rate"]
241284
self._t_stops[seg_index] = max(self._t_stops[seg_index], t_stop)
242285

@@ -266,6 +309,10 @@ def _parse_header(self):
266309
# need probeinterface to be installed
267310
import probeinterface
268311

312+
# Skip for sync streams
313+
if "SYNC" in stream_name:
314+
continue
315+
269316
info = self.signals_info_dict[seg_index, stream_name]
270317
if "imroTbl" in info["meta"] and info["stream_kind"] == "ap":
271318
# only for ap channel
@@ -529,7 +576,7 @@ def extract_stream_info(meta_file, meta):
529576
if (
530577
"imDatPrb_type" not in meta
531578
or meta["imDatPrb_type"] == "0"
532-
or meta["imDatPrb_type"] in ("1015", "1016", "1022", "1030", "1031", "1032", "1100", "1121", "1300")
579+
or meta["imDatPrb_type"] in ("1015", "1016", "1022", "1030", "1031", "1032", "1100", "1121", "1123","1300")
533580
):
534581
# This work with NP 1.0 case with different metadata versions
535582
# https://github.com/billkarsh/SpikeGLX/blob/15ec8898e17829f9f08c226bf04f46281f106e5f/Markdown/Metadata_30.md

neo/test/iotest/test_plexon2io.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3,6 +3,7 @@
33
"""
44

55
import unittest
6+
import os
67

78
from neo.io import Plexon2IO
89
from neo.test.iotest.common_io_test import BaseTestIO
@@ -17,8 +18,9 @@
1718
except (ImportError, TimeoutError):
1819
HAVE_PYPL2 = False
1920

21+
TEST_PLEXON2 = bool(os.getenv("PLEXON2_TEST"))
2022

21-
@unittest.skipUnless(HAVE_PYPL2, "requires pypl package and all its dependencies")
23+
@unittest.skipUnless(HAVE_PYPL2 and TEST_PLEXON2, "requires pypl package and all its dependencies")
2224
class TestPlexon2IO(BaseTestIO, unittest.TestCase):
2325
entities_to_download = TestPlexon2RawIO.entities_to_download
2426
entities_to_test = TestPlexon2RawIO.entities_to_test

0 commit comments

Comments
 (0)