[WIP] Automatic handling of relationships between objects #588

Changes from all commits
@@ -0,0 +1,150 @@

```python
# -*- coding: utf-8 -*-
"""
This module implements :class:`SpikeTrainList`, a pseudo-list
which takes care of relationships between Neo parent-child objects.

In addition, it supports a multiplexed representation of spike trains
(all times in a single array, with a second array indicating which
neuron/channel the spike is from).
"""

import numpy as np

from .spiketrain import SpikeTrain


class SpikeTrainList(object):
    """A list-like container for :class:`SpikeTrain` objects.

    Takes care of relationships with the parent :class:`Segment`
    (each contained spike train's ``segment`` attribute is kept up to date),
    and can also hold spike trains in a multiplexed representation:
    a single array of spike times plus an array of channel/neuron ids.
    """

    def __init__(self, items=None, segment=None):
        """Initialize self"""
        self._items = items
        self._spike_time_array = None
        self._channel_id_array = None
        self._all_channel_ids = None
        self._spiketrain_metadata = None
        self.segment = segment

    def __iter__(self):
        """Implement iter(self)"""
        if self._items is None:
            self._spiketrains_from_array()
        for item in self._items:
            yield item

    def __getitem__(self, i):
        """x.__getitem__(y) <==> x[y]"""
        if self._items is None:
            self._spiketrains_from_array()
        return self._items[i]

    def __str__(self):
        """Return str(self)"""
        if self._items is None:
            if self._spike_time_array is None:
                return str([])
            else:
                return "SpikeTrainList containing {} spikes from {} neurons".format(
                    self._spike_time_array.size,
                    np.unique(self._channel_id_array).size)  # count distinct channels, not one per spike
        else:
            return str(self._items)

    def __len__(self):
        """Return len(self)"""
        if self._items is None:
            if self._all_channel_ids is not None:
                return len(self._all_channel_ids)
            elif self._channel_id_array is not None:
                return np.unique(self._channel_id_array).size
            else:
                return 0
        else:
            return len(self._items)

    def __add__(self, other):
        """Return self + other"""
        if isinstance(other, self.__class__):
            if self._items is None or other._items is None:
                # todo: update self._spike_time_array, etc.
                raise NotImplementedError
            else:
                self._items.extend(other._items)
            return self
        elif other and isinstance(other[0], SpikeTrain):
            if self._items is None:
                # materialize the list representation before extending it
                self._spiketrains_from_array()
            for obj in other:
                obj.segment = self.segment
            self._items.extend(other)
            return self
        else:
            return self._items + other

    def __radd__(self, other):
        """Return other + self"""
        if self._items is None:
            self._spiketrains_from_array()
        other.extend(self._items)
        return other

    def append(self, obj):
        """L.append(object) -> None -- append object to end"""
        if not isinstance(obj, SpikeTrain):
            raise ValueError("Can only append SpikeTrain objects")
        if self._items is None:
            self._spiketrains_from_array()
        obj.segment = self.segment
        self._items.append(obj)

    def extend(self, iterable):
        """L.extend(iterable) -> None -- extend list by appending elements from the iterable"""
        if self._items is None:
            self._spiketrains_from_array()
        for obj in iterable:
            obj.segment = self.segment
        self._items.extend(iterable)
```

Member: maybe it's necessary to also verify that the items of `iterable` are `SpikeTrain` instances

Member (Author): yes, more robust checks are needed
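
To illustrate what such a check might look like, here is a minimal sketch (not part of this diff) of an `extend` that validates its input first; it assumes it would replace the `extend` method shown above and relies on the same `SpikeTrain` import and `_spiketrains_from_array` helper:

```python
    def extend(self, iterable):
        """L.extend(iterable) -> None -- extend list by appending elements from the iterable"""
        items = list(iterable)  # materialize so generators can be checked before any mutation
        if not all(isinstance(obj, SpikeTrain) for obj in items):
            raise ValueError("Can only add SpikeTrain objects")
        if self._items is None:
            self._spiketrains_from_array()
        for obj in items:
            obj.segment = self.segment
        self._items.extend(items)
```

Converting the iterable to a list up front means the type check runs before the stored list is modified, so a failed `extend` leaves the `SpikeTrainList` unchanged.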

```python
    @classmethod
    def from_spike_time_array(cls, spike_time_array, channel_id_array,
                              all_channel_ids=None, units='ms',
                              t_start=None, t_stop=None):
        """Create a SpikeTrainList object from an array of spike times
        and an array of channel ids."""
        obj = cls()
        obj._spike_time_array = spike_time_array
        obj._channel_id_array = channel_id_array
        obj._all_channel_ids = all_channel_ids
        obj._spiketrain_metadata = {
            "units": units,
            "t_start": t_start,
            "t_stop": t_stop
        }
        return obj
```

Member (on `_spiketrains_from_array`, below): This function is quite expensive, as it generates all SpikeTrain objects explicitly. As it is used in many of the SpikeTrainList methods, this would cause quite a bit of overhead. Maybe it would be good to use it in only as few places as possible?

Member (Author): It's only ever used once, because the generated SpikeTrain objects are cached in `self._items`.

Member: Yes, you are right. So in this PR the […]

Member (Author): you're right that merging two array-representation SpikeTrainLists should avoid creating `SpikeTrain` objects. The general idea is to keep data in the representation it arrives in as long as possible, to avoid unnecessary transformations, i.e. the "reference/base" representation depends on how the object was initialized. Note that the […]
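
As an illustration of that idea, here is a hypothetical sketch (not part of the diff) of how two array-representation SpikeTrainLists could be merged purely at the array level, which is what the `NotImplementedError` branch of `__add__` above would need to do. The function name is invented, it reaches into private attributes for brevity, and it assumes both operands carry compatible `units`, `t_start` and `t_stop` metadata:

```python
import numpy as np


def merge_array_representations(a, b):
    """Combine two array-representation SpikeTrainLists without building SpikeTrain objects."""
    # SpikeTrainList here is the class added in this diff, assumed to be in scope
    times = np.concatenate([a._spike_time_array, b._spike_time_array])
    channels = np.concatenate([a._channel_id_array, b._channel_id_array])
    return SpikeTrainList.from_spike_time_array(
        times, channels,
        all_channel_ids=None,  # or the union of a._all_channel_ids and b._all_channel_ids
        **a._spiketrain_metadata)
```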

```python
    def _spiketrains_from_array(self):
        """Convert multiplexed spike time data into a list of SpikeTrain objects"""
        if self._spike_time_array is None:
            self._items = []
        else:
            self._items = []  # start from an empty list before appending per-channel trains
            if self._all_channel_ids is None:
                all_channel_ids = np.unique(self._channel_id_array)
            else:
                all_channel_ids = self._all_channel_ids
            for channel_id in all_channel_ids:
                mask = self._channel_id_array == channel_id
                times = self._spike_time_array[mask]
                spiketrain = SpikeTrain(times, **self._spiketrain_metadata)
                spiketrain.segment = self.segment
                self._items.append(spiketrain)

    @property
    def multiplexed(self):
        """Return spike trains as a pair of arrays.

        The first array contains the ids of the channels/neurons that produced each spike,
        the second array contains the times of the spikes.
        """
        if self._spike_time_array is None:
            # need to convert list of SpikeTrains into multiplexed spike times array
            raise NotImplementedError
        return self._channel_id_array, self._spike_time_array
```
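
For context, a rough usage sketch based only on the methods shown in this diff; the import path is a guess and not taken from the PR, and the inline values are purely illustrative:

```python
import numpy as np
from neo.core.spiketrainlist import SpikeTrainList  # hypothetical import path

# multiplexed representation: one array of spike times, one array of channel ids
spike_times = np.array([0.5, 1.2, 3.3, 4.1, 7.8])
channel_ids = np.array([0, 1, 0, 2, 1])

stl = SpikeTrainList.from_spike_time_array(
    spike_times, channel_ids, all_channel_ids=[0, 1, 2],
    units="ms", t_start=0.0, t_stop=10.0)

print(len(stl))               # 3 channels, no SpikeTrain objects created yet
ids, times = stl.multiplexed  # returns the two arrays unchanged
for st in stl:                # first iteration builds and caches the SpikeTrain objects
    print(st)
```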

Member: In principle this would need to check if `other` is iterable and assert that all contained items are `SpikeTrain` instances, no?

Member (Author): yes, this was a shortcut, but it would be more robust to do a full check