How to use the phylib.utils.event.emit function in phylib

To help you get started, we've selected a few examples showing how phylib.utils.event.emit is used in public projects.

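All of these examples rely on the same event pattern: a callback named on_<event> is registered with connect, and emit(event, sender, *args) invokes every matching callback with the sender as its first argument and returns a list of the callbacks' return values. Below is a minimal, self-contained sketch of that pattern; the Greeter class and the 'greet' event are invented for illustration and are not part of phylib.

from phylib.utils.event import connect, emit


class Greeter:
    """Hypothetical object used only as the event sender."""
    pass


@connect
def on_greet(sender, name):
    # The function name ('on_greet') determines which event it listens to.
    print("greet emitted by", sender, "with argument", name)
    return len(name)


greeter = Greeter()
# emit() calls every registered on_greet callback with (sender, *args) and
# collects their return values into a list.
results = emit('greet', greeter, "world")
print(results)  # e.g. [5] if this is the only registered callback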

From cortex-lab/phy, phy/cluster/views/amplitude.py (in AmplitudeView.on_mouse_click):
    def on_mouse_click(self, e):
        """Select a time from the amplitude view to display in the trace view."""
        if 'Alt' in e.modifiers:
            mouse_pos = self.canvas.panzoom.window_to_ndc(e.pos)
            time = Range(NDC, self.data_bounds).apply(mouse_pos)[0][0]
            emit('select_time', self, time)
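
Any other component can react to this event by connecting a callback named on_select_time; the view passes itself as the sender and the computed time as the only argument. A minimal sketch of such a listener (the print statement stands in for whatever the receiving view would actually do):

from phylib.utils.event import connect


@connect
def on_select_time(sender, time):
    # 'sender' is the amplitude view that emitted the event.
    print("time selected from", sender, ":", time)
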
From cortex-lab/phy, phy/cluster/clustering.py (in Clustering.assign):
        if len(spike_ids) == 0:
            return UpdateInfo()
        assert len(spike_ids) == len(spike_clusters_rel)
        assert spike_ids.min() >= 0
        assert spike_ids.max() < self._n_spikes, "Some spikes don't exist."

        # Normalize the spike-cluster assignment such that
        # there are only new or dead clusters, not modified clusters.
        # This implies that spikes not explicitly selected, but that
        # belong to clusters affected by the operation, will be assigned
        # to brand new clusters.
        spike_ids, cluster_ids = _extend_assignment(
            spike_ids, self._spike_clusters, spike_clusters_rel, self.new_cluster_id())

        up = self._do_assign(spike_ids, cluster_ids)
        undo_state = emit('request_undo_state', self, up)

        # Add the assignment to the undo stack.
        self._undo_stack.add((spike_ids, cluster_ids, undo_state))

        emit('cluster', self, up)
        return up
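
The 'request_undo_state' emission illustrates the second use of emit(): gathering values from listeners. Every connected on_request_undo_state callback may return a piece of state to be restored later, and emit() returns those values as a list, which assign() pushes onto the undo stack. A sketch of a callback taking part in that exchange; the dictionary contents are purely illustrative:

from phylib.utils.event import connect


@connect
def on_request_undo_state(sender, up):
    # Return whatever should be restored if this action is later undone;
    # emit() collects one such value per connected callback into a list.
    return {'selected_clusters': [2, 3, 5]}
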
From cortex-lab/phy, phy/cluster/clustering.py (in Clustering.redo):
        if item is None:
            # No redo has been performed: abort.
            return

        # NOTE: the undo_state object is only returned when undoing.
        # It represents data associated to the state
        # *before* the action. What might be more useful would be the
        # undo_state object of the next item in the list (if it exists).
        spike_ids, cluster_ids, undo_state = item
        assert spike_ids is not None

        # We apply the new assignment.
        up = self._do_assign(spike_ids, cluster_ids)
        up.history = 'redo'

        emit('cluster', self, up)
        return up
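
redo() ends by emitting the same 'cluster' event as a regular assignment, but with up.history set to 'redo', so listeners can distinguish fresh actions from replayed ones. A sketch of a callback that checks this flag; the logging is illustrative:

from phylib.utils.event import connect


@connect
def on_cluster(sender, up):
    # up.history is 'undo', 'redo', or None for a regular action.
    if up.history == 'redo':
        print("a previously undone action was reapplied:", up)
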
From cortex-lab/phy, phy/cluster/clustering.py (in Clustering.undo):
        # Loop over the history (except the last item because we undo).
        for spike_ids, cluster_ids, _ in self._undo_stack:
            # We update the spike clusters accordingly.
            if spike_ids is not None:
                spike_clusters_new[spike_ids] = cluster_ids

        # What are the spikes affected by the last changes?
        changed = np.nonzero(self._spike_clusters != spike_clusters_new)[0]
        clusters_changed = spike_clusters_new[changed]

        up = self._do_assign(changed, clusters_changed)
        up.history = 'undo'
        # Add the undo_state object from the undone object.
        up.undo_state = undo_state

        emit('cluster', self, up)
        return up
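
When undoing, the undo_state values collected earlier through 'request_undo_state' are attached to the update object as up.undo_state before 'cluster' is emitted, so listeners can restore whatever they saved. A sketch of a callback using it; the 'saved' payload mirrors the illustrative dictionary returned above:

from phylib.utils.event import connect


@connect
def on_cluster(sender, up):
    if up.history == 'undo' and up.undo_state:
        # up.undo_state is the list of values returned by the
        # on_request_undo_state callbacks when the action was first performed.
        saved = up.undo_state[0]
        print("restoring state saved before the undone action:", saved)
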
From cortex-lab/phy, phy/cluster/clustering.py (in Clustering.merge):
        if to is None:
            to = self.new_cluster_id()
        if to < self.new_cluster_id():
            raise ValueError(
                "The new cluster numbers should be higher than {0}.".format(self.new_cluster_id()))

        # NOTE: we could have called self.assign() here, but we don't.
        # We circumvent self.assign() for performance reasons.
        # assign() is a relatively costly operation, whereas merging is a much
        # cheaper operation.

        # Find all spikes in the specified clusters.
        spike_ids = _spikes_in_clusters(self.spike_clusters, cluster_ids)

        up = self._do_merge(spike_ids, cluster_ids, to)
        undo_state = emit('request_undo_state', self, up)

        # Add to stack.
        self._undo_stack.add((spike_ids, [to], undo_state))

        emit('cluster', self, up)
        return up
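
Finally, connect() can also be called with a sender= keyword argument, as in phy's own plugin examples, so a listener subscribes only to events emitted by one particular object, and unconnect() removes a callback. A sketch, assuming that sender filtering behaves as it does in phy:

from phylib.utils.event import connect, unconnect

clustering = ...  # placeholder for an existing Clustering instance


@connect(sender=clustering)
def on_cluster(sender, up):
    # Only called for 'cluster' events emitted with this particular sender.
    print("clustering update:", up)


# Remove the callback once it is no longer needed.
unconnect(on_cluster)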