Dataset schema (one record per function; the fields below appear in this order in each record):

repo             : string, 7–55 chars
path             : string, 4–127 chars
func_name        : string, 1–88 chars
original_string  : string, 75–19.8k chars
language         : string, 1 class (python)
code             : string, 75–19.8k chars (verbatim copy of original_string)
code_tokens      : list
docstring        : string, 3–17.3k chars
docstring_tokens : list
sha              : string, 40 chars
url              : string, 87–242 chars
partition        : string, 1 class (train)
kwikteam/phy
phy/cluster/clustering.py
_extend_spikes
def _extend_spikes(spike_ids, spike_clusters):
    """Return all spikes belonging to the clusters containing the specified
    spikes."""
    # We find the spikes belonging to modified clusters.
    # What are the old clusters that are modified by the assignment?
    old_spike_clusters = spike_clusters[spike_ids]
    unique_clusters = _unique(old_spike_clusters)
    # Now we take all spikes from these clusters.
    changed_spike_ids = _spikes_in_clusters(spike_clusters, unique_clusters)
    # These are the new spikes that need to be reassigned.
    extended_spike_ids = np.setdiff1d(changed_spike_ids, spike_ids,
                                      assume_unique=True)
    return extended_spike_ids
python
[ "def", "_extend_spikes", "(", "spike_ids", ",", "spike_clusters", ")", ":", "# We find the spikes belonging to modified clusters.", "# What are the old clusters that are modified by the assignment?", "old_spike_clusters", "=", "spike_clusters", "[", "spike_ids", "]", "unique_clusters", "=", "_unique", "(", "old_spike_clusters", ")", "# Now we take all spikes from these clusters.", "changed_spike_ids", "=", "_spikes_in_clusters", "(", "spike_clusters", ",", "unique_clusters", ")", "# These are the new spikes that need to be reassigned.", "extended_spike_ids", "=", "np", ".", "setdiff1d", "(", "changed_spike_ids", ",", "spike_ids", ",", "assume_unique", "=", "True", ")", "return", "extended_spike_ids" ]
Return all spikes belonging to the clusters containing the specified spikes.
[ "Return", "all", "spikes", "belonging", "to", "the", "clusters", "containing", "the", "specified", "spikes", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/clustering.py#L29-L41
train
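A minimal usage sketch of the record above (hypothetical data; `_extend_spikes` is a private helper in phy.cluster.clustering, imported here only for illustration):

import numpy as np
from phy.cluster.clustering import _extend_spikes  # private helper

spike_clusters = np.array([0, 0, 1, 1, 2])  # cluster id of each spike
spike_ids = np.array([1, 2])                # spikes being reassigned
# Spikes 1 and 2 touch clusters 0 and 1, so the remaining spikes of those
# clusters (0 and 3) must be reassigned too.
print(_extend_spikes(spike_ids, spike_clusters))  # expected: [0 3]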
kwikteam/phy
phy/cluster/clustering.py
Clustering.reset
def reset(self):
    """Reset the clustering to the original clustering.

    All changes are lost.

    """
    self._undo_stack.clear()
    self._spike_clusters = self._spike_clusters_base
    self._new_cluster_id = self._new_cluster_id_0
python
[ "def", "reset", "(", "self", ")", ":", "self", ".", "_undo_stack", ".", "clear", "(", ")", "self", ".", "_spike_clusters", "=", "self", ".", "_spike_clusters_base", "self", ".", "_new_cluster_id", "=", "self", ".", "_new_cluster_id_0" ]
Reset the clustering to the original clustering. All changes are lost.
[ "Reset", "the", "clustering", "to", "the", "original", "clustering", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/clustering.py#L178-L186
train
kwikteam/phy
phy/cluster/clustering.py
Clustering._do_assign
def _do_assign(self, spike_ids, new_spike_clusters):
    """Make spike-cluster assignments after the spike selection has been
    extended to full clusters."""

    # Ensure spike_clusters has the right shape.
    spike_ids = _as_array(spike_ids)
    if len(new_spike_clusters) == 1 and len(spike_ids) > 1:
        new_spike_clusters = (np.ones(len(spike_ids), dtype=np.int64) *
                              new_spike_clusters[0])
    old_spike_clusters = self._spike_clusters[spike_ids]

    assert len(spike_ids) == len(old_spike_clusters)
    assert len(new_spike_clusters) == len(spike_ids)

    # Update the spikes per cluster structure.
    old_clusters = _unique(old_spike_clusters)

    # NOTE: shortcut to a merge if this assignment is effectively a merge,
    # i.e. if all spikes are assigned to a single cluster.
    # The fact that spike selection has been previously extended to
    # whole clusters is critical here.
    new_clusters = _unique(new_spike_clusters)
    if len(new_clusters) == 1:
        return self._do_merge(spike_ids, old_clusters, new_clusters[0])

    # We return the UpdateInfo structure.
    up = _assign_update_info(spike_ids,
                             old_spike_clusters,
                             new_spike_clusters)

    # We update the new cluster id (strictly increasing during a session).
    self._new_cluster_id = max(self._new_cluster_id, max(up.added) + 1)

    # We make the assignments.
    self._spike_clusters[spike_ids] = new_spike_clusters
    # OPTIM: we update spikes_per_cluster manually.
    new_spc = _spikes_per_cluster(new_spike_clusters, spike_ids)
    self._update_cluster_ids(to_remove=old_clusters, to_add=new_spc)
    return up
python
[ "def", "_do_assign", "(", "self", ",", "spike_ids", ",", "new_spike_clusters", ")", ":", "# Ensure spike_clusters has the right shape.", "spike_ids", "=", "_as_array", "(", "spike_ids", ")", "if", "len", "(", "new_spike_clusters", ")", "==", "1", "and", "len", "(", "spike_ids", ")", ">", "1", ":", "new_spike_clusters", "=", "(", "np", ".", "ones", "(", "len", "(", "spike_ids", ")", ",", "dtype", "=", "np", ".", "int64", ")", "*", "new_spike_clusters", "[", "0", "]", ")", "old_spike_clusters", "=", "self", ".", "_spike_clusters", "[", "spike_ids", "]", "assert", "len", "(", "spike_ids", ")", "==", "len", "(", "old_spike_clusters", ")", "assert", "len", "(", "new_spike_clusters", ")", "==", "len", "(", "spike_ids", ")", "# Update the spikes per cluster structure.", "old_clusters", "=", "_unique", "(", "old_spike_clusters", ")", "# NOTE: shortcut to a merge if this assignment is effectively a merge", "# i.e. if all spikes are assigned to a single cluster.", "# The fact that spike selection has been previously extended to", "# whole clusters is critical here.", "new_clusters", "=", "_unique", "(", "new_spike_clusters", ")", "if", "len", "(", "new_clusters", ")", "==", "1", ":", "return", "self", ".", "_do_merge", "(", "spike_ids", ",", "old_clusters", ",", "new_clusters", "[", "0", "]", ")", "# We return the UpdateInfo structure.", "up", "=", "_assign_update_info", "(", "spike_ids", ",", "old_spike_clusters", ",", "new_spike_clusters", ")", "# We update the new cluster id (strictly increasing during a session).", "self", ".", "_new_cluster_id", "=", "max", "(", "self", ".", "_new_cluster_id", ",", "max", "(", "up", ".", "added", ")", "+", "1", ")", "# We make the assignments.", "self", ".", "_spike_clusters", "[", "spike_ids", "]", "=", "new_spike_clusters", "# OPTIM: we update spikes_per_cluster manually.", "new_spc", "=", "_spikes_per_cluster", "(", "new_spike_clusters", ",", "spike_ids", ")", "self", ".", "_update_cluster_ids", "(", "to_remove", "=", "old_clusters", ",", "to_add", "=", "new_spc", ")", "return", "up" ]
Make spike-cluster assignments after the spike selection has been extended to full clusters.
[ "Make", "spike", "-", "cluster", "assignments", "after", "the", "spike", "selection", "has", "been", "extended", "to", "full", "clusters", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/clustering.py#L258-L296
train
kwikteam/phy
phy/cluster/clustering.py
Clustering.merge
def merge(self, cluster_ids, to=None):
    """Merge several clusters to a new cluster.

    Parameters
    ----------

    cluster_ids : array-like
        List of clusters to merge.
    to : integer or None
        The id of the new cluster. By default, this is `new_cluster_id()`.

    Returns
    -------

    up : UpdateInfo instance

    """

    if not _is_array_like(cluster_ids):
        raise ValueError("The first argument should be a list or "
                         "an array.")

    cluster_ids = sorted(cluster_ids)
    if not set(cluster_ids) <= set(self.cluster_ids):
        raise ValueError("Some clusters do not exist.")

    # Find the new cluster number.
    if to is None:
        to = self.new_cluster_id()

    if to < self.new_cluster_id():
        raise ValueError("The new cluster numbers should be higher than "
                         "{0}.".format(self.new_cluster_id()))

    # NOTE: we could have called self.assign() here, but we don't.
    # We circumvent self.assign() for performance reasons.
    # assign() is a relatively costly operation, whereas merging is a much
    # cheaper operation.

    # Find all spikes in the specified clusters.
    spike_ids = _spikes_in_clusters(self.spike_clusters, cluster_ids)

    up = self._do_merge(spike_ids, cluster_ids, to)
    undo_state = self.emit('request_undo_state', up)

    # Add to stack.
    self._undo_stack.add((spike_ids, [to], undo_state))

    self.emit('cluster', up)
    return up
python
[ "def", "merge", "(", "self", ",", "cluster_ids", ",", "to", "=", "None", ")", ":", "if", "not", "_is_array_like", "(", "cluster_ids", ")", ":", "raise", "ValueError", "(", "\"The first argument should be a list or \"", "\"an array.\"", ")", "cluster_ids", "=", "sorted", "(", "cluster_ids", ")", "if", "not", "set", "(", "cluster_ids", ")", "<=", "set", "(", "self", ".", "cluster_ids", ")", ":", "raise", "ValueError", "(", "\"Some clusters do not exist.\"", ")", "# Find the new cluster number.", "if", "to", "is", "None", ":", "to", "=", "self", ".", "new_cluster_id", "(", ")", "if", "to", "<", "self", ".", "new_cluster_id", "(", ")", ":", "raise", "ValueError", "(", "\"The new cluster numbers should be higher than \"", "\"{0}.\"", ".", "format", "(", "self", ".", "new_cluster_id", "(", ")", ")", ")", "# NOTE: we could have called self.assign() here, but we don't.", "# We circumvent self.assign() for performance reasons.", "# assign() is a relatively costly operation, whereas merging is a much", "# cheaper operation.", "# Find all spikes in the specified clusters.", "spike_ids", "=", "_spikes_in_clusters", "(", "self", ".", "spike_clusters", ",", "cluster_ids", ")", "up", "=", "self", ".", "_do_merge", "(", "spike_ids", ",", "cluster_ids", ",", "to", ")", "undo_state", "=", "self", ".", "emit", "(", "'request_undo_state'", ",", "up", ")", "# Add to stack.", "self", ".", "_undo_stack", ".", "add", "(", "(", "spike_ids", ",", "[", "to", "]", ",", "undo_state", ")", ")", "self", ".", "emit", "(", "'cluster'", ",", "up", ")", "return", "up" ]
Merge several clusters to a new cluster.

Parameters
----------

cluster_ids : array-like
    List of clusters to merge.
to : integer or None
    The id of the new cluster. By default, this is `new_cluster_id()`.

Returns
-------

up : UpdateInfo instance
[ "Merge", "several", "clusters", "to", "a", "new", "cluster", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/clustering.py#L320-L368
train
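A usage sketch of `Clustering.merge()` (hypothetical data; assumes the phy version pinned above is installed):

import numpy as np
from phy.cluster.clustering import Clustering

clustering = Clustering(np.array([2, 5, 3, 2, 5]))  # one cluster id per spike
up = clustering.merge([2, 3])            # merge clusters 2 and 3
print(up.added)                          # [6]: the brand-new cluster id
print(clustering.spike_clusters)         # [6 5 6 6 5]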
kwikteam/phy
phy/cluster/clustering.py
Clustering.assign
def assign(self, spike_ids, spike_clusters_rel=0):
    """Make new spike cluster assignments.

    Parameters
    ----------

    spike_ids : array-like
        List of spike ids.
    spike_clusters_rel : array-like
        Relative cluster ids of the spikes in `spike_ids`. This
        must have the same size as `spike_ids`.

    Returns
    -------

    up : UpdateInfo instance

    Note
    ----

    `spike_clusters_rel` contains *relative* cluster indices. Their values
    don't matter: what matters is whether two given spikes
    should end up in the same cluster or not. Adding a constant number
    to all elements in `spike_clusters_rel` results in exactly the same
    operation.

    The final cluster ids are automatically generated by the `Clustering`
    class. This is because we must ensure that all modified clusters
    get brand new ids. The whole library is based on the assumption that
    cluster ids are unique and "disposable". Changing a cluster always
    results in a new cluster id being assigned.

    If a spike is assigned to a new cluster, then all other spikes
    belonging to the same cluster are assigned to a brand new cluster,
    even if they were not changed explicitly by the `assign()` method.

    In other words, the list of spikes affected by an `assign()` is almost
    always a strict superset of the `spike_ids` parameter. The only case
    where this is not true is when whole clusters change: this is called
    a merge. It is implemented in a separate `merge()` method because it
    is logically much simpler, and faster to execute.

    """
    assert not isinstance(spike_ids, slice)

    # Ensure `spike_clusters_rel` is an array-like.
    if not hasattr(spike_clusters_rel, '__len__'):
        spike_clusters_rel = spike_clusters_rel * np.ones(len(spike_ids),
                                                          dtype=np.int64)

    spike_ids = _as_array(spike_ids)
    if len(spike_ids) == 0:
        return UpdateInfo()
    assert len(spike_ids) == len(spike_clusters_rel)
    assert spike_ids.min() >= 0
    assert spike_ids.max() < self._n_spikes, "Some spikes don't exist."

    # Normalize the spike-cluster assignment such that
    # there are only new or dead clusters, not modified clusters.
    # This implies that spikes not explicitly selected, but that
    # belong to clusters affected by the operation, will be assigned
    # to brand new clusters.
    spike_ids, cluster_ids = _extend_assignment(spike_ids,
                                                self._spike_clusters,
                                                spike_clusters_rel,
                                                self.new_cluster_id(),
                                                )

    up = self._do_assign(spike_ids, cluster_ids)
    undo_state = self.emit('request_undo_state', up)

    # Add the assignment to the undo stack.
    self._undo_stack.add((spike_ids, cluster_ids, undo_state))

    self.emit('cluster', up)
    return up
python
[ "def", "assign", "(", "self", ",", "spike_ids", ",", "spike_clusters_rel", "=", "0", ")", ":", "assert", "not", "isinstance", "(", "spike_ids", ",", "slice", ")", "# Ensure `spike_clusters_rel` is an array-like.", "if", "not", "hasattr", "(", "spike_clusters_rel", ",", "'__len__'", ")", ":", "spike_clusters_rel", "=", "spike_clusters_rel", "*", "np", ".", "ones", "(", "len", "(", "spike_ids", ")", ",", "dtype", "=", "np", ".", "int64", ")", "spike_ids", "=", "_as_array", "(", "spike_ids", ")", "if", "len", "(", "spike_ids", ")", "==", "0", ":", "return", "UpdateInfo", "(", ")", "assert", "len", "(", "spike_ids", ")", "==", "len", "(", "spike_clusters_rel", ")", "assert", "spike_ids", ".", "min", "(", ")", ">=", "0", "assert", "spike_ids", ".", "max", "(", ")", "<", "self", ".", "_n_spikes", ",", "\"Some spikes don't exist.\"", "# Normalize the spike-cluster assignment such that", "# there are only new or dead clusters, not modified clusters.", "# This implies that spikes not explicitly selected, but that", "# belong to clusters affected by the operation, will be assigned", "# to brand new clusters.", "spike_ids", ",", "cluster_ids", "=", "_extend_assignment", "(", "spike_ids", ",", "self", ".", "_spike_clusters", ",", "spike_clusters_rel", ",", "self", ".", "new_cluster_id", "(", ")", ",", ")", "up", "=", "self", ".", "_do_assign", "(", "spike_ids", ",", "cluster_ids", ")", "undo_state", "=", "self", ".", "emit", "(", "'request_undo_state'", ",", "up", ")", "# Add the assignment to the undo stack.", "self", ".", "_undo_stack", ".", "add", "(", "(", "spike_ids", ",", "cluster_ids", ",", "undo_state", ")", ")", "self", ".", "emit", "(", "'cluster'", ",", "up", ")", "return", "up" ]
Make new spike cluster assignments.

Parameters
----------

spike_ids : array-like
    List of spike ids.
spike_clusters_rel : array-like
    Relative cluster ids of the spikes in `spike_ids`. This
    must have the same size as `spike_ids`.

Returns
-------

up : UpdateInfo instance

Note
----

`spike_clusters_rel` contains *relative* cluster indices. Their values
don't matter: what matters is whether two given spikes should end up in
the same cluster or not. Adding a constant number to all elements in
`spike_clusters_rel` results in exactly the same operation.

The final cluster ids are automatically generated by the `Clustering`
class. This is because we must ensure that all modified clusters get
brand new ids. The whole library is based on the assumption that cluster
ids are unique and "disposable". Changing a cluster always results in a
new cluster id being assigned.

If a spike is assigned to a new cluster, then all other spikes belonging
to the same cluster are assigned to a brand new cluster, even if they
were not changed explicitly by the `assign()` method.

In other words, the list of spikes affected by an `assign()` is almost
always a strict superset of the `spike_ids` parameter. The only case
where this is not true is when whole clusters change: this is called a
merge. It is implemented in a separate `merge()` method because it is
logically much simpler, and faster to execute.
[ "Make", "new", "spike", "cluster", "assignments", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/clustering.py#L370-L446
train
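A usage sketch of `assign()` (hypothetical data; the final cluster ids are generated automatically, so the printed values are indicative):

import numpy as np
from phy.cluster.clustering import Clustering

clustering = Clustering(np.array([0, 0, 0, 1]))
# Split cluster 0: spikes 0 and 1 stay together, spike 2 goes alone.
clustering.assign(np.array([0, 1, 2]), np.array([0, 0, 1]))
print(clustering.spike_clusters)  # e.g. [2 2 3 1]: cluster 0 is gone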
kwikteam/phy
phy/cluster/clustering.py
Clustering.undo
def undo(self):
    """Undo the last cluster assignment operation.

    Returns
    -------

    up : UpdateInfo instance of the changes done by this operation.

    """
    _, _, undo_state = self._undo_stack.back()

    # Retrieve the initial spike_cluster structure.
    spike_clusters_new = self._spike_clusters_base.copy()

    # Loop over the history (except the last item because we undo).
    for spike_ids, cluster_ids, _ in self._undo_stack:
        # We update the spike clusters accordingly.
        if spike_ids is not None:
            spike_clusters_new[spike_ids] = cluster_ids

    # What are the spikes affected by the last changes?
    changed = np.nonzero(self._spike_clusters != spike_clusters_new)[0]
    clusters_changed = spike_clusters_new[changed]

    up = self._do_assign(changed, clusters_changed)
    up.history = 'undo'
    # Add the undo_state object from the undone object.
    up.undo_state = undo_state

    self.emit('cluster', up)
    return up
python
[ "def", "undo", "(", "self", ")", ":", "_", ",", "_", ",", "undo_state", "=", "self", ".", "_undo_stack", ".", "back", "(", ")", "# Retrieve the initial spike_cluster structure.", "spike_clusters_new", "=", "self", ".", "_spike_clusters_base", ".", "copy", "(", ")", "# Loop over the history (except the last item because we undo).", "for", "spike_ids", ",", "cluster_ids", ",", "_", "in", "self", ".", "_undo_stack", ":", "# We update the spike clusters accordingly.", "if", "spike_ids", "is", "not", "None", ":", "spike_clusters_new", "[", "spike_ids", "]", "=", "cluster_ids", "# What are the spikes affected by the last changes?", "changed", "=", "np", ".", "nonzero", "(", "self", ".", "_spike_clusters", "!=", "spike_clusters_new", ")", "[", "0", "]", "clusters_changed", "=", "spike_clusters_new", "[", "changed", "]", "up", "=", "self", ".", "_do_assign", "(", "changed", ",", "clusters_changed", ")", "up", ".", "history", "=", "'undo'", "# Add the undo_state object from the undone object.", "up", ".", "undo_state", "=", "undo_state", "self", ".", "emit", "(", "'cluster'", ",", "up", ")", "return", "up" ]
Undo the last cluster assignment operation.

Returns
-------

up : UpdateInfo instance of the changes done by this operation.
[ "Undo", "the", "last", "cluster", "assignment", "operation", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/clustering.py#L477-L508
train
kwikteam/phy
phy/cluster/clustering.py
Clustering.redo
def redo(self):
    """Redo the last cluster assignment operation.

    Returns
    -------

    up : UpdateInfo instance of the changes done by this operation.

    """
    # Go forward in the stack, and retrieve the new assignment.
    item = self._undo_stack.forward()
    if item is None:
        # No redo has been performed: abort.
        return

    # NOTE: the undo_state object is only returned when undoing.
    # It represents data associated to the state
    # *before* the action. What might be more useful would be the
    # undo_state object of the next item in the list (if it exists).
    spike_ids, cluster_ids, undo_state = item
    assert spike_ids is not None

    # We apply the new assignment.
    up = self._do_assign(spike_ids, cluster_ids)
    up.history = 'redo'

    self.emit('cluster', up)
    return up
python
[ "def", "redo", "(", "self", ")", ":", "# Go forward in the stack, and retrieve the new assignment.", "item", "=", "self", ".", "_undo_stack", ".", "forward", "(", ")", "if", "item", "is", "None", ":", "# No redo has been performed: abort.", "return", "# NOTE: the undo_state object is only returned when undoing.", "# It represents data associated to the state", "# *before* the action. What might be more useful would be the", "# undo_state object of the next item in the list (if it exists).", "spike_ids", ",", "cluster_ids", ",", "undo_state", "=", "item", "assert", "spike_ids", "is", "not", "None", "# We apply the new assignment.", "up", "=", "self", ".", "_do_assign", "(", "spike_ids", ",", "cluster_ids", ")", "up", ".", "history", "=", "'redo'", "self", ".", "emit", "(", "'cluster'", ",", "up", ")", "return", "up" ]
Redo the last cluster assignment operation.

Returns
-------

up : UpdateInfo instance of the changes done by this operation.
[ "Redo", "the", "last", "cluster", "assignment", "operation", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/clustering.py#L510-L537
train
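A round-trip sketch combining `merge()`, `undo()`, and `redo()` (hypothetical data; assumes phy is installed):

import numpy as np
from phy.cluster.clustering import Clustering

clustering = Clustering(np.array([0, 0, 1, 1]))
clustering.merge([0, 1])              # all spikes -> new cluster 2
up = clustering.undo()                # back to [0 0 1 1]
print(up.history)                     # 'undo'
up = clustering.redo()                # forward to [2 2 2 2]
print(up.history)                     # 'redo'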
kwikteam/phy
phy/stats/ccg.py
_increment
def _increment(arr, indices):
    """Increment some indices in a 1D vector of non-negative integers.
    Repeated indices are taken into account."""
    arr = _as_array(arr)
    indices = _as_array(indices)
    bbins = np.bincount(indices)
    arr[:len(bbins)] += bbins
    return arr
python
[ "def", "_increment", "(", "arr", ",", "indices", ")", ":", "arr", "=", "_as_array", "(", "arr", ")", "indices", "=", "_as_array", "(", "indices", ")", "bbins", "=", "np", ".", "bincount", "(", "indices", ")", "arr", "[", ":", "len", "(", "bbins", ")", "]", "+=", "bbins", "return", "arr" ]
Increment some indices in a 1D vector of non-negative integers. Repeated indices are taken into account.
[ "Increment", "some", "indices", "in", "a", "1D", "vector", "of", "non", "-", "negative", "integers", ".", "Repeated", "indices", "are", "taken", "into", "account", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/stats/ccg.py#L19-L26
train
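The `np.bincount` idiom above, as a standalone sketch (hypothetical data). The point of the helper is that a naive `arr[indices] += 1` would count the repeated index only once:

import numpy as np

arr = np.zeros(5, dtype=np.int64)
indices = np.array([1, 1, 3])       # index 1 appears twice
bbins = np.bincount(indices)        # [0 2 0 1]
arr[:len(bbins)] += bbins
print(arr)                          # [0 2 0 1 0]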
kwikteam/phy
phy/stats/ccg.py
_symmetrize_correlograms
def _symmetrize_correlograms(correlograms):
    """Return the symmetrized version of the CCG arrays."""

    n_clusters, _, n_bins = correlograms.shape
    assert n_clusters == _

    # We symmetrize c[i, j, 0].
    # This is necessary because the algorithm in correlograms()
    # is sensitive to the order of identical spikes.
    correlograms[..., 0] = np.maximum(correlograms[..., 0],
                                      correlograms[..., 0].T)

    sym = correlograms[..., 1:][..., ::-1]
    sym = np.transpose(sym, (1, 0, 2))

    return np.dstack((sym, correlograms))
python
[ "def", "_symmetrize_correlograms", "(", "correlograms", ")", ":", "n_clusters", ",", "_", ",", "n_bins", "=", "correlograms", ".", "shape", "assert", "n_clusters", "==", "_", "# We symmetrize c[i, j, 0].", "# This is necessary because the algorithm in correlograms()", "# is sensitive to the order of identical spikes.", "correlograms", "[", "...", ",", "0", "]", "=", "np", ".", "maximum", "(", "correlograms", "[", "...", ",", "0", "]", ",", "correlograms", "[", "...", ",", "0", "]", ".", "T", ")", "sym", "=", "correlograms", "[", "...", ",", "1", ":", "]", "[", "...", ",", ":", ":", "-", "1", "]", "sym", "=", "np", ".", "transpose", "(", "sym", ",", "(", "1", ",", "0", ",", "2", ")", ")", "return", "np", ".", "dstack", "(", "(", "sym", ",", "correlograms", ")", ")" ]
Return the symmetrized version of the CCG arrays.
[ "Return", "the", "symmetrized", "version", "of", "the", "CCG", "arrays", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/stats/ccg.py#L39-L54
train
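A shape-check sketch (hypothetical data; `_symmetrize_correlograms` is a private helper in phy.stats.ccg): a half-window input of `w` bins becomes a full, symmetric window of `2 * w - 1` bins.

import numpy as np
from phy.stats.ccg import _symmetrize_correlograms  # private helper

ccg = np.random.randint(0, 10, (3, 3, 5))   # 3 clusters, 5 half-window bins
print(_symmetrize_correlograms(ccg).shape)  # (3, 3, 9)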
kwikteam/phy
phy/stats/ccg.py
correlograms
def correlograms(spike_times,
                 spike_clusters,
                 cluster_ids=None,
                 sample_rate=1.,
                 bin_size=None,
                 window_size=None,
                 symmetrize=True,
                 ):
    """Compute all pairwise cross-correlograms among the clusters appearing
    in `spike_clusters`.

    Parameters
    ----------

    spike_times : array-like
        Spike times in seconds.
    spike_clusters : array-like
        Spike-cluster mapping.
    cluster_ids : array-like
        The list of unique clusters, in any order. That order will be used
        in the output array.
    bin_size : float
        Size of the bin, in seconds.
    window_size : float
        Size of the window, in seconds.

    Returns
    -------

    correlograms : array
        A `(n_clusters, n_clusters, winsize_samples)` array with all
        pairwise CCGs.

    """
    assert sample_rate > 0.
    assert np.all(np.diff(spike_times) >= 0), ("The spike times must be "
                                               "increasing.")

    # Get the spike samples.
    spike_times = np.asarray(spike_times, dtype=np.float64)
    spike_samples = (spike_times * sample_rate).astype(np.int64)

    spike_clusters = _as_array(spike_clusters)

    assert spike_samples.ndim == 1
    assert spike_samples.shape == spike_clusters.shape

    # Find `binsize`.
    bin_size = np.clip(bin_size, 1e-5, 1e5)  # in seconds
    binsize = int(sample_rate * bin_size)  # in samples
    assert binsize >= 1

    # Find `winsize_bins`.
    window_size = np.clip(window_size, 1e-5, 1e5)  # in seconds
    winsize_bins = 2 * int(.5 * window_size / bin_size) + 1

    assert winsize_bins >= 1
    assert winsize_bins % 2 == 1

    # Take the cluster order into account.
    if cluster_ids is None:
        clusters = _unique(spike_clusters)
    else:
        clusters = _as_array(cluster_ids)
    n_clusters = len(clusters)

    # Like spike_clusters, but with 0..n_clusters-1 indices.
    spike_clusters_i = _index_of(spike_clusters, clusters)

    # Shift between the two copies of the spike trains.
    shift = 1

    # At a given shift, the mask specifies which spikes have matching spikes
    # within the correlogram time window.
    mask = np.ones_like(spike_samples, dtype=np.bool)

    correlograms = _create_correlograms_array(n_clusters, winsize_bins)

    # The loop continues as long as there is at least one spike with
    # a matching spike.
    while mask[:-shift].any():
        # Number of time samples between spike i and spike i+shift.
        spike_diff = _diff_shifted(spike_samples, shift)

        # Bin the delays between spike i and spike i+shift.
        spike_diff_b = spike_diff // binsize

        # Spikes with no matching spikes are masked.
        mask[:-shift][spike_diff_b > (winsize_bins // 2)] = False

        # Cache the masked spike delays.
        m = mask[:-shift].copy()
        d = spike_diff_b[m]

        # # Update the masks given the clusters to update.
        # m0 = np.in1d(spike_clusters[:-shift], clusters)
        # m = m & m0
        # d = spike_diff_b[m]

        # Find the indices in the raveled correlograms array that need
        # to be incremented, taking into account the spike clusters.
        indices = np.ravel_multi_index((spike_clusters_i[:-shift][m],
                                        spike_clusters_i[+shift:][m],
                                        d),
                                       correlograms.shape)

        # Increment the matching spikes in the correlograms array.
        _increment(correlograms.ravel(), indices)

        shift += 1

    # Remove ACG peaks.
    correlograms[np.arange(n_clusters),
                 np.arange(n_clusters),
                 0] = 0

    if symmetrize:
        return _symmetrize_correlograms(correlograms)
    else:
        return correlograms
python
[ "def", "correlograms", "(", "spike_times", ",", "spike_clusters", ",", "cluster_ids", "=", "None", ",", "sample_rate", "=", "1.", ",", "bin_size", "=", "None", ",", "window_size", "=", "None", ",", "symmetrize", "=", "True", ",", ")", ":", "assert", "sample_rate", ">", "0.", "assert", "np", ".", "all", "(", "np", ".", "diff", "(", "spike_times", ")", ">=", "0", ")", ",", "(", "\"The spike times must be \"", "\"increasing.\"", ")", "# Get the spike samples.", "spike_times", "=", "np", ".", "asarray", "(", "spike_times", ",", "dtype", "=", "np", ".", "float64", ")", "spike_samples", "=", "(", "spike_times", "*", "sample_rate", ")", ".", "astype", "(", "np", ".", "int64", ")", "spike_clusters", "=", "_as_array", "(", "spike_clusters", ")", "assert", "spike_samples", ".", "ndim", "==", "1", "assert", "spike_samples", ".", "shape", "==", "spike_clusters", ".", "shape", "# Find `binsize`.", "bin_size", "=", "np", ".", "clip", "(", "bin_size", ",", "1e-5", ",", "1e5", ")", "# in seconds", "binsize", "=", "int", "(", "sample_rate", "*", "bin_size", ")", "# in samples", "assert", "binsize", ">=", "1", "# Find `winsize_bins`.", "window_size", "=", "np", ".", "clip", "(", "window_size", ",", "1e-5", ",", "1e5", ")", "# in seconds", "winsize_bins", "=", "2", "*", "int", "(", ".5", "*", "window_size", "/", "bin_size", ")", "+", "1", "assert", "winsize_bins", ">=", "1", "assert", "winsize_bins", "%", "2", "==", "1", "# Take the cluster oder into account.", "if", "cluster_ids", "is", "None", ":", "clusters", "=", "_unique", "(", "spike_clusters", ")", "else", ":", "clusters", "=", "_as_array", "(", "cluster_ids", ")", "n_clusters", "=", "len", "(", "clusters", ")", "# Like spike_clusters, but with 0..n_clusters-1 indices.", "spike_clusters_i", "=", "_index_of", "(", "spike_clusters", ",", "clusters", ")", "# Shift between the two copies of the spike trains.", "shift", "=", "1", "# At a given shift, the mask precises which spikes have matching spikes", "# within the correlogram time window.", "mask", "=", "np", ".", "ones_like", "(", "spike_samples", ",", "dtype", "=", "np", ".", "bool", ")", "correlograms", "=", "_create_correlograms_array", "(", "n_clusters", ",", "winsize_bins", ")", "# The loop continues as long as there is at least one spike with", "# a matching spike.", "while", "mask", "[", ":", "-", "shift", "]", ".", "any", "(", ")", ":", "# Number of time samples between spike i and spike i+shift.", "spike_diff", "=", "_diff_shifted", "(", "spike_samples", ",", "shift", ")", "# Binarize the delays between spike i and spike i+shift.", "spike_diff_b", "=", "spike_diff", "//", "binsize", "# Spikes with no matching spikes are masked.", "mask", "[", ":", "-", "shift", "]", "[", "spike_diff_b", ">", "(", "winsize_bins", "//", "2", ")", "]", "=", "False", "# Cache the masked spike delays.", "m", "=", "mask", "[", ":", "-", "shift", "]", ".", "copy", "(", ")", "d", "=", "spike_diff_b", "[", "m", "]", "# # Update the masks given the clusters to update.", "# m0 = np.in1d(spike_clusters[:-shift], clusters)", "# m = m & m0", "# d = spike_diff_b[m]", "d", "=", "spike_diff_b", "[", "m", "]", "# Find the indices in the raveled correlograms array that need", "# to be incremented, taking into account the spike clusters.", "indices", "=", "np", ".", "ravel_multi_index", "(", "(", "spike_clusters_i", "[", ":", "-", "shift", "]", "[", "m", "]", ",", "spike_clusters_i", "[", "+", "shift", ":", "]", "[", "m", "]", ",", "d", ")", ",", "correlograms", ".", "shape", ")", "# Increment the matching spikes in the 
correlograms array.", "_increment", "(", "correlograms", ".", "ravel", "(", ")", ",", "indices", ")", "shift", "+=", "1", "# Remove ACG peaks.", "correlograms", "[", "np", ".", "arange", "(", "n_clusters", ")", ",", "np", ".", "arange", "(", "n_clusters", ")", ",", "0", "]", "=", "0", "if", "symmetrize", ":", "return", "_symmetrize_correlograms", "(", "correlograms", ")", "else", ":", "return", "correlograms" ]
Compute all pairwise cross-correlograms among the clusters appearing in
`spike_clusters`.

Parameters
----------

spike_times : array-like
    Spike times in seconds.
spike_clusters : array-like
    Spike-cluster mapping.
cluster_ids : array-like
    The list of unique clusters, in any order. That order will be used
    in the output array.
bin_size : float
    Size of the bin, in seconds.
window_size : float
    Size of the window, in seconds.

Returns
-------

correlograms : array
    A `(n_clusters, n_clusters, winsize_samples)` array with all pairwise
    CCGs.
[ "Compute", "all", "pairwise", "cross", "-", "correlograms", "among", "the", "clusters", "appearing", "in", "spike_clusters", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/stats/ccg.py#L57-L177
train
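A minimal usage sketch (hypothetical spike train; assumes the phy version pinned above, whose `np.bool` usage requires an older NumPy):

import numpy as np
from phy.stats.ccg import correlograms

spike_times = np.array([0., 0.010, 0.020, 0.030])  # seconds, increasing
spike_clusters = np.array([0, 1, 0, 1])
ccg = correlograms(spike_times, spike_clusters,
                   sample_rate=20000., bin_size=0.001, window_size=0.050)
print(ccg.shape)  # (2, 2, 51): 2 clusters, 51 bins of 1 ms over +/- 25 ms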
kwikteam/phy
phy/cluster/views/correlogram.py
CorrelogramView.set_bin_window
def set_bin_window(self, bin_size=None, window_size=None):
    """Set the bin and window sizes."""
    bin_size = bin_size or self.bin_size
    window_size = window_size or self.window_size
    assert 1e-6 < bin_size < 1e3
    assert 1e-6 < window_size < 1e3
    assert bin_size < window_size
    self.bin_size = bin_size
    self.window_size = window_size
    # Set the status message.
    b, w = self.bin_size * 1000, self.window_size * 1000
    self.set_status('Bin: {:.1f} ms. Window: {:.1f} ms.'.format(b, w))
python
[ "def", "set_bin_window", "(", "self", ",", "bin_size", "=", "None", ",", "window_size", "=", "None", ")", ":", "bin_size", "=", "bin_size", "or", "self", ".", "bin_size", "window_size", "=", "window_size", "or", "self", ".", "window_size", "assert", "1e-6", "<", "bin_size", "<", "1e3", "assert", "1e-6", "<", "window_size", "<", "1e3", "assert", "bin_size", "<", "window_size", "self", ".", "bin_size", "=", "bin_size", "self", ".", "window_size", "=", "window_size", "# Set the status message.", "b", ",", "w", "=", "self", ".", "bin_size", "*", "1000", ",", "self", ".", "window_size", "*", "1000", "self", ".", "set_status", "(", "'Bin: {:.1f} ms. Window: {:.1f} ms.'", ".", "format", "(", "b", ",", "w", ")", ")" ]
Set the bin and window sizes.
[ "Set", "the", "bin", "and", "window", "sizes", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/views/correlogram.py#L56-L67
train
kwikteam/phy
phy/io/datasets.py
_md5
def _md5(path, blocksize=2 ** 20):
    """Compute the checksum of a file."""
    m = hashlib.md5()
    with open(path, 'rb') as f:
        while True:
            buf = f.read(blocksize)
            if not buf:
                break
            m.update(buf)
    return m.hexdigest()
python
[ "def", "_md5", "(", "path", ",", "blocksize", "=", "2", "**", "20", ")", ":", "m", "=", "hashlib", ".", "md5", "(", ")", "with", "open", "(", "path", ",", "'rb'", ")", "as", "f", ":", "while", "True", ":", "buf", "=", "f", ".", "read", "(", "blocksize", ")", "if", "not", "buf", ":", "break", "m", ".", "update", "(", "buf", ")", "return", "m", ".", "hexdigest", "(", ")" ]
Compute the checksum of a file.
[ "Compute", "the", "checksum", "of", "a", "file", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/datasets.py#L66-L75
train
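A usage sketch (hypothetical file; `_md5` is a private helper, shown against `hashlib` for comparison):

import hashlib
import tempfile
from phy.io.datasets import _md5  # private helper

with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(b'hello')
    path = f.name
print(_md5(path))                         # streamed, 1 MiB at a time
print(hashlib.md5(b'hello').hexdigest())  # same digest, computed in one shot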
kwikteam/phy
phy/io/datasets.py
download_file
def download_file(url, output_path):
    """Download a binary file from a URL.

    The checksum will be downloaded from `URL + .md5`. If this download
    succeeds, the file's MD5 will be compared to the expected checksum.

    Parameters
    ----------

    url : str
        The file's URL.
    output_path : str
        The path where the file is to be saved.

    """
    output_path = op.realpath(output_path)
    assert output_path is not None
    if op.exists(output_path):
        checked = _check_md5_of_url(output_path, url)
        if checked is False:
            logger.debug("The file `%s` already exists "
                         "but is invalid: redownloading.", output_path)
        elif checked is True:
            logger.debug("The file `%s` already exists: skipping.",
                         output_path)
            return output_path
    r = _download(url, stream=True)
    _save_stream(r, output_path)
    if _check_md5_of_url(output_path, url) is False:
        logger.debug("The checksum doesn't match: retrying the download.")
        r = _download(url, stream=True)
        _save_stream(r, output_path)
        if _check_md5_of_url(output_path, url) is False:
            raise RuntimeError("The checksum of the downloaded file "
                               "doesn't match the provided checksum.")
    return
python
[ "def", "download_file", "(", "url", ",", "output_path", ")", ":", "output_path", "=", "op", ".", "realpath", "(", "output_path", ")", "assert", "output_path", "is", "not", "None", "if", "op", ".", "exists", "(", "output_path", ")", ":", "checked", "=", "_check_md5_of_url", "(", "output_path", ",", "url", ")", "if", "checked", "is", "False", ":", "logger", ".", "debug", "(", "\"The file `%s` already exists \"", "\"but is invalid: redownloading.\"", ",", "output_path", ")", "elif", "checked", "is", "True", ":", "logger", ".", "debug", "(", "\"The file `%s` already exists: skipping.\"", ",", "output_path", ")", "return", "output_path", "r", "=", "_download", "(", "url", ",", "stream", "=", "True", ")", "_save_stream", "(", "r", ",", "output_path", ")", "if", "_check_md5_of_url", "(", "output_path", ",", "url", ")", "is", "False", ":", "logger", ".", "debug", "(", "\"The checksum doesn't match: retrying the download.\"", ")", "r", "=", "_download", "(", "url", ",", "stream", "=", "True", ")", "_save_stream", "(", "r", ",", "output_path", ")", "if", "_check_md5_of_url", "(", "output_path", ",", "url", ")", "is", "False", ":", "raise", "RuntimeError", "(", "\"The checksum of the downloaded file \"", "\"doesn't match the provided checksum.\"", ")", "return" ]
Download a binary file from a URL.

The checksum will be downloaded from `URL + .md5`. If this download
succeeds, the file's MD5 will be compared to the expected checksum.

Parameters
----------

url : str
    The file's URL.
output_path : str
    The path where the file is to be saved.
[ "Download", "a", "binary", "file", "from", "an", "URL", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/datasets.py#L103-L138
train
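A usage sketch (hypothetical URL; the server must also serve `URL + '.md5'` for the checksum test to pass):

from phy.io.datasets import download_file

url = 'https://example.com/data/traces.bin'  # hypothetical
download_file(url, 'traces.bin')  # skipped if a valid local copy exists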
kwikteam/phy
phy/plot/plot.py
_make_class
def _make_class(cls, **kwargs):
    """Return a custom Visual class with given parameters."""
    kwargs = {k: (v if v is not None else getattr(cls, k, None))
              for k, v in kwargs.items()}
    # The class name contains a hash of the custom parameters.
    name = cls.__name__ + '_' + _hash(kwargs)
    if name not in _CLASSES:
        logger.log(5, "Create class %s %s.", name, kwargs)
        cls = type(name, (cls,), kwargs)
        _CLASSES[name] = cls
    return _CLASSES[name]
python
[ "def", "_make_class", "(", "cls", ",", "*", "*", "kwargs", ")", ":", "kwargs", "=", "{", "k", ":", "(", "v", "if", "v", "is", "not", "None", "else", "getattr", "(", "cls", ",", "k", ",", "None", ")", ")", "for", "k", ",", "v", "in", "kwargs", ".", "items", "(", ")", "}", "# The class name contains a hash of the custom parameters.", "name", "=", "cls", ".", "__name__", "+", "'_'", "+", "_hash", "(", "kwargs", ")", "if", "name", "not", "in", "_CLASSES", ":", "logger", ".", "log", "(", "5", ",", "\"Create class %s %s.\"", ",", "name", ",", "kwargs", ")", "cls", "=", "type", "(", "name", ",", "(", "cls", ",", ")", ",", "kwargs", ")", "_CLASSES", "[", "name", "]", "=", "cls", "return", "_CLASSES", "[", "name", "]" ]
Return a custom Visual class with given parameters.
[ "Return", "a", "custom", "Visual", "class", "with", "given", "parameters", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/plot.py#L46-L56
train
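A standalone sketch of the memoized-subclass pattern used above (hypothetical names, not phy's actual classes; the real helper also hashes the parameters into the class name):

_CLASSES = {}

def make_class(cls, **kwargs):
    # Key the cache on the class name plus the custom parameters.
    name = cls.__name__ + '_' + str(sorted(kwargs.items()))
    if name not in _CLASSES:
        _CLASSES[name] = type(name, (cls,), kwargs)
    return _CLASSES[name]

class Visual:
    _default_marker = 'disc'

Cross = make_class(Visual, _default_marker='cross')
print(Cross()._default_marker)                               # 'cross'
print(Cross is make_class(Visual, _default_marker='cross'))  # True (cached)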
kwikteam/phy
phy/plot/plot.py
View._add_item
def _add_item(self, cls, *args, **kwargs):
    """Add a plot item."""
    box_index = kwargs.pop('box_index', self._default_box_index)

    data = cls.validate(*args, **kwargs)
    n = cls.vertex_count(**data)

    if not isinstance(box_index, np.ndarray):
        k = len(self._default_box_index)
        box_index = _get_array(box_index, (n, k))
    data['box_index'] = box_index

    if cls not in self._items:
        self._items[cls] = []
    self._items[cls].append(data)
    return data
python
[ "def", "_add_item", "(", "self", ",", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "box_index", "=", "kwargs", ".", "pop", "(", "'box_index'", ",", "self", ".", "_default_box_index", ")", "data", "=", "cls", ".", "validate", "(", "*", "args", ",", "*", "*", "kwargs", ")", "n", "=", "cls", ".", "vertex_count", "(", "*", "*", "data", ")", "if", "not", "isinstance", "(", "box_index", ",", "np", ".", "ndarray", ")", ":", "k", "=", "len", "(", "self", ".", "_default_box_index", ")", "box_index", "=", "_get_array", "(", "box_index", ",", "(", "n", ",", "k", ")", ")", "data", "[", "'box_index'", "]", "=", "box_index", "if", "cls", "not", "in", "self", ".", "_items", ":", "self", ".", "_items", "[", "cls", "]", "=", "[", "]", "self", ".", "_items", "[", "cls", "]", ".", "append", "(", "data", ")", "return", "data" ]
Add a plot item.
[ "Add", "a", "plot", "item", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/plot.py#L118-L133
train
kwikteam/phy
phy/plot/plot.py
View.scatter
def scatter(self, *args, **kwargs):
    """Add a scatter plot."""
    cls = _make_class(ScatterVisual,
                      _default_marker=kwargs.pop('marker', None),
                      )
    return self._add_item(cls, *args, **kwargs)
python
[ "def", "scatter", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "cls", "=", "_make_class", "(", "ScatterVisual", ",", "_default_marker", "=", "kwargs", ".", "pop", "(", "'marker'", ",", "None", ")", ",", ")", "return", "self", ".", "_add_item", "(", "cls", ",", "*", "args", ",", "*", "*", "kwargs", ")" ]
Add a scatter plot.
[ "Add", "a", "scatter", "plot", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/plot.py#L153-L158
train
kwikteam/phy
phy/plot/plot.py
View.build
def build(self):
    """Build all added items.

    Visuals are created, added, and built. The `set_data()` methods can
    be called afterwards.

    """
    for cls, data_list in self._items.items():
        # Some variables are not concatenated. They are specified
        # in `allow_list`.
        data = _accumulate(data_list, cls.allow_list)
        box_index = data.pop('box_index')
        visual = cls()
        self.add_visual(visual)
        visual.set_data(**data)
        # NOTE: visual.program.__contains__ is implemented in vispy master
        # so we can replace this with `if 'a_box_index' in visual.program`
        # after the next VisPy release.
        if 'a_box_index' in visual.program._code_variables:
            visual.program['a_box_index'] = box_index.astype(np.float32)
    # TODO: refactor this when there is the possibility to update existing
    # visuals without recreating the whole scene.
    if self.lasso:
        self.lasso.create_visual()
    self.update()
python
[ "def", "build", "(", "self", ")", ":", "for", "cls", ",", "data_list", "in", "self", ".", "_items", ".", "items", "(", ")", ":", "# Some variables are not concatenated. They are specified", "# in `allow_list`.", "data", "=", "_accumulate", "(", "data_list", ",", "cls", ".", "allow_list", ")", "box_index", "=", "data", ".", "pop", "(", "'box_index'", ")", "visual", "=", "cls", "(", ")", "self", ".", "add_visual", "(", "visual", ")", "visual", ".", "set_data", "(", "*", "*", "data", ")", "# NOTE: visual.program.__contains__ is implemented in vispy master", "# so we can replace this with `if 'a_box_index' in visual.program`", "# after the next VisPy release.", "if", "'a_box_index'", "in", "visual", ".", "program", ".", "_code_variables", ":", "visual", ".", "program", "[", "'a_box_index'", "]", "=", "box_index", ".", "astype", "(", "np", ".", "float32", ")", "# TODO: refactor this when there is the possibility to update existing", "# visuals without recreating the whole scene.", "if", "self", ".", "lasso", ":", "self", ".", "lasso", ".", "create_visual", "(", ")", "self", ".", "update", "(", ")" ]
Build all added items. Visuals are created, added, and built. The `set_data()` methods can be called afterwards.
[ "Build", "all", "added", "items", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/plot.py#L176-L200
train
kwikteam/phy
phy/io/array.py
_range_from_slice
def _range_from_slice(myslice, start=None, stop=None, step=None, length=None): """Convert a slice to an array of integers.""" assert isinstance(myslice, slice) # Find 'step'. step = myslice.step if myslice.step is not None else step if step is None: step = 1 # Find 'start'. start = myslice.start if myslice.start is not None else start if start is None: start = 0 # Find 'stop' as a function of length if 'stop' is unspecified. stop = myslice.stop if myslice.stop is not None else stop if length is not None: stop_inferred = floor(start + step * length) if stop is not None and stop < stop_inferred: raise ValueError("'stop' ({stop}) and ".format(stop=stop) + "'length' ({length}) ".format(length=length) + "are not compatible.") stop = stop_inferred if stop is None and length is None: raise ValueError("'stop' and 'length' cannot be both unspecified.") myrange = np.arange(start, stop, step) # Check the length if it was specified. if length is not None: assert len(myrange) == length return myrange
python
def _range_from_slice(myslice, start=None, stop=None, step=None, length=None): """Convert a slice to an array of integers.""" assert isinstance(myslice, slice) # Find 'step'. step = myslice.step if myslice.step is not None else step if step is None: step = 1 # Find 'start'. start = myslice.start if myslice.start is not None else start if start is None: start = 0 # Find 'stop' as a function of length if 'stop' is unspecified. stop = myslice.stop if myslice.stop is not None else stop if length is not None: stop_inferred = floor(start + step * length) if stop is not None and stop < stop_inferred: raise ValueError("'stop' ({stop}) and ".format(stop=stop) + "'length' ({length}) ".format(length=length) + "are not compatible.") stop = stop_inferred if stop is None and length is None: raise ValueError("'stop' and 'length' cannot be both unspecified.") myrange = np.arange(start, stop, step) # Check the length if it was specified. if length is not None: assert len(myrange) == length return myrange
[ "def", "_range_from_slice", "(", "myslice", ",", "start", "=", "None", ",", "stop", "=", "None", ",", "step", "=", "None", ",", "length", "=", "None", ")", ":", "assert", "isinstance", "(", "myslice", ",", "slice", ")", "# Find 'step'.", "step", "=", "myslice", ".", "step", "if", "myslice", ".", "step", "is", "not", "None", "else", "step", "if", "step", "is", "None", ":", "step", "=", "1", "# Find 'start'.", "start", "=", "myslice", ".", "start", "if", "myslice", ".", "start", "is", "not", "None", "else", "start", "if", "start", "is", "None", ":", "start", "=", "0", "# Find 'stop' as a function of length if 'stop' is unspecified.", "stop", "=", "myslice", ".", "stop", "if", "myslice", ".", "stop", "is", "not", "None", "else", "stop", "if", "length", "is", "not", "None", ":", "stop_inferred", "=", "floor", "(", "start", "+", "step", "*", "length", ")", "if", "stop", "is", "not", "None", "and", "stop", "<", "stop_inferred", ":", "raise", "ValueError", "(", "\"'stop' ({stop}) and \"", ".", "format", "(", "stop", "=", "stop", ")", "+", "\"'length' ({length}) \"", ".", "format", "(", "length", "=", "length", ")", "+", "\"are not compatible.\"", ")", "stop", "=", "stop_inferred", "if", "stop", "is", "None", "and", "length", "is", "None", ":", "raise", "ValueError", "(", "\"'stop' and 'length' cannot be both unspecified.\"", ")", "myrange", "=", "np", ".", "arange", "(", "start", ",", "stop", ",", "step", ")", "# Check the length if it was specified.", "if", "length", "is", "not", "None", ":", "assert", "len", "(", "myrange", ")", "==", "length", "return", "myrange" ]
Convert a slice to an array of integers.
[ "Convert", "a", "slice", "to", "an", "array", "of", "integers", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L28-L54
train
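A minimal usage sketch for `_range_from_slice` (assuming `phy` at this revision is importable; the import path follows the record's `path` field):
from phy.io.array import _range_from_slice

_range_from_slice(slice(None), length=5)       # -> array([0, 1, 2, 3, 4])
_range_from_slice(slice(2, None, 3), stop=10)  # -> array([2, 5, 8]); start/step from the slice, stop from the keyword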
kwikteam/phy
phy/io/array.py
_index_of
def _index_of(arr, lookup): """Replace scalars in an array by their indices in a lookup table. Implicitly assume that: * All elements of arr and lookup are non-negative integers. * All elements of arr belong to lookup. This is not checked for performance reasons. """ # Equivalent of np.digitize(arr, lookup) - 1, but much faster. # TODO: assertions to disable in production for performance reasons. # TODO: np.searchsorted(lookup, arr) is faster on small arrays with large # values lookup = np.asarray(lookup, dtype=np.int32) m = (lookup.max() if len(lookup) else 0) + 1 tmp = np.zeros(m + 1, dtype=int) # Ensure that -1 values are kept. tmp[-1] = -1 if len(lookup): tmp[lookup] = np.arange(len(lookup)) return tmp[arr]
python
def _index_of(arr, lookup): """Replace scalars in an array by their indices in a lookup table. Implicitly assume that: * All elements of arr and lookup are non-negative integers. * All elements of arr belong to lookup. This is not checked for performance reasons. """ # Equivalent of np.digitize(arr, lookup) - 1, but much faster. # TODO: assertions to disable in production for performance reasons. # TODO: np.searchsorted(lookup, arr) is faster on small arrays with large # values lookup = np.asarray(lookup, dtype=np.int32) m = (lookup.max() if len(lookup) else 0) + 1 tmp = np.zeros(m + 1, dtype=int) # Ensure that -1 values are kept. tmp[-1] = -1 if len(lookup): tmp[lookup] = np.arange(len(lookup)) return tmp[arr]
[ "def", "_index_of", "(", "arr", ",", "lookup", ")", ":", "# Equivalent of np.digitize(arr, lookup) - 1, but much faster.", "# TODO: assertions to disable in production for performance reasons.", "# TODO: np.searchsorted(lookup, arr) is faster on small arrays with large", "# values", "lookup", "=", "np", ".", "asarray", "(", "lookup", ",", "dtype", "=", "np", ".", "int32", ")", "m", "=", "(", "lookup", ".", "max", "(", ")", "if", "len", "(", "lookup", ")", "else", "0", ")", "+", "1", "tmp", "=", "np", ".", "zeros", "(", "m", "+", "1", ",", "dtype", "=", "np", ".", "int", ")", "# Ensure that -1 values are kept.", "tmp", "[", "-", "1", "]", "=", "-", "1", "if", "len", "(", "lookup", ")", ":", "tmp", "[", "lookup", "]", "=", "np", ".", "arange", "(", "len", "(", "lookup", ")", ")", "return", "tmp", "[", "arr", "]" ]
Replace scalars in an array by their indices in a lookup table. Implicitly assume that: * All elements of arr and lookup are non-negative integers. * All elements of arr belong to lookup. This is not checked for performance reasons.
[ "Replace", "scalars", "in", "an", "array", "by", "their", "indices", "in", "a", "lookup", "table", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L99-L121
train
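A worked example of `_index_of` under the same import assumption; each value in `arr` is replaced by its position in `lookup`:
import numpy as np
from phy.io.array import _index_of

lookup = np.array([3, 5, 7])
arr = np.array([5, 3, 7, 7])
_index_of(arr, lookup)  # -> array([1, 0, 2, 2])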
kwikteam/phy
phy/io/array.py
_pad
def _pad(arr, n, dir='right'): """Pad an array with zeros along the first axis. Parameters ---------- n : int Size of the returned array in the first axis. dir : str Direction of the padding. Must be one of 'left' or 'right'. """ assert dir in ('left', 'right') if n < 0: raise ValueError("'n' must be positive: {0}.".format(n)) elif n == 0: return np.zeros((0,) + arr.shape[1:], dtype=arr.dtype) n_arr = arr.shape[0] shape = (n,) + arr.shape[1:] if n_arr == n: assert arr.shape == shape return arr elif n_arr < n: out = np.zeros(shape, dtype=arr.dtype) if dir == 'left': out[-n_arr:, ...] = arr elif dir == 'right': out[:n_arr, ...] = arr assert out.shape == shape return out else: if dir == 'left': out = arr[-n:, ...] elif dir == 'right': out = arr[:n, ...] assert out.shape == shape return out
python
def _pad(arr, n, dir='right'): """Pad an array with zeros along the first axis. Parameters ---------- n : int Size of the returned array in the first axis. dir : str Direction of the padding. Must be one of 'left' or 'right'. """ assert dir in ('left', 'right') if n < 0: raise ValueError("'n' must be positive: {0}.".format(n)) elif n == 0: return np.zeros((0,) + arr.shape[1:], dtype=arr.dtype) n_arr = arr.shape[0] shape = (n,) + arr.shape[1:] if n_arr == n: assert arr.shape == shape return arr elif n_arr < n: out = np.zeros(shape, dtype=arr.dtype) if dir == 'left': out[-n_arr:, ...] = arr elif dir == 'right': out[:n_arr, ...] = arr assert out.shape == shape return out else: if dir == 'left': out = arr[-n:, ...] elif dir == 'right': out = arr[:n, ...] assert out.shape == shape return out
[ "def", "_pad", "(", "arr", ",", "n", ",", "dir", "=", "'right'", ")", ":", "assert", "dir", "in", "(", "'left'", ",", "'right'", ")", "if", "n", "<", "0", ":", "raise", "ValueError", "(", "\"'n' must be positive: {0}.\"", ".", "format", "(", "n", ")", ")", "elif", "n", "==", "0", ":", "return", "np", ".", "zeros", "(", "(", "0", ",", ")", "+", "arr", ".", "shape", "[", "1", ":", "]", ",", "dtype", "=", "arr", ".", "dtype", ")", "n_arr", "=", "arr", ".", "shape", "[", "0", "]", "shape", "=", "(", "n", ",", ")", "+", "arr", ".", "shape", "[", "1", ":", "]", "if", "n_arr", "==", "n", ":", "assert", "arr", ".", "shape", "==", "shape", "return", "arr", "elif", "n_arr", "<", "n", ":", "out", "=", "np", ".", "zeros", "(", "shape", ",", "dtype", "=", "arr", ".", "dtype", ")", "if", "dir", "==", "'left'", ":", "out", "[", "-", "n_arr", ":", ",", "...", "]", "=", "arr", "elif", "dir", "==", "'right'", ":", "out", "[", ":", "n_arr", ",", "...", "]", "=", "arr", "assert", "out", ".", "shape", "==", "shape", "return", "out", "else", ":", "if", "dir", "==", "'left'", ":", "out", "=", "arr", "[", "-", "n", ":", ",", "...", "]", "elif", "dir", "==", "'right'", ":", "out", "=", "arr", "[", ":", "n", ",", "...", "]", "assert", "out", ".", "shape", "==", "shape", "return", "out" ]
Pad an array with zeros along the first axis. Parameters ---------- n : int Size of the returned array in the first axis. dir : str Direction of the padding. Must be one of 'left' or 'right'.
[ "Pad", "an", "array", "with", "zeros", "along", "the", "first", "axis", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L124-L160
train
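A short sketch of both padding directions of `_pad` (assuming `phy` is importable):
import numpy as np
from phy.io.array import _pad

arr = np.arange(6).reshape((3, 2))
_pad(arr, 5)              # shape (5, 2); rows 3-4 are zero padding
_pad(arr, 2, dir='left')  # keeps only the last two rows of `arr`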
kwikteam/phy
phy/io/array.py
_in_polygon
def _in_polygon(points, polygon): """Return the points that are inside a polygon.""" from matplotlib.path import Path points = _as_array(points) polygon = _as_array(polygon) assert points.ndim == 2 assert polygon.ndim == 2 if len(polygon): polygon = np.vstack((polygon, polygon[0])) path = Path(polygon, closed=True) return path.contains_points(points)
python
def _in_polygon(points, polygon): """Return the points that are inside a polygon.""" from matplotlib.path import Path points = _as_array(points) polygon = _as_array(polygon) assert points.ndim == 2 assert polygon.ndim == 2 if len(polygon): polygon = np.vstack((polygon, polygon[0])) path = Path(polygon, closed=True) return path.contains_points(points)
[ "def", "_in_polygon", "(", "points", ",", "polygon", ")", ":", "from", "matplotlib", ".", "path", "import", "Path", "points", "=", "_as_array", "(", "points", ")", "polygon", "=", "_as_array", "(", "polygon", ")", "assert", "points", ".", "ndim", "==", "2", "assert", "polygon", ".", "ndim", "==", "2", "if", "len", "(", "polygon", ")", ":", "polygon", "=", "np", ".", "vstack", "(", "(", "polygon", ",", "polygon", "[", "0", "]", ")", ")", "path", "=", "Path", "(", "polygon", ",", "closed", "=", "True", ")", "return", "path", ".", "contains_points", "(", "points", ")" ]
Return the points that are inside a polygon.
[ "Return", "the", "points", "that", "are", "inside", "a", "polygon", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L183-L193
train
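A minimal sketch of `_in_polygon` (assuming `phy` and its matplotlib dependency are importable); note the polygon is closed automatically:
import numpy as np
from phy.io.array import _in_polygon

polygon = [(0., 0.), (1., 0.), (1., 1.), (0., 1.)]  # unit square
points = np.array([(.5, .5), (2., 2.)])
_in_polygon(points, polygon)  # -> array([ True, False])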
kwikteam/phy
phy/io/array.py
read_array
def read_array(path, mmap_mode=None): """Read a .npy array.""" file_ext = op.splitext(path)[1] if file_ext == '.npy': return np.load(path, mmap_mode=mmap_mode) raise NotImplementedError("The file extension `{}` ".format(file_ext) + "is not currently supported.")
python
def read_array(path, mmap_mode=None): """Read a .npy array.""" file_ext = op.splitext(path)[1] if file_ext == '.npy': return np.load(path, mmap_mode=mmap_mode) raise NotImplementedError("The file extension `{}` ".format(file_ext) + "is not currently supported.")
[ "def", "read_array", "(", "path", ",", "mmap_mode", "=", "None", ")", ":", "file_ext", "=", "op", ".", "splitext", "(", "path", ")", "[", "1", "]", "if", "file_ext", "==", "'.npy'", ":", "return", "np", ".", "load", "(", "path", ",", "mmap_mode", "=", "mmap_mode", ")", "raise", "NotImplementedError", "(", "\"The file extension `{}` \"", ".", "format", "(", "file_ext", ")", "+", "\"is not currently supported.\"", ")" ]
Read a .npy array.
[ "Read", "a", ".", "npy", "array", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L219-L225
train
kwikteam/phy
phy/io/array.py
write_array
def write_array(path, arr): """Write an array to a .npy file.""" file_ext = op.splitext(path)[1] if file_ext == '.npy': return np.save(path, arr) raise NotImplementedError("The file extension `{}` ".format(file_ext) + "is not currently supported.")
python
def write_array(path, arr): """Write an array to a .npy file.""" file_ext = op.splitext(path)[1] if file_ext == '.npy': return np.save(path, arr) raise NotImplementedError("The file extension `{}` ".format(file_ext) + "is not currently supported.")
[ "def", "write_array", "(", "path", ",", "arr", ")", ":", "file_ext", "=", "op", ".", "splitext", "(", "path", ")", "[", "1", "]", "if", "file_ext", "==", "'.npy'", ":", "return", "np", ".", "save", "(", "path", ",", "arr", ")", "raise", "NotImplementedError", "(", "\"The file extension `{}` \"", ".", "format", "(", "file_ext", ")", "+", "\"is not currently supported.\"", ")" ]
Write an array to a .npy file.
[ "Write", "an", "array", "to", "a", ".", "npy", "file", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L228-L234
train
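A round-trip sketch covering both `write_array` and `read_array`; the file name is hypothetical, and any extension other than `.npy` raises NotImplementedError:
import numpy as np
from phy.io.array import read_array, write_array

arr = np.random.rand(10, 2)
write_array('traces.npy', arr)                    # hypothetical file name
loaded = read_array('traces.npy', mmap_mode='r')  # memory-mapped read
assert np.allclose(loaded, arr)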
kwikteam/phy
phy/io/array.py
_concatenate_virtual_arrays
def _concatenate_virtual_arrays(arrs, cols=None, scaling=None): """Return a virtual concatenation of several NumPy arrays.""" return None if not len(arrs) else ConcatenatedArrays(arrs, cols, scaling=scaling)
python
def _concatenate_virtual_arrays(arrs, cols=None, scaling=None): """Return a virtual concatenation of several NumPy arrays.""" return None if not len(arrs) else ConcatenatedArrays(arrs, cols, scaling=scaling)
[ "def", "_concatenate_virtual_arrays", "(", "arrs", ",", "cols", "=", "None", ",", "scaling", "=", "None", ")", ":", "return", "None", "if", "not", "len", "(", "arrs", ")", "else", "ConcatenatedArrays", "(", "arrs", ",", "cols", ",", "scaling", "=", "scaling", ")" ]
Return a virtual concatenation of several NumPy arrays.
[ "Return", "a", "virtual", "concatenate", "of", "several", "NumPy", "arrays", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L359-L362
train
kwikteam/phy
phy/io/array.py
_excerpt_step
def _excerpt_step(n_samples, n_excerpts=None, excerpt_size=None): """Compute the step of an excerpt set as a function of the number of excerpts or their sizes.""" assert n_excerpts >= 2 step = max((n_samples - excerpt_size) // (n_excerpts - 1), excerpt_size) return step
python
def _excerpt_step(n_samples, n_excerpts=None, excerpt_size=None): """Compute the step of an excerpt set as a function of the number of excerpts or their sizes.""" assert n_excerpts >= 2 step = max((n_samples - excerpt_size) // (n_excerpts - 1), excerpt_size) return step
[ "def", "_excerpt_step", "(", "n_samples", ",", "n_excerpts", "=", "None", ",", "excerpt_size", "=", "None", ")", ":", "assert", "n_excerpts", ">=", "2", "step", "=", "max", "(", "(", "n_samples", "-", "excerpt_size", ")", "//", "(", "n_excerpts", "-", "1", ")", ",", "excerpt_size", ")", "return", "step" ]
Compute the step of an excerpt set as a function of the number of excerpts or their sizes.
[ "Compute", "the", "step", "of", "an", "excerpt", "set", "as", "a", "function", "of", "the", "number", "of", "excerpts", "or", "their", "sizes", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L369-L375
train
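A worked instance of the `_excerpt_step` arithmetic (assuming `phy` is importable):
from phy.io.array import _excerpt_step

# 3 excerpts of 10 samples each from a 100-sample signal:
# step = max((100 - 10) // (3 - 1), 10) = 45, so excerpts start at 0, 45, 90.
_excerpt_step(100, n_excerpts=3, excerpt_size=10)  # -> 45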
kwikteam/phy
phy/io/array.py
chunk_bounds
def chunk_bounds(n_samples, chunk_size, overlap=0): """Return chunk bounds. Chunks have the form: [ overlap/2 | chunk_size-overlap | overlap/2 ] s_start keep_start keep_end s_end Except for the first and last chunks which do not have a left/right overlap. This generator yields (s_start, s_end, keep_start, keep_end). """ s_start = 0 s_end = chunk_size keep_start = s_start keep_end = s_end - overlap // 2 yield s_start, s_end, keep_start, keep_end while s_end - overlap + chunk_size < n_samples: s_start = s_end - overlap s_end = s_start + chunk_size keep_start = keep_end keep_end = s_end - overlap // 2 if s_start < s_end: yield s_start, s_end, keep_start, keep_end s_start = s_end - overlap s_end = n_samples keep_start = keep_end keep_end = s_end if s_start < s_end: yield s_start, s_end, keep_start, keep_end
python
def chunk_bounds(n_samples, chunk_size, overlap=0): """Return chunk bounds. Chunks have the form: [ overlap/2 | chunk_size-overlap | overlap/2 ] s_start keep_start keep_end s_end Except for the first and last chunks which do not have a left/right overlap. This generator yields (s_start, s_end, keep_start, keep_end). """ s_start = 0 s_end = chunk_size keep_start = s_start keep_end = s_end - overlap // 2 yield s_start, s_end, keep_start, keep_end while s_end - overlap + chunk_size < n_samples: s_start = s_end - overlap s_end = s_start + chunk_size keep_start = keep_end keep_end = s_end - overlap // 2 if s_start < s_end: yield s_start, s_end, keep_start, keep_end s_start = s_end - overlap s_end = n_samples keep_start = keep_end keep_end = s_end if s_start < s_end: yield s_start, s_end, keep_start, keep_end
[ "def", "chunk_bounds", "(", "n_samples", ",", "chunk_size", ",", "overlap", "=", "0", ")", ":", "s_start", "=", "0", "s_end", "=", "chunk_size", "keep_start", "=", "s_start", "keep_end", "=", "s_end", "-", "overlap", "//", "2", "yield", "s_start", ",", "s_end", ",", "keep_start", ",", "keep_end", "while", "s_end", "-", "overlap", "+", "chunk_size", "<", "n_samples", ":", "s_start", "=", "s_end", "-", "overlap", "s_end", "=", "s_start", "+", "chunk_size", "keep_start", "=", "keep_end", "keep_end", "=", "s_end", "-", "overlap", "//", "2", "if", "s_start", "<", "s_end", ":", "yield", "s_start", ",", "s_end", ",", "keep_start", ",", "keep_end", "s_start", "=", "s_end", "-", "overlap", "s_end", "=", "n_samples", "keep_start", "=", "keep_end", "keep_end", "=", "s_end", "if", "s_start", "<", "s_end", ":", "yield", "s_start", ",", "s_end", ",", "keep_start", ",", "keep_end" ]
Return chunk bounds. Chunks have the form: [ overlap/2 | chunk_size-overlap | overlap/2 ] s_start keep_start keep_end s_end Except for the first and last chunks which do not have a left/right overlap. This generator yields (s_start, s_end, keep_start, keep_end).
[ "Return", "chunk", "bounds", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L378-L411
train
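A traced run of `chunk_bounds` showing how the keep ranges tile the signal without overlap:
from phy.io.array import chunk_bounds

for bounds in chunk_bounds(n_samples=250, chunk_size=100, overlap=20):
    print(bounds)  # (s_start, s_end, keep_start, keep_end)
# (0, 100, 0, 90)
# (80, 180, 90, 170)
# (160, 250, 170, 250)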
kwikteam/phy
phy/io/array.py
data_chunk
def data_chunk(data, chunk, with_overlap=False): """Get a data chunk.""" assert isinstance(chunk, tuple) if len(chunk) == 2: i, j = chunk elif len(chunk) == 4: if with_overlap: i, j = chunk[:2] else: i, j = chunk[2:] else: raise ValueError("'chunk' should have 2 or 4 elements, " "not {0:d}".format(len(chunk))) return data[i:j, ...]
python
def data_chunk(data, chunk, with_overlap=False): """Get a data chunk.""" assert isinstance(chunk, tuple) if len(chunk) == 2: i, j = chunk elif len(chunk) == 4: if with_overlap: i, j = chunk[:2] else: i, j = chunk[2:] else: raise ValueError("'chunk' should have 2 or 4 elements, " "not {0:d}".format(len(chunk))) return data[i:j, ...]
[ "def", "data_chunk", "(", "data", ",", "chunk", ",", "with_overlap", "=", "False", ")", ":", "assert", "isinstance", "(", "chunk", ",", "tuple", ")", "if", "len", "(", "chunk", ")", "==", "2", ":", "i", ",", "j", "=", "chunk", "elif", "len", "(", "chunk", ")", "==", "4", ":", "if", "with_overlap", ":", "i", ",", "j", "=", "chunk", "[", ":", "2", "]", "else", ":", "i", ",", "j", "=", "chunk", "[", "2", ":", "]", "else", ":", "raise", "ValueError", "(", "\"'chunk' should have 2 or 4 elements, \"", "\"not {0:d}\"", ".", "format", "(", "len", "(", "chunk", ")", ")", ")", "return", "data", "[", "i", ":", "j", ",", "...", "]" ]
Get a data chunk.
[ "Get", "a", "data", "chunk", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L428-L441
train
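A sketch of `data_chunk` with a 4-tuple chunk such as those yielded by `chunk_bounds`:
import numpy as np
from phy.io.array import data_chunk

data = np.arange(20).reshape((10, 2))
chunk = (0, 6, 1, 5)
data_chunk(data, chunk)                     # rows 1..4: the keep range
data_chunk(data, chunk, with_overlap=True)  # rows 0..5: including the overlap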
kwikteam/phy
phy/io/array.py
_spikes_in_clusters
def _spikes_in_clusters(spike_clusters, clusters): """Return the ids of all spikes belonging to the specified clusters.""" if len(spike_clusters) == 0 or len(clusters) == 0: return np.array([], dtype=int) return np.nonzero(np.in1d(spike_clusters, clusters))[0]
python
def _spikes_in_clusters(spike_clusters, clusters): """Return the ids of all spikes belonging to the specified clusters.""" if len(spike_clusters) == 0 or len(clusters) == 0: return np.array([], dtype=int) return np.nonzero(np.in1d(spike_clusters, clusters))[0]
[ "def", "_spikes_in_clusters", "(", "spike_clusters", ",", "clusters", ")", ":", "if", "len", "(", "spike_clusters", ")", "==", "0", "or", "len", "(", "clusters", ")", "==", "0", ":", "return", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "np", ".", "int", ")", "return", "np", ".", "nonzero", "(", "np", ".", "in1d", "(", "spike_clusters", ",", "clusters", ")", ")", "[", "0", "]" ]
Return the ids of all spikes belonging to the specified clusters.
[ "Return", "the", "ids", "of", "all", "spikes", "belonging", "to", "the", "specified", "clusters", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L465-L469
train
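A small example of `_spikes_in_clusters` (assuming `phy` is importable):
import numpy as np
from phy.io.array import _spikes_in_clusters

spike_clusters = np.array([2, 2, 5, 3, 5])
_spikes_in_clusters(spike_clusters, [5])     # -> array([2, 4])
_spikes_in_clusters(spike_clusters, [2, 3])  # -> array([0, 1, 3])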
kwikteam/phy
phy/io/array.py
grouped_mean
def grouped_mean(arr, spike_clusters): """Compute the mean of a spike-dependent quantity for every cluster. The two arguments should be 1D arrays with `n_spikes` elements. The output is a 1D array with `n_clusters` elements. The clusters are sorted in increasing order. """ arr = np.asarray(arr) spike_clusters = np.asarray(spike_clusters) assert arr.ndim == 1 assert arr.shape[0] == len(spike_clusters) cluster_ids = _unique(spike_clusters) spike_clusters_rel = _index_of(spike_clusters, cluster_ids) spike_counts = np.bincount(spike_clusters_rel) assert len(spike_counts) == len(cluster_ids) t = np.zeros(len(cluster_ids)) # Compute the sum with possible repetitions. np.add.at(t, spike_clusters_rel, arr) return t / spike_counts
python
def grouped_mean(arr, spike_clusters): """Compute the mean of a spike-dependent quantity for every cluster. The two arguments should be 1D arrays with `n_spikes` elements. The output is a 1D array with `n_clusters` elements. The clusters are sorted in increasing order. """ arr = np.asarray(arr) spike_clusters = np.asarray(spike_clusters) assert arr.ndim == 1 assert arr.shape[0] == len(spike_clusters) cluster_ids = _unique(spike_clusters) spike_clusters_rel = _index_of(spike_clusters, cluster_ids) spike_counts = np.bincount(spike_clusters_rel) assert len(spike_counts) == len(cluster_ids) t = np.zeros(len(cluster_ids)) # Compute the sum with possible repetitions. np.add.at(t, spike_clusters_rel, arr) return t / spike_counts
[ "def", "grouped_mean", "(", "arr", ",", "spike_clusters", ")", ":", "arr", "=", "np", ".", "asarray", "(", "arr", ")", "spike_clusters", "=", "np", ".", "asarray", "(", "spike_clusters", ")", "assert", "arr", ".", "ndim", "==", "1", "assert", "arr", ".", "shape", "[", "0", "]", "==", "len", "(", "spike_clusters", ")", "cluster_ids", "=", "_unique", "(", "spike_clusters", ")", "spike_clusters_rel", "=", "_index_of", "(", "spike_clusters", ",", "cluster_ids", ")", "spike_counts", "=", "np", ".", "bincount", "(", "spike_clusters_rel", ")", "assert", "len", "(", "spike_counts", ")", "==", "len", "(", "cluster_ids", ")", "t", "=", "np", ".", "zeros", "(", "len", "(", "cluster_ids", ")", ")", "# Compute the sum with possible repetitions.", "np", ".", "add", ".", "at", "(", "t", ",", "spike_clusters_rel", ",", "arr", ")", "return", "t", "/", "spike_counts" ]
Compute the mean of a spike-dependent quantity for every cluster. The two arguments should be 1D arrays with `n_spikes` elements. The output is a 1D array with `n_clusters` elements. The clusters are sorted in increasing order.
[ "Compute", "the", "mean", "of", "a", "spike", "-", "dependent", "quantity", "for", "every", "cluster", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L506-L526
train
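A worked example of `grouped_mean`; the output follows the sorted cluster ids:
import numpy as np
from phy.io.array import grouped_mean

amplitudes = np.array([1., 3., 10., 2.])
spike_clusters = np.array([7, 7, 4, 7])
# Cluster 4: 10/1; cluster 7: (1 + 3 + 2)/3.
grouped_mean(amplitudes, spike_clusters)  # -> array([10., 2.])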
kwikteam/phy
phy/io/array.py
regular_subset
def regular_subset(spikes, n_spikes_max=None, offset=0): """Prune the current selection to get at most n_spikes_max spikes.""" assert spikes is not None # Nothing to do if the selection already satisfies n_spikes_max. if n_spikes_max is None or len(spikes) <= n_spikes_max: # pragma: no cover return spikes step = math.ceil(np.clip(1. / n_spikes_max * len(spikes), 1, len(spikes))) step = int(step) # Note: randomly-changing selections are confusing... my_spikes = spikes[offset::step][:n_spikes_max] assert len(my_spikes) <= len(spikes) assert len(my_spikes) <= n_spikes_max return my_spikes
python
def regular_subset(spikes, n_spikes_max=None, offset=0): """Prune the current selection to get at most n_spikes_max spikes.""" assert spikes is not None # Nothing to do if the selection already satisfies n_spikes_max. if n_spikes_max is None or len(spikes) <= n_spikes_max: # pragma: no cover return spikes step = math.ceil(np.clip(1. / n_spikes_max * len(spikes), 1, len(spikes))) step = int(step) # Note: randomly-changing selections are confusing... my_spikes = spikes[offset::step][:n_spikes_max] assert len(my_spikes) <= len(spikes) assert len(my_spikes) <= n_spikes_max return my_spikes
[ "def", "regular_subset", "(", "spikes", ",", "n_spikes_max", "=", "None", ",", "offset", "=", "0", ")", ":", "assert", "spikes", "is", "not", "None", "# Nothing to do if the selection already satisfies n_spikes_max.", "if", "n_spikes_max", "is", "None", "or", "len", "(", "spikes", ")", "<=", "n_spikes_max", ":", "# pragma: no cover", "return", "spikes", "step", "=", "math", ".", "ceil", "(", "np", ".", "clip", "(", "1.", "/", "n_spikes_max", "*", "len", "(", "spikes", ")", ",", "1", ",", "len", "(", "spikes", ")", ")", ")", "step", "=", "int", "(", "step", ")", "# Note: randomly-changing selections are confusing...", "my_spikes", "=", "spikes", "[", "offset", ":", ":", "step", "]", "[", ":", "n_spikes_max", "]", "assert", "len", "(", "my_spikes", ")", "<=", "len", "(", "spikes", ")", "assert", "len", "(", "my_spikes", ")", "<=", "n_spikes_max", "return", "my_spikes" ]
Prune the current selection to get at most n_spikes_max spikes.
[ "Prune", "the", "current", "selection", "to", "get", "at", "most", "n_spikes_max", "spikes", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L529-L542
train
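A one-line illustration of `regular_subset` (assuming `phy` is importable):
import numpy as np
from phy.io.array import regular_subset

spikes = np.arange(100)
regular_subset(spikes, n_spikes_max=10)  # -> array([0, 10, 20, ..., 90]), every 10th spike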
kwikteam/phy
phy/io/array.py
select_spikes
def select_spikes(cluster_ids=None, max_n_spikes_per_cluster=None, spikes_per_cluster=None, batch_size=None, subset=None, ): """Return a selection of spikes belonging to the specified clusters.""" subset = subset or 'regular' assert _is_array_like(cluster_ids) if not len(cluster_ids): return np.array([], dtype=np.int64) if max_n_spikes_per_cluster in (None, 0): selection = {c: spikes_per_cluster(c) for c in cluster_ids} else: assert max_n_spikes_per_cluster > 0 selection = {} n_clusters = len(cluster_ids) for cluster in cluster_ids: # Decrease the number of spikes per cluster when there # are more clusters. n = int(max_n_spikes_per_cluster * exp(-.1 * (n_clusters - 1))) n = max(1, n) spike_ids = spikes_per_cluster(cluster) if subset == 'regular': # Regular subselection. if batch_size is None or len(spike_ids) <= max(batch_size, n): spike_ids = regular_subset(spike_ids, n_spikes_max=n) else: # Batch selections of spikes. spike_ids = get_excerpts(spike_ids, n // batch_size, batch_size) elif subset == 'random' and len(spike_ids) > n: # Random subselection. spike_ids = np.random.choice(spike_ids, n, replace=False) spike_ids = np.unique(spike_ids) selection[cluster] = spike_ids return _flatten_per_cluster(selection)
python
def select_spikes(cluster_ids=None, max_n_spikes_per_cluster=None, spikes_per_cluster=None, batch_size=None, subset=None, ): """Return a selection of spikes belonging to the specified clusters.""" subset = subset or 'regular' assert _is_array_like(cluster_ids) if not len(cluster_ids): return np.array([], dtype=np.int64) if max_n_spikes_per_cluster in (None, 0): selection = {c: spikes_per_cluster(c) for c in cluster_ids} else: assert max_n_spikes_per_cluster > 0 selection = {} n_clusters = len(cluster_ids) for cluster in cluster_ids: # Decrease the number of spikes per cluster when there # are more clusters. n = int(max_n_spikes_per_cluster * exp(-.1 * (n_clusters - 1))) n = max(1, n) spike_ids = spikes_per_cluster(cluster) if subset == 'regular': # Regular subselection. if batch_size is None or len(spike_ids) <= max(batch_size, n): spike_ids = regular_subset(spike_ids, n_spikes_max=n) else: # Batch selections of spikes. spike_ids = get_excerpts(spike_ids, n // batch_size, batch_size) elif subset == 'random' and len(spike_ids) > n: # Random subselection. spike_ids = np.random.choice(spike_ids, n, replace=False) spike_ids = np.unique(spike_ids) selection[cluster] = spike_ids return _flatten_per_cluster(selection)
[ "def", "select_spikes", "(", "cluster_ids", "=", "None", ",", "max_n_spikes_per_cluster", "=", "None", ",", "spikes_per_cluster", "=", "None", ",", "batch_size", "=", "None", ",", "subset", "=", "None", ",", ")", ":", "subset", "=", "subset", "or", "'regular'", "assert", "_is_array_like", "(", "cluster_ids", ")", "if", "not", "len", "(", "cluster_ids", ")", ":", "return", "np", ".", "array", "(", "[", "]", ",", "dtype", "=", "np", ".", "int64", ")", "if", "max_n_spikes_per_cluster", "in", "(", "None", ",", "0", ")", ":", "selection", "=", "{", "c", ":", "spikes_per_cluster", "(", "c", ")", "for", "c", "in", "cluster_ids", "}", "else", ":", "assert", "max_n_spikes_per_cluster", ">", "0", "selection", "=", "{", "}", "n_clusters", "=", "len", "(", "cluster_ids", ")", "for", "cluster", "in", "cluster_ids", ":", "# Decrease the number of spikes per cluster when there", "# are more clusters.", "n", "=", "int", "(", "max_n_spikes_per_cluster", "*", "exp", "(", "-", ".1", "*", "(", "n_clusters", "-", "1", ")", ")", ")", "n", "=", "max", "(", "1", ",", "n", ")", "spike_ids", "=", "spikes_per_cluster", "(", "cluster", ")", "if", "subset", "==", "'regular'", ":", "# Regular subselection.", "if", "batch_size", "is", "None", "or", "len", "(", "spike_ids", ")", "<=", "max", "(", "batch_size", ",", "n", ")", ":", "spike_ids", "=", "regular_subset", "(", "spike_ids", ",", "n_spikes_max", "=", "n", ")", "else", ":", "# Batch selections of spikes.", "spike_ids", "=", "get_excerpts", "(", "spike_ids", ",", "n", "//", "batch_size", ",", "batch_size", ")", "elif", "subset", "==", "'random'", "and", "len", "(", "spike_ids", ")", ">", "n", ":", "# Random subselection.", "spike_ids", "=", "np", ".", "random", ".", "choice", "(", "spike_ids", ",", "n", ",", "replace", "=", "False", ")", "spike_ids", "=", "np", ".", "unique", "(", "spike_ids", ")", "selection", "[", "cluster", "]", "=", "spike_ids", "return", "_flatten_per_cluster", "(", "selection", ")" ]
Return a selection of spikes belonging to the specified clusters.
[ "Return", "a", "selection", "of", "spikes", "belonging", "to", "the", "specified", "clusters", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L545-L582
train
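A sketch of `select_spikes` with a hypothetical `spikes_per_cluster` lookup (cluster `c` owns spikes `10*c .. 10*c + 9`); the per-cluster budget shrinks exponentially with the number of clusters:
import numpy as np
from phy.io.array import select_spikes

spikes_per_cluster = lambda c: np.arange(10 * c, 10 * c + 10)  # hypothetical lookup
select_spikes(cluster_ids=[1, 2], max_n_spikes_per_cluster=5,
              spikes_per_cluster=spikes_per_cluster)
# -> about 4 evenly spaced spikes per cluster,
#    e.g. array([10, 13, 16, 19, 20, 23, 26, 29])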
kwikteam/phy
phy/io/array.py
ConcatenatedArrays._get_recording
def _get_recording(self, index): """Return the recording that contains a given index.""" assert index >= 0 recs = np.nonzero((index - self.offsets[:-1]) >= 0)[0] if len(recs) == 0: # pragma: no cover # If the index is greater than the total size, # return the last recording. return len(self.arrs) - 1 # Return the last recording such that the index is greater than # its offset. return recs[-1]
python
def _get_recording(self, index): """Return the recording that contains a given index.""" assert index >= 0 recs = np.nonzero((index - self.offsets[:-1]) >= 0)[0] if len(recs) == 0: # pragma: no cover # If the index is greater than the total size, # return the last recording. return len(self.arrs) - 1 # Return the last recording such that the index is greater than # its offset. return recs[-1]
[ "def", "_get_recording", "(", "self", ",", "index", ")", ":", "assert", "index", ">=", "0", "recs", "=", "np", ".", "nonzero", "(", "(", "index", "-", "self", ".", "offsets", "[", ":", "-", "1", "]", ")", ">=", "0", ")", "[", "0", "]", "if", "len", "(", "recs", ")", "==", "0", ":", "# pragma: no cover", "# If the index is greater than the total size,", "# return the last recording.", "return", "len", "(", "self", ".", "arrs", ")", "-", "1", "# Return the last recording such that the index is greater than", "# its offset.", "return", "recs", "[", "-", "1", "]" ]
Return the recording that contains a given index.
[ "Return", "the", "recording", "that", "contains", "a", "given", "index", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/io/array.py#L297-L307
train
kwikteam/phy
phy/traces/filter.py
bandpass_filter
def bandpass_filter(rate=None, low=None, high=None, order=None): """Butterworth bandpass filter.""" assert low < high assert order >= 1 return signal.butter(order, (low / (rate / 2.), high / (rate / 2.)), 'pass')
python
def bandpass_filter(rate=None, low=None, high=None, order=None): """Butterworth bandpass filter.""" assert low < high assert order >= 1 return signal.butter(order, (low / (rate / 2.), high / (rate / 2.)), 'pass')
[ "def", "bandpass_filter", "(", "rate", "=", "None", ",", "low", "=", "None", ",", "high", "=", "None", ",", "order", "=", "None", ")", ":", "assert", "low", "<", "high", "assert", "order", ">=", "1", "return", "signal", ".", "butter", "(", "order", ",", "(", "low", "/", "(", "rate", "/", "2.", ")", ",", "high", "/", "(", "rate", "/", "2.", ")", ")", ",", "'pass'", ")" ]
Butterworth bandpass filter.
[ "Butterworth", "bandpass", "filter", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/traces/filter.py#L19-L25
train
kwikteam/phy
phy/traces/filter.py
apply_filter
def apply_filter(x, filter=None, axis=0): """Apply a filter to an array.""" x = _as_array(x) if x.shape[axis] == 0: return x b, a = filter return signal.filtfilt(b, a, x, axis=axis)
python
def apply_filter(x, filter=None, axis=0): """Apply a filter to an array.""" x = _as_array(x) if x.shape[axis] == 0: return x b, a = filter return signal.filtfilt(b, a, x, axis=axis)
[ "def", "apply_filter", "(", "x", ",", "filter", "=", "None", ",", "axis", "=", "0", ")", ":", "x", "=", "_as_array", "(", "x", ")", "if", "x", ".", "shape", "[", "axis", "]", "==", "0", ":", "return", "x", "b", ",", "a", "=", "filter", "return", "signal", ".", "filtfilt", "(", "b", ",", "a", ",", "x", ",", "axis", "=", "axis", ")" ]
Apply a filter to an array.
[ "Apply", "a", "filter", "to", "an", "array", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/traces/filter.py#L28-L34
train
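A combined sketch of `bandpass_filter` and `apply_filter`; the rate and band edges are illustrative values, not library defaults:
import numpy as np
from phy.traces.filter import apply_filter, bandpass_filter

butter = bandpass_filter(rate=25000., low=500., high=9000., order=3)  # (b, a) coefficients
traces = np.random.randn(1000, 4)  # (n_samples, n_channels)
filtered = apply_filter(traces, filter=butter, axis=0)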
kwikteam/phy
phy/traces/filter.py
Whitening.fit
def fit(self, x, fudge=1e-18): """Compute the whitening matrix. Parameters ---------- x : array An `(n_samples, n_channels)` array. """ assert x.ndim == 2 ns, nc = x.shape x_cov = np.cov(x, rowvar=0) assert x_cov.shape == (nc, nc) d, v = np.linalg.eigh(x_cov) d = np.diag(1. / np.sqrt(d + fudge)) # This is equivalent, but seems much slower... # w = np.einsum('il,lk,jk->ij', v, d, v) w = np.dot(np.dot(v, d), v.T) self._matrix = w return w
python
def fit(self, x, fudge=1e-18): """Compute the whitening matrix. Parameters ---------- x : array An `(n_samples, n_channels)` array. """ assert x.ndim == 2 ns, nc = x.shape x_cov = np.cov(x, rowvar=0) assert x_cov.shape == (nc, nc) d, v = np.linalg.eigh(x_cov) d = np.diag(1. / np.sqrt(d + fudge)) # This is equivalent, but seems much slower... # w = np.einsum('il,lk,jk->ij', v, d, v) w = np.dot(np.dot(v, d), v.T) self._matrix = w return w
[ "def", "fit", "(", "self", ",", "x", ",", "fudge", "=", "1e-18", ")", ":", "assert", "x", ".", "ndim", "==", "2", "ns", ",", "nc", "=", "x", ".", "shape", "x_cov", "=", "np", ".", "cov", "(", "x", ",", "rowvar", "=", "0", ")", "assert", "x_cov", ".", "shape", "==", "(", "nc", ",", "nc", ")", "d", ",", "v", "=", "np", ".", "linalg", ".", "eigh", "(", "x_cov", ")", "d", "=", "np", ".", "diag", "(", "1.", "/", "np", ".", "sqrt", "(", "d", "+", "fudge", ")", ")", "# This is equivalent, but seems much slower...", "# w = np.einsum('il,lk,jk->ij', v, d, v)", "w", "=", "np", ".", "dot", "(", "np", ".", "dot", "(", "v", ",", "d", ")", ",", "v", ".", "T", ")", "self", ".", "_matrix", "=", "w", "return", "w" ]
Compute the whitening matrix. Parameters ---------- x : array An `(n_samples, n_channels)` array.
[ "Compute", "the", "whitening", "matrix", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/traces/filter.py#L72-L92
train
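A minimal check of `Whitening.fit`: the covariance of the whitened data should be close to identity, up to the fudge term:
import numpy as np
from phy.traces.filter import Whitening

x = np.random.randn(2000, 4)
m = Whitening().fit(x)  # (4, 4) symmetric whitening matrix
x_w = np.dot(x, m)
np.allclose(np.cov(x_w, rowvar=0), np.eye(4))  # -> True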
kwikteam/phy
phy/cluster/_history.py
History.current_item
def current_item(self): """Return the current element.""" if self._history and self._index >= 0: self._check_index() return self._history[self._index]
python
def current_item(self): """Return the current element.""" if self._history and self._index >= 0: self._check_index() return self._history[self._index]
[ "def", "current_item", "(", "self", ")", ":", "if", "self", ".", "_history", "and", "self", ".", "_index", ">=", "0", ":", "self", ".", "_check_index", "(", ")", "return", "self", ".", "_history", "[", "self", ".", "_index", "]" ]
Return the current element.
[ "Return", "the", "current", "element", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/_history.py#L28-L32
train
kwikteam/phy
phy/cluster/_history.py
History._check_index
def _check_index(self): """Check that the index is within the bounds of _history.""" assert 0 <= self._index <= len(self._history) - 1 # There should always be the base item at least. assert len(self._history) >= 1
python
def _check_index(self): """Check that the index is within the bounds of _history.""" assert 0 <= self._index <= len(self._history) - 1 # There should always be the base item at least. assert len(self._history) >= 1
[ "def", "_check_index", "(", "self", ")", ":", "assert", "0", "<=", "self", ".", "_index", "<=", "len", "(", "self", ".", "_history", ")", "-", "1", "# There should always be the base item at least.", "assert", "len", "(", "self", ".", "_history", ")", ">=", "1" ]
Check that the index is within the bounds of _history.
[ "Check", "that", "the", "index", "is", "without", "the", "bounds", "of", "_history", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/_history.py#L39-L43
train
kwikteam/phy
phy/cluster/_history.py
History.iter
def iter(self, start=0, end=None): """Iterate through successive history items. Parameters ---------- end : int Index of the last item to loop through + 1. start : int Initial index for the loop (0 by default). """ if end is None: end = self._index + 1 elif end == 0: return if start >= end: return # Check arguments. assert 0 <= end <= len(self._history) assert 0 <= start <= end - 1 for i in range(start, end): yield self._history[i]
python
def iter(self, start=0, end=None): """Iterate through successive history items. Parameters ---------- end : int Index of the last item to loop through + 1. start : int Initial index for the loop (0 by default). """ if end is None: end = self._index + 1 elif end == 0: return if start >= end: return # Check arguments. assert 0 <= end <= len(self._history) assert 0 <= start <= end - 1 for i in range(start, end): yield self._history[i]
[ "def", "iter", "(", "self", ",", "start", "=", "0", ",", "end", "=", "None", ")", ":", "if", "end", "is", "None", ":", "end", "=", "self", ".", "_index", "+", "1", "elif", "end", "==", "0", ":", "raise", "StopIteration", "(", ")", "if", "start", ">=", "end", ":", "raise", "StopIteration", "(", ")", "# Check arguments.", "assert", "0", "<=", "end", "<=", "len", "(", "self", ".", "_history", ")", "assert", "0", "<=", "start", "<=", "end", "-", "1", "for", "i", "in", "range", "(", "start", ",", "end", ")", ":", "yield", "self", ".", "_history", "[", "i", "]" ]
Iterate through successive history items. Parameters ---------- end : int Index of the last item to loop through + 1. start : int Initial index for the loop (0 by default).
[ "Iterate", "through", "successive", "history", "items", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/_history.py#L51-L73
train
kwikteam/phy
phy/cluster/_history.py
History.add
def add(self, item): """Add an item in the history.""" self._check_index() # Possibly truncate the history up to the current point. self._history = self._history[:self._index + 1] # Append the item self._history.append(item) # Increment the index. self._index += 1 self._check_index() # Check that the current element is what was provided to the function. assert id(self.current_item) == id(item)
python
def add(self, item): """Add an item in the history.""" self._check_index() # Possibly truncate the history up to the current point. self._history = self._history[:self._index + 1] # Append the item self._history.append(item) # Increment the index. self._index += 1 self._check_index() # Check that the current element is what was provided to the function. assert id(self.current_item) == id(item)
[ "def", "add", "(", "self", ",", "item", ")", ":", "self", ".", "_check_index", "(", ")", "# Possibly truncate the history up to the current point.", "self", ".", "_history", "=", "self", ".", "_history", "[", ":", "self", ".", "_index", "+", "1", "]", "# Append the item", "self", ".", "_history", ".", "append", "(", "item", ")", "# Increment the index.", "self", ".", "_index", "+=", "1", "self", ".", "_check_index", "(", ")", "# Check that the current element is what was provided to the function.", "assert", "id", "(", "self", ".", "current_item", ")", "==", "id", "(", "item", ")" ]
Add an item in the history.
[ "Add", "an", "item", "in", "the", "history", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/_history.py#L81-L92
train
kwikteam/phy
phy/cluster/_history.py
History.back
def back(self): """Go back in history if possible. Return the undone item. """ if self._index <= 0: return None undone = self.current_item self._index -= 1 self._check_index() return undone
python
def back(self): """Go back in history if possible. Return the undone item. """ if self._index <= 0: return None undone = self.current_item self._index -= 1 self._check_index() return undone
[ "def", "back", "(", "self", ")", ":", "if", "self", ".", "_index", "<=", "0", ":", "return", "None", "undone", "=", "self", ".", "current_item", "self", ".", "_index", "-=", "1", "self", ".", "_check_index", "(", ")", "return", "undone" ]
Go back in history if possible. Return the undone item.
[ "Go", "back", "in", "history", "if", "possible", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/_history.py#L94-L105
train
kwikteam/phy
phy/cluster/_history.py
History.forward
def forward(self): """Go forward in history if possible. Return the current item after going forward. """ if self._index >= len(self._history) - 1: return None self._index += 1 self._check_index() return self.current_item
python
def forward(self): """Go forward in history if possible. Return the current item after going forward. """ if self._index >= len(self._history) - 1: return None self._index += 1 self._check_index() return self.current_item
[ "def", "forward", "(", "self", ")", ":", "if", "self", ".", "_index", ">=", "len", "(", "self", ".", "_history", ")", "-", "1", ":", "return", "None", "self", ".", "_index", "+=", "1", "self", ".", "_check_index", "(", ")", "return", "self", ".", "current_item" ]
Go forward in history if possible. Return the current item after going forward.
[ "Go", "forward", "in", "history", "if", "possible", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/_history.py#L110-L120
train
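A usage sketch tying the `History` methods together; the constructor signature is an assumption, since only the methods, not `__init__`, appear in these records:
from phy.cluster._history import History

h = History(base_item='base')  # assumed constructor; a base item is always present
h.add('A')
h.add('B')
h.back()        # -> 'B' (the undone item)
h.current_item  # -> 'A'
h.forward()     # -> 'B' (the item after going forward)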
kwikteam/phy
phy/cluster/_history.py
GlobalHistory.add_to_current_action
def add_to_current_action(self, controller): """Add a controller to the current action.""" item = self.current_item self._history[self._index] = item + (controller,)
python
def add_to_current_action(self, controller): """Add a controller to the current action.""" item = self.current_item self._history[self._index] = item + (controller,)
[ "def", "add_to_current_action", "(", "self", ",", "controller", ")", ":", "item", "=", "self", ".", "current_item", "self", ".", "_history", "[", "self", ".", "_index", "]", "=", "item", "+", "(", "controller", ",", ")" ]
Add a controller to the current action.
[ "Add", "a", "controller", "to", "the", "current", "action", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/_history.py#L137-L140
train
kwikteam/phy
phy/cluster/_history.py
GlobalHistory.redo
def redo(self): """Redo the last action. This will call `redo()` on all controllers involved in this action. """ controllers = self.forward() if controllers is None: ups = () else: ups = tuple([controller.redo() for controller in controllers]) if self.process_ups is not None: return self.process_ups(ups) else: return ups
python
def redo(self): """Redo the last action. This will call `redo()` on all controllers involved in this action. """ controllers = self.forward() if controllers is None: ups = () else: ups = tuple([controller.redo() for controller in controllers]) if self.process_ups is not None: return self.process_ups(ups) else: return ups
[ "def", "redo", "(", "self", ")", ":", "controllers", "=", "self", ".", "forward", "(", ")", "if", "controllers", "is", "None", ":", "ups", "=", "(", ")", "else", ":", "ups", "=", "tuple", "(", "[", "controller", ".", "redo", "(", ")", "for", "controller", "in", "controllers", "]", ")", "if", "self", ".", "process_ups", "is", "not", "None", ":", "return", "self", ".", "process_ups", "(", "ups", ")", "else", ":", "return", "ups" ]
Redo the last action. This will call `redo()` on all controllers involved in this action.
[ "Redo", "the", "last", "action", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/_history.py#L159-L174
train
kwikteam/phy
phy/plot/base.py
_insert_glsl
def _insert_glsl(vertex, fragment, to_insert): """Insert snippets in a shader. to_insert is a dict `{(shader_type, location): snippet}`. Snippets can contain `{{ var }}` placeholders for the transformed variable name. """ # Find the place where to insert the GLSL snippet. # This is "gl_Position = transform(data_var_name);" where # data_var_name is typically an attribute. vs_regex = re.compile(r'gl_Position = transform\(([\S]+)\);') r = vs_regex.search(vertex) if not r: logger.debug("The vertex shader doesn't contain the transform " "placeholder: skipping the transform chain " "GLSL insertion.") return vertex, fragment assert r logger.log(5, "Found transform placeholder in vertex code: `%s`", r.group(0)) # Find the GLSL variable with the data (should be a `vec2`). var = r.group(1) assert var and var in vertex # Headers. vertex = to_insert['vert', 'header'] + '\n\n' + vertex fragment = to_insert['frag', 'header'] + '\n\n' + fragment # Get the pre and post transforms. vs_insert = to_insert['vert', 'before_transforms'] vs_insert += to_insert['vert', 'transforms'] vs_insert += to_insert['vert', 'after_transforms'] # Insert the GLSL snippet in the vertex shader. vertex = vs_regex.sub(indent(vs_insert), vertex) # Now, we make the replacements in the fragment shader. fs_regex = re.compile(r'(void main\(\)\s*\{)') # NOTE: we add the `void main(){` that was removed by the regex. fs_insert = '\\1\n' + to_insert['frag', 'before_transforms'] fragment = fs_regex.sub(indent(fs_insert), fragment) # Replace the transformed variable placeholder by its name. vertex = vertex.replace('{{ var }}', var) return vertex, fragment
python
def _insert_glsl(vertex, fragment, to_insert): """Insert snippets in a shader. to_insert is a dict `{(shader_type, location): snippet}`. Snippets can contain `{{ var }}` placeholders for the transformed variable name. """ # Find the place where to insert the GLSL snippet. # This is "gl_Position = transform(data_var_name);" where # data_var_name is typically an attribute. vs_regex = re.compile(r'gl_Position = transform\(([\S]+)\);') r = vs_regex.search(vertex) if not r: logger.debug("The vertex shader doesn't contain the transform " "placeholder: skipping the transform chain " "GLSL insertion.") return vertex, fragment assert r logger.log(5, "Found transform placeholder in vertex code: `%s`", r.group(0)) # Find the GLSL variable with the data (should be a `vec2`). var = r.group(1) assert var and var in vertex # Headers. vertex = to_insert['vert', 'header'] + '\n\n' + vertex fragment = to_insert['frag', 'header'] + '\n\n' + fragment # Get the pre and post transforms. vs_insert = to_insert['vert', 'before_transforms'] vs_insert += to_insert['vert', 'transforms'] vs_insert += to_insert['vert', 'after_transforms'] # Insert the GLSL snippet in the vertex shader. vertex = vs_regex.sub(indent(vs_insert), vertex) # Now, we make the replacements in the fragment shader. fs_regex = re.compile(r'(void main\(\)\s*\{)') # NOTE: we add the `void main(){` that was removed by the regex. fs_insert = '\\1\n' + to_insert['frag', 'before_transforms'] fragment = fs_regex.sub(indent(fs_insert), fragment) # Replace the transformed variable placeholder by its name. vertex = vertex.replace('{{ var }}', var) return vertex, fragment
[ "def", "_insert_glsl", "(", "vertex", ",", "fragment", ",", "to_insert", ")", ":", "# Find the place where to insert the GLSL snippet.", "# This is \"gl_Position = transform(data_var_name);\" where", "# data_var_name is typically an attribute.", "vs_regex", "=", "re", ".", "compile", "(", "r'gl_Position = transform\\(([\\S]+)\\);'", ")", "r", "=", "vs_regex", ".", "search", "(", "vertex", ")", "if", "not", "r", ":", "logger", ".", "debug", "(", "\"The vertex shader doesn't contain the transform \"", "\"placeholder: skipping the transform chain \"", "\"GLSL insertion.\"", ")", "return", "vertex", ",", "fragment", "assert", "r", "logger", ".", "log", "(", "5", ",", "\"Found transform placeholder in vertex code: `%s`\"", ",", "r", ".", "group", "(", "0", ")", ")", "# Find the GLSL variable with the data (should be a `vec2`).", "var", "=", "r", ".", "group", "(", "1", ")", "assert", "var", "and", "var", "in", "vertex", "# Headers.", "vertex", "=", "to_insert", "[", "'vert'", ",", "'header'", "]", "+", "'\\n\\n'", "+", "vertex", "fragment", "=", "to_insert", "[", "'frag'", ",", "'header'", "]", "+", "'\\n\\n'", "+", "fragment", "# Get the pre and post transforms.", "vs_insert", "=", "to_insert", "[", "'vert'", ",", "'before_transforms'", "]", "vs_insert", "+=", "to_insert", "[", "'vert'", ",", "'transforms'", "]", "vs_insert", "+=", "to_insert", "[", "'vert'", ",", "'after_transforms'", "]", "# Insert the GLSL snippet in the vertex shader.", "vertex", "=", "vs_regex", ".", "sub", "(", "indent", "(", "vs_insert", ")", ",", "vertex", ")", "# Now, we make the replacements in the fragment shader.", "fs_regex", "=", "re", ".", "compile", "(", "r'(void main\\(\\)\\s*\\{)'", ")", "# NOTE: we add the `void main(){` that was removed by the regex.", "fs_insert", "=", "'\\\\1\\n'", "+", "to_insert", "[", "'frag'", ",", "'before_transforms'", "]", "fragment", "=", "fs_regex", ".", "sub", "(", "indent", "(", "fs_insert", ")", ",", "fragment", ")", "# Replace the transformed variable placeholder by its name.", "vertex", "=", "vertex", ".", "replace", "(", "'{{ var }}'", ",", "var", ")", "return", "vertex", ",", "fragment" ]
Insert snippets in a shader. to_insert is a dict `{(shader_type, location): snippet}`. Snippets can contain `{{ var }}` placeholders for the transformed variable name.
[ "Insert", "snippets", "in", "a", "shader", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/base.py#L117-L165
train
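A minimal sketch of `_insert_glsl` demonstrating only the string rewriting; in real use `GLSLInserter` supplies the full set of snippets, so the shaders here are deliberately simplistic:
from collections import defaultdict
from phy.plot.base import _insert_glsl

vertex = 'void main() {\n    gl_Position = transform(a_position);\n}'
fragment = 'void main() {\n    gl_FragColor = vec4(1.);\n}'
to_insert = defaultdict(str)  # missing (shader, location) keys default to ''
to_insert['vert', 'transforms'] = 'temp_pos_tr = temp_pos_tr + vec2(1., 0.);'
vertex2, fragment2 = _insert_glsl(vertex, fragment, to_insert)
# The placeholder line is replaced by the snippet; any '{{ var }}' in the
# snippets would be replaced by 'a_position'.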
kwikteam/phy
phy/plot/base.py
BaseVisual.on_draw
def on_draw(self): """Draw the visual.""" # Skip the drawing if the program hasn't been built yet. # The program is built by the interact. if self.program: # Draw the program. self.program.draw(self.gl_primitive_type) else: # pragma: no cover logger.debug("Skipping drawing visual `%s` because the program " "has not been built yet.", self)
python
def on_draw(self): """Draw the visual.""" # Skip the drawing if the program hasn't been built yet. # The program is built by the interact. if self.program: # Draw the program. self.program.draw(self.gl_primitive_type) else: # pragma: no cover logger.debug("Skipping drawing visual `%s` because the program " "has not been built yet.", self)
[ "def", "on_draw", "(", "self", ")", ":", "# Skip the drawing if the program hasn't been built yet.", "# The program is built by the interact.", "if", "self", ".", "program", ":", "# Draw the program.", "self", ".", "program", ".", "draw", "(", "self", ".", "gl_primitive_type", ")", "else", ":", "# pragma: no cover", "logger", ".", "debug", "(", "\"Skipping drawing visual `%s` because the program \"", "\"has not been built yet.\"", ",", "self", ")" ]
Draw the visual.
[ "Draw", "the", "visual", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/base.py#L67-L76
train
kwikteam/phy
phy/plot/base.py
GLSLInserter.add_transform_chain
def add_transform_chain(self, tc): """Insert the GLSL snippets of a transform chain.""" # Generate the transforms snippet. for t in tc.gpu_transforms: if isinstance(t, Clip): # Set the varying value in the vertex shader. self.insert_vert('v_temp_pos_tr = temp_pos_tr;') continue self.insert_vert(t.glsl('temp_pos_tr')) # Clipping. clip = tc.get('Clip') if clip: self.insert_frag(clip.glsl('v_temp_pos_tr'), 'before_transforms')
python
def add_transform_chain(self, tc): """Insert the GLSL snippets of a transform chain.""" # Generate the transforms snippet. for t in tc.gpu_transforms: if isinstance(t, Clip): # Set the varying value in the vertex shader. self.insert_vert('v_temp_pos_tr = temp_pos_tr;') continue self.insert_vert(t.glsl('temp_pos_tr')) # Clipping. clip = tc.get('Clip') if clip: self.insert_frag(clip.glsl('v_temp_pos_tr'), 'before_transforms')
[ "def", "add_transform_chain", "(", "self", ",", "tc", ")", ":", "# Generate the transforms snippet.", "for", "t", "in", "tc", ".", "gpu_transforms", ":", "if", "isinstance", "(", "t", ",", "Clip", ")", ":", "# Set the varying value in the vertex shader.", "self", ".", "insert_vert", "(", "'v_temp_pos_tr = temp_pos_tr;'", ")", "continue", "self", ".", "insert_vert", "(", "t", ".", "glsl", "(", "'temp_pos_tr'", ")", ")", "# Clipping.", "clip", "=", "tc", ".", "get", "(", "'Clip'", ")", "if", "clip", ":", "self", ".", "insert_frag", "(", "clip", ".", "glsl", "(", "'v_temp_pos_tr'", ")", ",", "'before_transforms'", ")" ]
Insert the GLSL snippets of a transform chain.
[ "Insert", "the", "GLSL", "snippets", "of", "a", "transform", "chain", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/base.py#L207-L219
train
kwikteam/phy
phy/plot/base.py
GLSLInserter.insert_into_shaders
def insert_into_shaders(self, vertex, fragment): """Apply the insertions to shader code.""" to_insert = defaultdict(str) to_insert.update({key: '\n'.join(self._to_insert[key]) + '\n' for key in self._to_insert}) return _insert_glsl(vertex, fragment, to_insert)
python
def insert_into_shaders(self, vertex, fragment): """Apply the insertions to shader code.""" to_insert = defaultdict(str) to_insert.update({key: '\n'.join(self._to_insert[key]) + '\n' for key in self._to_insert}) return _insert_glsl(vertex, fragment, to_insert)
[ "def", "insert_into_shaders", "(", "self", ",", "vertex", ",", "fragment", ")", ":", "to_insert", "=", "defaultdict", "(", "str", ")", "to_insert", ".", "update", "(", "{", "key", ":", "'\\n'", ".", "join", "(", "self", ".", "_to_insert", "[", "key", "]", ")", "+", "'\\n'", "for", "key", "in", "self", ".", "_to_insert", "}", ")", "return", "_insert_glsl", "(", "vertex", ",", "fragment", ",", "to_insert", ")" ]
Apply the insertions to shader code.
[ "Apply", "the", "insertions", "to", "shader", "code", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/base.py#L221-L226
train
kwikteam/phy
phy/plot/base.py
BaseCanvas.add_visual
def add_visual(self, visual): """Add a visual to the canvas, and build its program at the same time. We can't build the visual's program before, because we need the canvas' transforms first. """ # Retrieve the visual's GLSL inserter. inserter = visual.inserter # Add the visual's transforms. inserter.add_transform_chain(visual.transforms) # Then, add the canvas' transforms. canvas_transforms = visual.canvas_transforms_filter(self.transforms) inserter.add_transform_chain(canvas_transforms) # Also, add the canvas' inserter. inserter += self.inserter # Now, we insert the transforms GLSL into the shaders. vs, fs = visual.vertex_shader, visual.fragment_shader vs, fs = inserter.insert_into_shaders(vs, fs) # Finally, we create the visual's program. visual.program = gloo.Program(vs, fs) logger.log(5, "Vertex shader: %s", vs) logger.log(5, "Fragment shader: %s", fs) # Initialize the size. visual.on_resize(self.size) # Register the visual in the list of visuals in the canvas. self.visuals.append(visual) self.events.visual_added(visual=visual)
python
def add_visual(self, visual): """Add a visual to the canvas, and build its program at the same time. We can't build the visual's program before, because we need the canvas' transforms first. """ # Retrieve the visual's GLSL inserter. inserter = visual.inserter # Add the visual's transforms. inserter.add_transform_chain(visual.transforms) # Then, add the canvas' transforms. canvas_transforms = visual.canvas_transforms_filter(self.transforms) inserter.add_transform_chain(canvas_transforms) # Also, add the canvas' inserter. inserter += self.inserter # Now, we insert the transforms GLSL into the shaders. vs, fs = visual.vertex_shader, visual.fragment_shader vs, fs = inserter.insert_into_shaders(vs, fs) # Finally, we create the visual's program. visual.program = gloo.Program(vs, fs) logger.log(5, "Vertex shader: %s", vs) logger.log(5, "Fragment shader: %s", fs) # Initialize the size. visual.on_resize(self.size) # Register the visual in the list of visuals in the canvas. self.visuals.append(visual) self.events.visual_added(visual=visual)
[ "def", "add_visual", "(", "self", ",", "visual", ")", ":", "# Retrieve the visual's GLSL inserter.", "inserter", "=", "visual", ".", "inserter", "# Add the visual's transforms.", "inserter", ".", "add_transform_chain", "(", "visual", ".", "transforms", ")", "# Then, add the canvas' transforms.", "canvas_transforms", "=", "visual", ".", "canvas_transforms_filter", "(", "self", ".", "transforms", ")", "inserter", ".", "add_transform_chain", "(", "canvas_transforms", ")", "# Also, add the canvas' inserter.", "inserter", "+=", "self", ".", "inserter", "# Now, we insert the transforms GLSL into the shaders.", "vs", ",", "fs", "=", "visual", ".", "vertex_shader", ",", "visual", ".", "fragment_shader", "vs", ",", "fs", "=", "inserter", ".", "insert_into_shaders", "(", "vs", ",", "fs", ")", "# Finally, we create the visual's program.", "visual", ".", "program", "=", "gloo", ".", "Program", "(", "vs", ",", "fs", ")", "logger", ".", "log", "(", "5", ",", "\"Vertex shader: %s\"", ",", "vs", ")", "logger", ".", "log", "(", "5", ",", "\"Fragment shader: %s\"", ",", "fs", ")", "# Initialize the size.", "visual", ".", "on_resize", "(", "self", ".", "size", ")", "# Register the visual in the list of visuals in the canvas.", "self", ".", "visuals", ".", "append", "(", "visual", ")", "self", ".", "events", ".", "visual_added", "(", "visual", "=", "visual", ")" ]
Add a visual to the canvas, and build its program at the same time. We can't build the visual's program before, because we need the canvas' transforms first.
[ "Add", "a", "visual", "to", "the", "canvas", "and", "build", "its", "program", "at", "the", "same", "time", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/base.py#L258-L286
train
kwikteam/phy
phy/plot/base.py
BaseCanvas.on_resize
def on_resize(self, event): """Resize the OpenGL context.""" self.context.set_viewport(0, 0, event.size[0], event.size[1]) for visual in self.visuals: visual.on_resize(event.size) self.update()
python
def on_resize(self, event): """Resize the OpenGL context.""" self.context.set_viewport(0, 0, event.size[0], event.size[1]) for visual in self.visuals: visual.on_resize(event.size) self.update()
[ "def", "on_resize", "(", "self", ",", "event", ")", ":", "self", ".", "context", ".", "set_viewport", "(", "0", ",", "0", ",", "event", ".", "size", "[", "0", "]", ",", "event", ".", "size", "[", "1", "]", ")", "for", "visual", "in", "self", ".", "visuals", ":", "visual", ".", "on_resize", "(", "event", ".", "size", ")", "self", ".", "update", "(", ")" ]
Resize the OpenGL context.
[ "Resize", "the", "OpenGL", "context", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/base.py#L288-L293
train
kwikteam/phy
phy/plot/base.py
BaseCanvas.on_draw
def on_draw(self, e): """Draw all visuals.""" gloo.clear() for visual in self.visuals: logger.log(5, "Draw visual `%s`.", visual) visual.on_draw()
python
def on_draw(self, e): """Draw all visuals.""" gloo.clear() for visual in self.visuals: logger.log(5, "Draw visual `%s`.", visual) visual.on_draw()
[ "def", "on_draw", "(", "self", ",", "e", ")", ":", "gloo", ".", "clear", "(", ")", "for", "visual", "in", "self", ".", "visuals", ":", "logger", ".", "log", "(", "5", ",", "\"Draw visual `%s`.\"", ",", "visual", ")", "visual", ".", "on_draw", "(", ")" ]
Draw all visuals.
[ "Draw", "all", "visuals", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/base.py#L295-L300
train
kwikteam/phy
phy/plot/base.py
BaseInteract.update
def update(self): """Update all visuals in the attached canvas.""" if not self.canvas: return for visual in self.canvas.visuals: self.update_program(visual.program) self.canvas.update()
python
def update(self): """Update all visuals in the attached canvas.""" if not self.canvas: return for visual in self.canvas.visuals: self.update_program(visual.program) self.canvas.update()
[ "def", "update", "(", "self", ")", ":", "if", "not", "self", ".", "canvas", ":", "return", "for", "visual", "in", "self", ".", "canvas", ".", "visuals", ":", "self", ".", "update_program", "(", "visual", ".", "program", ")", "self", ".", "canvas", ".", "update", "(", ")" ]
Update all visuals in the attached canvas.
[ "Update", "all", "visuals", "in", "the", "attached", "canvas", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/base.py#L324-L330
train
kwikteam/phy
phy/utils/cli.py
_add_log_file
def _add_log_file(filename): """Create a `phy.log` log file with DEBUG level in the current directory.""" handler = logging.FileHandler(filename) handler.setLevel(logging.DEBUG) formatter = _Formatter(fmt=_logger_fmt, datefmt='%Y-%m-%d %H:%M:%S') handler.setFormatter(formatter) logging.getLogger().addHandler(handler)
python
def _add_log_file(filename): """Create a `phy.log` log file with DEBUG level in the current directory.""" handler = logging.FileHandler(filename) handler.setLevel(logging.DEBUG) formatter = _Formatter(fmt=_logger_fmt, datefmt='%Y-%m-%d %H:%M:%S') handler.setFormatter(formatter) logging.getLogger().addHandler(handler)
[ "def", "_add_log_file", "(", "filename", ")", ":", "handler", "=", "logging", ".", "FileHandler", "(", "filename", ")", "handler", ".", "setLevel", "(", "logging", ".", "DEBUG", ")", "formatter", "=", "_Formatter", "(", "fmt", "=", "_logger_fmt", ",", "datefmt", "=", "'%Y-%m-%d %H:%M:%S'", ")", "handler", ".", "setFormatter", "(", "formatter", ")", "logging", ".", "getLogger", "(", ")", ".", "addHandler", "(", "handler", ")" ]
Create a `phy.log` log file with DEBUG level in the current directory.
[ "Create", "a", "phy", ".", "log", "log", "file", "with", "DEBUG", "level", "in", "the", "current", "directory", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/utils/cli.py#L46-L55
train
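A standalone sketch of the same pattern using only the standard library; phy's custom `_Formatter` and `_logger_fmt` are replaced here with a plain `logging.Formatter` and a made-up format string.

```python
import logging

handler = logging.FileHandler('phy.log')
handler.setLevel(logging.DEBUG)
handler.setFormatter(logging.Formatter(
    fmt='%(asctime)s [%(levelname)s] %(name)s: %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S'))

root = logging.getLogger()
root.setLevel(logging.DEBUG)  # the root logger must also let DEBUG through
root.addHandler(handler)

logging.getLogger(__name__).debug("This record ends up in phy.log.")
```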
kwikteam/phy
phy/utils/cli.py
_run_cmd
def _run_cmd(cmd, ctx, glob, loc): # pragma: no cover """Run a command with optionally a debugger, IPython, or profiling.""" if PDB: _enable_pdb() if IPYTHON: from IPython import start_ipython args_ipy = ['-i', '--gui=qt'] ns = glob.copy() ns.update(loc) return start_ipython(args_ipy, user_ns=ns) # Profiling. The builtin `profile` is added in __init__. prof = __builtins__.get('profile', None) if prof: prof = __builtins__['profile'] return _profile(prof, cmd, glob, loc) return exec_(cmd, glob, loc)
python
def _run_cmd(cmd, ctx, glob, loc): # pragma: no cover """Run a command with optionally a debugger, IPython, or profiling.""" if PDB: _enable_pdb() if IPYTHON: from IPython import start_ipython args_ipy = ['-i', '--gui=qt'] ns = glob.copy() ns.update(loc) return start_ipython(args_ipy, user_ns=ns) # Profiling. The builtin `profile` is added in __init__. prof = __builtins__.get('profile', None) if prof: prof = __builtins__['profile'] return _profile(prof, cmd, glob, loc) return exec_(cmd, glob, loc)
[ "def", "_run_cmd", "(", "cmd", ",", "ctx", ",", "glob", ",", "loc", ")", ":", "# pragma: no cover", "if", "PDB", ":", "_enable_pdb", "(", ")", "if", "IPYTHON", ":", "from", "IPython", "import", "start_ipython", "args_ipy", "=", "[", "'-i'", ",", "'--gui=qt'", "]", "ns", "=", "glob", ".", "copy", "(", ")", "ns", ".", "update", "(", "loc", ")", "return", "start_ipython", "(", "args_ipy", ",", "user_ns", "=", "ns", ")", "# Profiling. The builtin `profile` is added in __init__.", "prof", "=", "__builtins__", ".", "get", "(", "'profile'", ",", "None", ")", "if", "prof", ":", "prof", "=", "__builtins__", "[", "'profile'", "]", "return", "_profile", "(", "prof", ",", "cmd", ",", "glob", ",", "loc", ")", "return", "exec_", "(", "cmd", ",", "glob", ",", "loc", ")" ]
Run a command with optionally a debugger, IPython, or profiling.
[ "Run", "a", "command", "with", "optionally", "a", "debugger", "IPython", "or", "profiling", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/utils/cli.py#L58-L73
train
kwikteam/phy
phy/utils/cli.py
load_cli_plugins
def load_cli_plugins(cli, config_dir=None): """Load all plugins and attach them to a CLI object.""" from .config import load_master_config config = load_master_config(config_dir=config_dir) plugins = discover_plugins(config.Plugins.dirs) for plugin in plugins: if not hasattr(plugin, 'attach_to_cli'): # pragma: no cover continue logger.debug("Attach plugin `%s` to CLI.", _fullname(plugin)) # NOTE: plugin is a class, so we need to instantiate it. try: plugin().attach_to_cli(cli) except Exception as e: # pragma: no cover logger.error("Error when loading plugin `%s`: %s", plugin, e)
python
def load_cli_plugins(cli, config_dir=None): """Load all plugins and attach them to a CLI object.""" from .config import load_master_config config = load_master_config(config_dir=config_dir) plugins = discover_plugins(config.Plugins.dirs) for plugin in plugins: if not hasattr(plugin, 'attach_to_cli'): # pragma: no cover continue logger.debug("Attach plugin `%s` to CLI.", _fullname(plugin)) # NOTE: plugin is a class, so we need to instantiate it. try: plugin().attach_to_cli(cli) except Exception as e: # pragma: no cover logger.error("Error when loading plugin `%s`: %s", plugin, e)
[ "def", "load_cli_plugins", "(", "cli", ",", "config_dir", "=", "None", ")", ":", "from", ".", "config", "import", "load_master_config", "config", "=", "load_master_config", "(", "config_dir", "=", "config_dir", ")", "plugins", "=", "discover_plugins", "(", "config", ".", "Plugins", ".", "dirs", ")", "for", "plugin", "in", "plugins", ":", "if", "not", "hasattr", "(", "plugin", ",", "'attach_to_cli'", ")", ":", "# pragma: no cover", "continue", "logger", ".", "debug", "(", "\"Attach plugin `%s` to CLI.\"", ",", "_fullname", "(", "plugin", ")", ")", "# NOTE: plugin is a class, so we need to instantiate it.", "try", ":", "plugin", "(", ")", ".", "attach_to_cli", "(", "cli", ")", "except", "Exception", "as", "e", ":", "# pragma: no cover", "logger", ".", "error", "(", "\"Error when loading plugin `%s`: %s\"", ",", "plugin", ",", "e", ")" ]
Load all plugins and attach them to a CLI object.
[ "Load", "all", "plugins", "and", "attach", "them", "to", "a", "CLI", "object", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/utils/cli.py#L94-L109
train
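The essential pattern here, instantiating each discovered plugin class before calling `attach_to_cli` and isolating failures per plugin, can be reproduced with stand-ins; `FakePlugin` and the list-based `cli` below are made up.

```python
# Minimal sketch of the plugin-attachment loop in load_cli_plugins().
class FakePlugin:
    def attach_to_cli(self, cli):
        cli.append('fake-command')

cli = []                         # stand-in for a click.Group
plugins = [FakePlugin, object]   # `object` has no attach_to_cli

for plugin in plugins:
    if not hasattr(plugin, 'attach_to_cli'):
        continue
    try:
        plugin().attach_to_cli(cli)   # note: instantiate the class first
    except Exception as e:
        print("Error when loading plugin %s: %s" % (plugin, e))

print(cli)   # -> ['fake-command']
```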
kwikteam/phy
phy/plot/panzoom.py
PanZoom.get_mouse_pos
def get_mouse_pos(self, pos): """Return the mouse coordinates in NDC, taking panzoom into account.""" position = np.asarray(self._normalize(pos)) zoom = np.asarray(self._zoom_aspect()) pan = np.asarray(self.pan) mouse_pos = ((position / zoom) - pan) return mouse_pos
python
def get_mouse_pos(self, pos): """Return the mouse coordinates in NDC, taking panzoom into account.""" position = np.asarray(self._normalize(pos)) zoom = np.asarray(self._zoom_aspect()) pan = np.asarray(self.pan) mouse_pos = ((position / zoom) - pan) return mouse_pos
[ "def", "get_mouse_pos", "(", "self", ",", "pos", ")", ":", "position", "=", "np", ".", "asarray", "(", "self", ".", "_normalize", "(", "pos", ")", ")", "zoom", "=", "np", ".", "asarray", "(", "self", ".", "_zoom_aspect", "(", ")", ")", "pan", "=", "np", ".", "asarray", "(", "self", ".", "pan", ")", "mouse_pos", "=", "(", "(", "position", "/", "zoom", ")", "-", "pan", ")", "return", "mouse_pos" ]
Return the mouse coordinates in NDC, taking panzoom into account.
[ "Return", "the", "mouse", "coordinates", "in", "NDC", "taking", "panzoom", "into", "account", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/panzoom.py#L228-L234
train
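A worked numerical sketch of the same formula, with the normalization step spelled out; the exact pixel-to-NDC convention of phy's `_normalize` (including the y flip) is an assumption here.

```python
import numpy as np

size = np.array([800., 600.])     # canvas size in pixels
pos = np.array([600., 150.])      # mouse position in pixels

position = 2. * pos / size - 1.   # assumed pixel -> NDC mapping
position[1] *= -1                 # flip the y axis

zoom = np.array([2., 2.])         # current (aspect-corrected) zoom
pan = np.array([0.1, -0.2])       # current pan

mouse_pos = position / zoom - pan # same formula as get_mouse_pos()
print(mouse_pos)                  # -> [0.15 0.45]
```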
kwikteam/phy
phy/plot/panzoom.py
PanZoom.pan
def pan(self, value): """Pan translation.""" assert len(value) == 2 self._pan[:] = value self._constrain_pan() self.update()
python
def pan(self, value): """Pan translation.""" assert len(value) == 2 self._pan[:] = value self._constrain_pan() self.update()
[ "def", "pan", "(", "self", ",", "value", ")", ":", "assert", "len", "(", "value", ")", "==", "2", "self", ".", "_pan", "[", ":", "]", "=", "value", "self", ".", "_constrain_pan", "(", ")", "self", ".", "update", "(", ")" ]
Pan translation.
[ "Pan", "translation", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/panzoom.py#L245-L250
train
kwikteam/phy
phy/plot/panzoom.py
PanZoom.zoom
def zoom(self, value): """Zoom level.""" if isinstance(value, (int, float)): value = (value, value) assert len(value) == 2 self._zoom = np.clip(value, self._zmin, self._zmax) # Constrain bounding box. self._constrain_pan() self._constrain_zoom() self.update()
python
def zoom(self, value): """Zoom level.""" if isinstance(value, (int, float)): value = (value, value) assert len(value) == 2 self._zoom = np.clip(value, self._zmin, self._zmax) # Constrain bounding box. self._constrain_pan() self._constrain_zoom() self.update()
[ "def", "zoom", "(", "self", ",", "value", ")", ":", "if", "isinstance", "(", "value", ",", "(", "int", ",", "float", ")", ")", ":", "value", "=", "(", "value", ",", "value", ")", "assert", "len", "(", "value", ")", "==", "2", "self", ".", "_zoom", "=", "np", ".", "clip", "(", "value", ",", "self", ".", "_zmin", ",", "self", ".", "_zmax", ")", "# Constrain bounding box.", "self", ".", "_constrain_pan", "(", ")", "self", ".", "_constrain_zoom", "(", ")", "self", ".", "update", "(", ")" ]
Zoom level.
[ "Zoom", "level", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/panzoom.py#L258-L269
train
kwikteam/phy
phy/plot/panzoom.py
PanZoom.pan_delta
def pan_delta(self, d): """Pan the view by a given amount.""" dx, dy = d pan_x, pan_y = self.pan zoom_x, zoom_y = self._zoom_aspect(self._zoom) self.pan = (pan_x + dx / zoom_x, pan_y + dy / zoom_y) self.update()
python
def pan_delta(self, d): """Pan the view by a given amount.""" dx, dy = d pan_x, pan_y = self.pan zoom_x, zoom_y = self._zoom_aspect(self._zoom) self.pan = (pan_x + dx / zoom_x, pan_y + dy / zoom_y) self.update()
[ "def", "pan_delta", "(", "self", ",", "d", ")", ":", "dx", ",", "dy", "=", "d", "pan_x", ",", "pan_y", "=", "self", ".", "pan", "zoom_x", ",", "zoom_y", "=", "self", ".", "_zoom_aspect", "(", "self", ".", "_zoom", ")", "self", ".", "pan", "=", "(", "pan_x", "+", "dx", "/", "zoom_x", ",", "pan_y", "+", "dy", "/", "zoom_y", ")", "self", ".", "update", "(", ")" ]
Pan the view by a given amount.
[ "Pan", "the", "view", "by", "a", "given", "amount", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/panzoom.py#L271-L279
train
kwikteam/phy
phy/plot/panzoom.py
PanZoom.zoom_delta
def zoom_delta(self, d, p=(0., 0.), c=1.): """Zoom the view by a given amount.""" dx, dy = d x0, y0 = p pan_x, pan_y = self._pan zoom_x, zoom_y = self._zoom zoom_x_new, zoom_y_new = (zoom_x * math.exp(c * self._zoom_coeff * dx), zoom_y * math.exp(c * self._zoom_coeff * dy)) zoom_x_new = max(min(zoom_x_new, self._zmax), self._zmin) zoom_y_new = max(min(zoom_y_new, self._zmax), self._zmin) self.zoom = zoom_x_new, zoom_y_new if self._zoom_to_pointer: zoom_x, zoom_y = self._zoom_aspect((zoom_x, zoom_y)) zoom_x_new, zoom_y_new = self._zoom_aspect((zoom_x_new, zoom_y_new)) self.pan = (pan_x - x0 * (1. / zoom_x - 1. / zoom_x_new), pan_y - y0 * (1. / zoom_y - 1. / zoom_y_new)) self.update()
python
def zoom_delta(self, d, p=(0., 0.), c=1.): """Zoom the view by a given amount.""" dx, dy = d x0, y0 = p pan_x, pan_y = self._pan zoom_x, zoom_y = self._zoom zoom_x_new, zoom_y_new = (zoom_x * math.exp(c * self._zoom_coeff * dx), zoom_y * math.exp(c * self._zoom_coeff * dy)) zoom_x_new = max(min(zoom_x_new, self._zmax), self._zmin) zoom_y_new = max(min(zoom_y_new, self._zmax), self._zmin) self.zoom = zoom_x_new, zoom_y_new if self._zoom_to_pointer: zoom_x, zoom_y = self._zoom_aspect((zoom_x, zoom_y)) zoom_x_new, zoom_y_new = self._zoom_aspect((zoom_x_new, zoom_y_new)) self.pan = (pan_x - x0 * (1. / zoom_x - 1. / zoom_x_new), pan_y - y0 * (1. / zoom_y - 1. / zoom_y_new)) self.update()
[ "def", "zoom_delta", "(", "self", ",", "d", ",", "p", "=", "(", "0.", ",", "0.", ")", ",", "c", "=", "1.", ")", ":", "dx", ",", "dy", "=", "d", "x0", ",", "y0", "=", "p", "pan_x", ",", "pan_y", "=", "self", ".", "_pan", "zoom_x", ",", "zoom_y", "=", "self", ".", "_zoom", "zoom_x_new", ",", "zoom_y_new", "=", "(", "zoom_x", "*", "math", ".", "exp", "(", "c", "*", "self", ".", "_zoom_coeff", "*", "dx", ")", ",", "zoom_y", "*", "math", ".", "exp", "(", "c", "*", "self", ".", "_zoom_coeff", "*", "dy", ")", ")", "zoom_x_new", "=", "max", "(", "min", "(", "zoom_x_new", ",", "self", ".", "_zmax", ")", ",", "self", ".", "_zmin", ")", "zoom_y_new", "=", "max", "(", "min", "(", "zoom_y_new", ",", "self", ".", "_zmax", ")", ",", "self", ".", "_zmin", ")", "self", ".", "zoom", "=", "zoom_x_new", ",", "zoom_y_new", "if", "self", ".", "_zoom_to_pointer", ":", "zoom_x", ",", "zoom_y", "=", "self", ".", "_zoom_aspect", "(", "(", "zoom_x", ",", "zoom_y", ")", ")", "zoom_x_new", ",", "zoom_y_new", "=", "self", ".", "_zoom_aspect", "(", "(", "zoom_x_new", ",", "zoom_y_new", ")", ")", "self", ".", "pan", "=", "(", "pan_x", "-", "x0", "*", "(", "1.", "/", "zoom_x", "-", "1.", "/", "zoom_x_new", ")", ",", "pan_y", "-", "y0", "*", "(", "1.", "/", "zoom_y", "-", "1.", "/", "zoom_y_new", ")", ")", "self", ".", "update", "(", ")" ]
Zoom the view by a given amount.
[ "Zoom", "the", "view", "by", "a", "given", "amount", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/panzoom.py#L281-L305
train
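The zoom-to-pointer correction above can be checked numerically: scale the zoom exponentially, then shift the pan so the point under the pointer maps to the same place before and after. All values below are made up, and only the x axis is shown.

```python
import math

zoom_coeff = .5                  # made-up coefficient
dx = .2                          # wheel/drag delta along x
x0 = .5                          # pointer position in NDC
pan_x, zoom_x = 0., 1.

w = x0 / zoom_x - pan_x          # world point currently under the pointer

zoom_x_new = zoom_x * math.exp(zoom_coeff * dx)
pan_x -= x0 * (1. / zoom_x - 1. / zoom_x_new)

# The point under the pointer is unchanged by the zoom:
print(zoom_x_new * (w + pan_x))  # -> 0.5, i.e. x0
```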
kwikteam/phy
phy/plot/panzoom.py
PanZoom.set_range
def set_range(self, bounds, keep_aspect=False): """Zoom to fit a box.""" # a * (v0 + t) = -1 # a * (v1 + t) = +1 # => # a * (v1 - v0) = 2 bounds = np.asarray(bounds, dtype=np.float64) v0 = bounds[:2] v1 = bounds[2:] pan = -.5 * (v0 + v1) zoom = 2. / (v1 - v0) if keep_aspect: zoom = zoom.min() * np.ones(2) self.set_pan_zoom(pan=pan, zoom=zoom)
python
def set_range(self, bounds, keep_aspect=False): """Zoom to fit a box.""" # a * (v0 + t) = -1 # a * (v1 + t) = +1 # => # a * (v1 - v0) = 2 bounds = np.asarray(bounds, dtype=np.float64) v0 = bounds[:2] v1 = bounds[2:] pan = -.5 * (v0 + v1) zoom = 2. / (v1 - v0) if keep_aspect: zoom = zoom.min() * np.ones(2) self.set_pan_zoom(pan=pan, zoom=zoom)
[ "def", "set_range", "(", "self", ",", "bounds", ",", "keep_aspect", "=", "False", ")", ":", "# a * (v0 + t) = -1", "# a * (v1 + t) = +1", "# =>", "# a * (v1 - v0) = 2", "bounds", "=", "np", ".", "asarray", "(", "bounds", ",", "dtype", "=", "np", ".", "float64", ")", "v0", "=", "bounds", "[", ":", "2", "]", "v1", "=", "bounds", "[", "2", ":", "]", "pan", "=", "-", ".5", "*", "(", "v0", "+", "v1", ")", "zoom", "=", "2.", "/", "(", "v1", "-", "v0", ")", "if", "keep_aspect", ":", "zoom", "=", "zoom", ".", "min", "(", ")", "*", "np", ".", "ones", "(", "2", ")", "self", ".", "set_pan_zoom", "(", "pan", "=", "pan", ",", "zoom", "=", "zoom", ")" ]
Zoom to fit a box.
[ "Zoom", "to", "fit", "a", "box", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/panzoom.py#L317-L330
train
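The pan/zoom formulas follow from requiring `zoom * (v + pan)` to equal -1 at `v0` and +1 at `v1`, which gives `zoom = 2 / (v1 - v0)` and `pan = -(v0 + v1) / 2`. The sketch below checks this, and the round trip through the `get_range` formula, with made-up bounds.

```python
import numpy as np

bounds = np.array([0., -2., 4., 2.])   # x0, y0, x1, y1
v0, v1 = bounds[:2], bounds[2:]

pan = -.5 * (v0 + v1)
zoom = 2. / (v1 - v0)

print(zoom * (v0 + pan))               # -> [-1. -1.]
print(zoom * (v1 + pan))               # -> [ 1.  1.]

# get_range() inverts the mapping and recovers the original bounds:
print(-1. / zoom - pan, 1. / zoom - pan)   # -> [ 0. -2.] [4. 2.]
```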
kwikteam/phy
phy/plot/panzoom.py
PanZoom.get_range
def get_range(self): """Return the bounds currently visible.""" p, z = np.asarray(self.pan), np.asarray(self.zoom) x0, y0 = -1. / z - p x1, y1 = +1. / z - p return (x0, y0, x1, y1)
python
def get_range(self): """Return the bounds currently visible.""" p, z = np.asarray(self.pan), np.asarray(self.zoom) x0, y0 = -1. / z - p x1, y1 = +1. / z - p return (x0, y0, x1, y1)
[ "def", "get_range", "(", "self", ")", ":", "p", ",", "z", "=", "np", ".", "asarray", "(", "self", ".", "pan", ")", ",", "np", ".", "asarray", "(", "self", ".", "zoom", ")", "x0", ",", "y0", "=", "-", "1.", "/", "z", "-", "p", "x1", ",", "y1", "=", "+", "1.", "/", "z", "-", "p", "return", "(", "x0", ",", "y0", ",", "x1", ",", "y1", ")" ]
Return the bounds currently visible.
[ "Return", "the", "bounds", "currently", "visible", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/panzoom.py#L332-L337
train
kwikteam/phy
phy/plot/panzoom.py
PanZoom.on_mouse_move
def on_mouse_move(self, event): """Pan and zoom with the mouse.""" if event.modifiers: return if event.is_dragging: x0, y0 = self._normalize(event.press_event.pos) x1, y1 = self._normalize(event.last_event.pos) x, y = self._normalize(event.pos) dx, dy = x - x1, y - y1 if event.button == 1: self.pan_delta((dx, dy)) elif event.button == 2: c = np.sqrt(self.size[0]) * .03 self.zoom_delta((dx, dy), (x0, y0), c=c)
python
def on_mouse_move(self, event): """Pan and zoom with the mouse.""" if event.modifiers: return if event.is_dragging: x0, y0 = self._normalize(event.press_event.pos) x1, y1 = self._normalize(event.last_event.pos) x, y = self._normalize(event.pos) dx, dy = x - x1, y - y1 if event.button == 1: self.pan_delta((dx, dy)) elif event.button == 2: c = np.sqrt(self.size[0]) * .03 self.zoom_delta((dx, dy), (x0, y0), c=c)
[ "def", "on_mouse_move", "(", "self", ",", "event", ")", ":", "if", "event", ".", "modifiers", ":", "return", "if", "event", ".", "is_dragging", ":", "x0", ",", "y0", "=", "self", ".", "_normalize", "(", "event", ".", "press_event", ".", "pos", ")", "x1", ",", "y1", "=", "self", ".", "_normalize", "(", "event", ".", "last_event", ".", "pos", ")", "x", ",", "y", "=", "self", ".", "_normalize", "(", "event", ".", "pos", ")", "dx", ",", "dy", "=", "x", "-", "x1", ",", "y", "-", "y1", "if", "event", ".", "button", "==", "1", ":", "self", ".", "pan_delta", "(", "(", "dx", ",", "dy", ")", ")", "elif", "event", ".", "button", "==", "2", ":", "c", "=", "np", ".", "sqrt", "(", "self", ".", "size", "[", "0", "]", ")", "*", ".03", "self", ".", "zoom_delta", "(", "(", "dx", ",", "dy", ")", ",", "(", "x0", ",", "y0", ")", ",", "c", "=", "c", ")" ]
Pan and zoom with the mouse.
[ "Pan", "and", "zoom", "with", "the", "mouse", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/panzoom.py#L386-L399
train
kwikteam/phy
phy/plot/panzoom.py
PanZoom.on_mouse_wheel
def on_mouse_wheel(self, event): """Zoom with the mouse wheel.""" # NOTE: not called on OS X because of touchpad if event.modifiers: return dx = np.sign(event.delta[1]) * self._wheel_coeff # Zoom toward the mouse pointer. x0, y0 = self._normalize(event.pos) self.zoom_delta((dx, dx), (x0, y0))
python
def on_mouse_wheel(self, event): """Zoom with the mouse wheel.""" # NOTE: not called on OS X because of touchpad if event.modifiers: return dx = np.sign(event.delta[1]) * self._wheel_coeff # Zoom toward the mouse pointer. x0, y0 = self._normalize(event.pos) self.zoom_delta((dx, dx), (x0, y0))
[ "def", "on_mouse_wheel", "(", "self", ",", "event", ")", ":", "# NOTE: not called on OS X because of touchpad", "if", "event", ".", "modifiers", ":", "return", "dx", "=", "np", ".", "sign", "(", "event", ".", "delta", "[", "1", "]", ")", "*", "self", ".", "_wheel_coeff", "# Zoom toward the mouse pointer.", "x0", ",", "y0", "=", "self", ".", "_normalize", "(", "event", ".", "pos", ")", "self", ".", "zoom_delta", "(", "(", "dx", ",", "dx", ")", ",", "(", "x0", ",", "y0", ")", ")" ]
Zoom with the mouse wheel.
[ "Zoom", "with", "the", "mouse", "wheel", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/panzoom.py#L425-L433
train
kwikteam/phy
phy/plot/panzoom.py
PanZoom.on_key_press
def on_key_press(self, event): """Pan and zoom with the keyboard.""" # Zooming with the keyboard. key = event.key if event.modifiers: return # Pan. if self.enable_keyboard_pan and key in self._arrows: self._pan_keyboard(key) # Zoom. if key in self._pm: self._zoom_keyboard(key) # Reset with 'R'. if key == 'R': self.reset()
python
def on_key_press(self, event): """Pan and zoom with the keyboard.""" # Zooming with the keyboard. key = event.key if event.modifiers: return # Pan. if self.enable_keyboard_pan and key in self._arrows: self._pan_keyboard(key) # Zoom. if key in self._pm: self._zoom_keyboard(key) # Reset with 'R'. if key == 'R': self.reset()
[ "def", "on_key_press", "(", "self", ",", "event", ")", ":", "# Zooming with the keyboard.", "key", "=", "event", ".", "key", "if", "event", ".", "modifiers", ":", "return", "# Pan.", "if", "self", ".", "enable_keyboard_pan", "and", "key", "in", "self", ".", "_arrows", ":", "self", ".", "_pan_keyboard", "(", "key", ")", "# Zoom.", "if", "key", "in", "self", ".", "_pm", ":", "self", ".", "_zoom_keyboard", "(", "key", ")", "# Reset with 'R'.", "if", "key", "==", "'R'", ":", "self", ".", "reset", "(", ")" ]
Pan and zoom with the keyboard.
[ "Pan", "and", "zoom", "with", "the", "keyboard", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/panzoom.py#L435-L452
train
kwikteam/phy
tools/api.py
_replace_docstring_header
def _replace_docstring_header(paragraph): """Process NumPy-like function docstrings.""" # Replace Markdown headers in docstrings with light headers in bold. paragraph = re.sub(_docstring_header_pattern, r'*\1*', paragraph, ) paragraph = re.sub(_docstring_parameters_pattern, r'\n* `\1` (\2)\n', paragraph, ) return paragraph
python
def _replace_docstring_header(paragraph): """Process NumPy-like function docstrings.""" # Replace Markdown headers in docstrings with light headers in bold. paragraph = re.sub(_docstring_header_pattern, r'*\1*', paragraph, ) paragraph = re.sub(_docstring_parameters_pattern, r'\n* `\1` (\2)\n', paragraph, ) return paragraph
[ "def", "_replace_docstring_header", "(", "paragraph", ")", ":", "# Replace Markdown headers in docstrings with light headers in bold.", "paragraph", "=", "re", ".", "sub", "(", "_docstring_header_pattern", ",", "r'*\\1*'", ",", "paragraph", ",", ")", "paragraph", "=", "re", ".", "sub", "(", "_docstring_parameters_pattern", ",", "r'\\n* `\\1` (\\2)\\n'", ",", "paragraph", ",", ")", "return", "paragraph" ]
Process NumPy-like function docstrings.
[ "Process", "NumPy", "-", "like", "function", "docstrings", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/tools/api.py#L47-L61
train
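The real `_docstring_header_pattern` and `_docstring_parameters_pattern` are defined elsewhere in `tools/api.py`; the patterns in this sketch are simplified guesses, used only to show the shape of the two substitutions.

```python
import re

paragraph = "Parameters\n----------\nx : int\n    The value."

# Turn an underlined NumPy header into a light inline header in bold.
paragraph = re.sub(r'^(\w+)\n-+$', r'*\1*', paragraph, flags=re.M)
# Turn `name : type` parameter lines into a Markdown bullet list.
paragraph = re.sub(r'^(\w+) : (\w+)$', r'\n* `\1` (\2)\n', paragraph,
                   flags=re.M)
print(paragraph)
```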
kwikteam/phy
tools/api.py
_iter_vars
def _iter_vars(mod): """Iterate through a list of variables defined in a module's public namespace.""" vars = sorted(var for var in dir(mod) if _is_public(var)) for var in vars: yield getattr(mod, var)
python
def _iter_vars(mod): """Iterate through a list of variables defined in a module's public namespace.""" vars = sorted(var for var in dir(mod) if _is_public(var)) for var in vars: yield getattr(mod, var)
[ "def", "_iter_vars", "(", "mod", ")", ":", "vars", "=", "sorted", "(", "var", "for", "var", "in", "dir", "(", "mod", ")", "if", "_is_public", "(", "var", ")", ")", "for", "var", "in", "vars", ":", "yield", "getattr", "(", "mod", ",", "var", ")" ]
Iterate through a list of variables defined in a module's public namespace.
[ "Iterate", "through", "a", "list", "of", "variables", "defined", "in", "a", "module", "s", "public", "namespace", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/tools/api.py#L145-L150
train
kwikteam/phy
tools/api.py
_function_header
def _function_header(subpackage, func): """Generate the docstring of a function.""" args = inspect.formatargspec(*inspect.getfullargspec(func)) return "{name}{args}".format(name=_full_name(subpackage, func), args=args, )
python
def _function_header(subpackage, func): """Generate the docstring of a function.""" args = inspect.formatargspec(*inspect.getfullargspec(func)) return "{name}{args}".format(name=_full_name(subpackage, func), args=args, )
[ "def", "_function_header", "(", "subpackage", ",", "func", ")", ":", "args", "=", "inspect", ".", "formatargspec", "(", "*", "inspect", ".", "getfullargspec", "(", "func", ")", ")", "return", "\"{name}{args}\"", ".", "format", "(", "name", "=", "_full_name", "(", "subpackage", ",", "func", ")", ",", "args", "=", "args", ",", ")" ]
Generate the docstring of a function.
[ "Generate", "the", "docstring", "of", "a", "function", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/tools/api.py#L185-L190
train
kwikteam/phy
tools/api.py
_doc_method
def _doc_method(klass, func): """Generate the docstring of a method.""" argspec = inspect.getfullargspec(func) # Remove first 'self' argument. if argspec.args and argspec.args[0] == 'self': del argspec.args[0] args = inspect.formatargspec(*argspec) header = "{klass}.{name}{args}".format(klass=klass.__name__, name=_name(func), args=args, ) docstring = _doc(func) return _concat(header, docstring)
python
def _doc_method(klass, func): """Generate the docstring of a method.""" argspec = inspect.getfullargspec(func) # Remove first 'self' argument. if argspec.args and argspec.args[0] == 'self': del argspec.args[0] args = inspect.formatargspec(*argspec) header = "{klass}.{name}{args}".format(klass=klass.__name__, name=_name(func), args=args, ) docstring = _doc(func) return _concat(header, docstring)
[ "def", "_doc_method", "(", "klass", ",", "func", ")", ":", "argspec", "=", "inspect", ".", "getfullargspec", "(", "func", ")", "# Remove first 'self' argument.", "if", "argspec", ".", "args", "and", "argspec", ".", "args", "[", "0", "]", "==", "'self'", ":", "del", "argspec", ".", "args", "[", "0", "]", "args", "=", "inspect", ".", "formatargspec", "(", "*", "argspec", ")", "header", "=", "\"{klass}.{name}{args}\"", ".", "format", "(", "klass", "=", "klass", ".", "__name__", ",", "name", "=", "_name", "(", "func", ")", ",", "args", "=", "args", ",", ")", "docstring", "=", "_doc", "(", "func", ")", "return", "_concat", "(", "header", ",", "docstring", ")" ]
Generate the docstring of a method.
[ "Generate", "the", "docstring", "of", "a", "method", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/tools/api.py#L199-L211
train
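A sketch of building the same `Class.method(args)` header. Note that `inspect.formatargspec`, used in the code above, has been deprecated since Python 3.5 and was removed in 3.11, so this version uses `inspect.signature` instead; the leading `self` parameter is dropped just as in `_doc_method`.

```python
import inspect

class Greeter:
    def greet(self, name, punct='!'):
        """Say hello."""
        return 'hello ' + name + punct

func = Greeter.greet
sig = inspect.signature(func)
# Remove the first 'self' argument, as _doc_method() does.
params = [p for p in sig.parameters.values() if p.name != 'self']
sig = sig.replace(parameters=params)

header = "{klass}.{name}{args}".format(
    klass=Greeter.__name__, name=func.__name__, args=sig)
print(header)   # -> Greeter.greet(name, punct='!')
```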
kwikteam/phy
tools/api.py
_doc_property
def _doc_property(klass, prop): """Generate the docstring of a property.""" header = "{klass}.{name}".format(klass=klass.__name__, name=_name(prop), ) docstring = _doc(prop) return _concat(header, docstring)
python
def _doc_property(klass, prop): """Generate the docstring of a property.""" header = "{klass}.{name}".format(klass=klass.__name__, name=_name(prop), ) docstring = _doc(prop) return _concat(header, docstring)
[ "def", "_doc_property", "(", "klass", ",", "prop", ")", ":", "header", "=", "\"{klass}.{name}\"", ".", "format", "(", "klass", "=", "klass", ".", "__name__", ",", "name", "=", "_name", "(", "prop", ")", ",", ")", "docstring", "=", "_doc", "(", "prop", ")", "return", "_concat", "(", "header", ",", "docstring", ")" ]
Generate the docstring of a property.
[ "Generate", "the", "docstring", "of", "a", "property", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/tools/api.py#L214-L220
train
kwikteam/phy
tools/api.py
_generate_paragraphs
def _generate_paragraphs(package, subpackages): """Generate the paragraphs of the API documentation.""" # API doc of each module. for subpackage in _iter_subpackages(package, subpackages): subpackage_name = subpackage.__name__ yield "## {}".format(subpackage_name) # Subpackage documentation. yield _doc(_import_module(subpackage_name)) # List of top-level functions in the subpackage. for func in _iter_functions(subpackage): yield '##### ' + _doc_function(subpackage, func) # All public classes. for klass in _iter_classes(subpackage): # Class documentation. yield "### {}".format(_full_name(subpackage, klass)) yield _doc(klass) yield "#### Methods" for method in _iter_methods(klass, package): yield '##### ' + _doc_method(klass, method) yield "#### Properties" for prop in _iter_properties(klass, package): yield '##### ' + _doc_property(klass, prop)
python
def _generate_paragraphs(package, subpackages): """Generate the paragraphs of the API documentation.""" # API doc of each module. for subpackage in _iter_subpackages(package, subpackages): subpackage_name = subpackage.__name__ yield "## {}".format(subpackage_name) # Subpackage documentation. yield _doc(_import_module(subpackage_name)) # List of top-level functions in the subpackage. for func in _iter_functions(subpackage): yield '##### ' + _doc_function(subpackage, func) # All public classes. for klass in _iter_classes(subpackage): # Class documentation. yield "### {}".format(_full_name(subpackage, klass)) yield _doc(klass) yield "#### Methods" for method in _iter_methods(klass, package): yield '##### ' + _doc_method(klass, method) yield "#### Properties" for prop in _iter_properties(klass, package): yield '##### ' + _doc_property(klass, prop)
[ "def", "_generate_paragraphs", "(", "package", ",", "subpackages", ")", ":", "# API doc of each module.", "for", "subpackage", "in", "_iter_subpackages", "(", "package", ",", "subpackages", ")", ":", "subpackage_name", "=", "subpackage", ".", "__name__", "yield", "\"## {}\"", ".", "format", "(", "subpackage_name", ")", "# Subpackage documentation.", "yield", "_doc", "(", "_import_module", "(", "subpackage_name", ")", ")", "# List of top-level functions in the subpackage.", "for", "func", "in", "_iter_functions", "(", "subpackage", ")", ":", "yield", "'##### '", "+", "_doc_function", "(", "subpackage", ",", "func", ")", "# All public classes.", "for", "klass", "in", "_iter_classes", "(", "subpackage", ")", ":", "# Class documentation.", "yield", "\"### {}\"", ".", "format", "(", "_full_name", "(", "subpackage", ",", "klass", ")", ")", "yield", "_doc", "(", "klass", ")", "yield", "\"#### Methods\"", "for", "method", "in", "_iter_methods", "(", "klass", ",", "package", ")", ":", "yield", "'##### '", "+", "_doc_method", "(", "klass", ",", "method", ")", "yield", "\"#### Properties\"", "for", "prop", "in", "_iter_properties", "(", "klass", ",", "package", ")", ":", "yield", "'##### '", "+", "_doc_property", "(", "klass", ",", "prop", ")" ]
Generate the paragraphs of the API documentation.
[ "Generate", "the", "paragraphs", "of", "the", "API", "documentation", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/tools/api.py#L260-L289
train
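A toy version of the lazy section-generation pattern above, with a made-up `api` mapping standing in for the real introspection helpers:

```python
def generate_paragraphs(api):
    # Yield Markdown headings and bodies one paragraph at a time.
    for module, functions in api.items():
        yield "## {}".format(module)
        for name, doc in functions:
            yield '##### ' + name
            yield doc

api = {'phy.plot': [('zoom_delta(d)', 'Zoom the view by a given amount.')]}
print('\n\n'.join(generate_paragraphs(api)))
```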
kwikteam/phy
phy/cluster/supervisor.py
Supervisor._add_field_column
def _add_field_column(self, field): # pragma: no cover """Add a column for a given label field.""" @self.add_column(name=field) def get_my_label(cluster_id): return self.cluster_meta.get(field, cluster_id)
python
def _add_field_column(self, field): # pragma: no cover """Add a column for a given label field.""" @self.add_column(name=field) def get_my_label(cluster_id): return self.cluster_meta.get(field, cluster_id)
[ "def", "_add_field_column", "(", "self", ",", "field", ")", ":", "# pragma: no cover", "@", "self", ".", "add_column", "(", "name", "=", "field", ")", "def", "get_my_label", "(", "cluster_id", ")", ":", "return", "self", ".", "cluster_meta", ".", "get", "(", "field", ",", "cluster_id", ")" ]
Add a column for a given label field.
[ "Add", "a", "column", "for", "a", "given", "label", "field", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/supervisor.py#L234-L238
train
kwikteam/phy
phy/cluster/supervisor.py
Supervisor._emit_select
def _emit_select(self, cluster_ids, **kwargs): """Choose spikes from the specified clusters and emit the `select` event on the GUI.""" # Remove non-existing clusters from the selection. cluster_ids = self._keep_existing_clusters(cluster_ids) logger.debug("Select cluster(s): %s.", ', '.join(map(str, cluster_ids))) self.emit('select', cluster_ids, **kwargs)
python
def _emit_select(self, cluster_ids, **kwargs): """Choose spikes from the specified clusters and emit the `select` event on the GUI.""" # Remove non-existing clusters from the selection. cluster_ids = self._keep_existing_clusters(cluster_ids) logger.debug("Select cluster(s): %s.", ', '.join(map(str, cluster_ids))) self.emit('select', cluster_ids, **kwargs)
[ "def", "_emit_select", "(", "self", ",", "cluster_ids", ",", "*", "*", "kwargs", ")", ":", "# Remove non-existing clusters from the selection.", "cluster_ids", "=", "self", ".", "_keep_existing_clusters", "(", "cluster_ids", ")", "logger", ".", "debug", "(", "\"Select cluster(s): %s.\"", ",", "', '", ".", "join", "(", "map", "(", "str", ",", "cluster_ids", ")", ")", ")", "self", ".", "emit", "(", "'select'", ",", "cluster_ids", ",", "*", "*", "kwargs", ")" ]
Choose spikes from the specified clusters and emit the `select` event on the GUI.
[ "Choose", "spikes", "from", "the", "specified", "clusters", "and", "emit", "the", "select", "event", "on", "the", "GUI", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/supervisor.py#L336-L343
train
kwikteam/phy
phy/cluster/supervisor.py
Supervisor._update_cluster_view
def _update_cluster_view(self): """Initialize the cluster view with cluster data.""" logger.log(5, "Update the cluster view.") cluster_ids = [int(c) for c in self.clustering.cluster_ids] self.cluster_view.set_rows(cluster_ids)
python
def _update_cluster_view(self): """Initialize the cluster view with cluster data.""" logger.log(5, "Update the cluster view.") cluster_ids = [int(c) for c in self.clustering.cluster_ids] self.cluster_view.set_rows(cluster_ids)
[ "def", "_update_cluster_view", "(", "self", ")", ":", "logger", ".", "log", "(", "5", ",", "\"Update the cluster view.\"", ")", "cluster_ids", "=", "[", "int", "(", "c", ")", "for", "c", "in", "self", ".", "clustering", ".", "cluster_ids", "]", "self", ".", "cluster_view", ".", "set_rows", "(", "cluster_ids", ")" ]
Initialize the cluster view with cluster data.
[ "Initialize", "the", "cluster", "view", "with", "cluster", "data", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/supervisor.py#L379-L383
train
kwikteam/phy
phy/cluster/supervisor.py
Supervisor._update_similarity_view
def _update_similarity_view(self): """Update the similarity view with matches for the specified clusters.""" if not self.similarity: return selection = self.cluster_view.selected if not len(selection): return cluster_id = selection[0] cluster_ids = self.clustering.cluster_ids self._best = cluster_id logger.log(5, "Update the similarity view.") # This is a list of pairs (closest_cluster, similarity). similarities = self.similarity(cluster_id) # We save the similarity values wrt the currently-selected clusters. # Note that we keep the order of the output of the self.similarity() # function. clusters_sim = OrderedDict([(int(cl), s) for (cl, s) in similarities]) # List of similar clusters, remove non-existing ones. clusters = [c for c in clusters_sim.keys() if c in cluster_ids] # The similarity view will use these values. self._current_similarity_values = clusters_sim # Set the rows of the similarity view. # TODO: instead of the self._current_similarity_values hack, # give the possibility to specify the values here (?). self.similarity_view.set_rows([c for c in clusters if c not in selection])
python
def _update_similarity_view(self): """Update the similarity view with matches for the specified clusters.""" if not self.similarity: return selection = self.cluster_view.selected if not len(selection): return cluster_id = selection[0] cluster_ids = self.clustering.cluster_ids self._best = cluster_id logger.log(5, "Update the similarity view.") # This is a list of pairs (closest_cluster, similarity). similarities = self.similarity(cluster_id) # We save the similarity values wrt the currently-selected clusters. # Note that we keep the order of the output of the self.similarity() # function. clusters_sim = OrderedDict([(int(cl), s) for (cl, s) in similarities]) # List of similar clusters, remove non-existing ones. clusters = [c for c in clusters_sim.keys() if c in cluster_ids] # The similarity view will use these values. self._current_similarity_values = clusters_sim # Set the rows of the similarity view. # TODO: instead of the self._current_similarity_values hack, # give the possibility to specify the values here (?). self.similarity_view.set_rows([c for c in clusters if c not in selection])
[ "def", "_update_similarity_view", "(", "self", ")", ":", "if", "not", "self", ".", "similarity", ":", "return", "selection", "=", "self", ".", "cluster_view", ".", "selected", "if", "not", "len", "(", "selection", ")", ":", "return", "cluster_id", "=", "selection", "[", "0", "]", "cluster_ids", "=", "self", ".", "clustering", ".", "cluster_ids", "self", ".", "_best", "=", "cluster_id", "logger", ".", "log", "(", "5", ",", "\"Update the similarity view.\"", ")", "# This is a list of pairs (closest_cluster, similarity).", "similarities", "=", "self", ".", "similarity", "(", "cluster_id", ")", "# We save the similarity values wrt the currently-selected clusters.", "# Note that we keep the order of the output of the self.similarity()", "# function.", "clusters_sim", "=", "OrderedDict", "(", "[", "(", "int", "(", "cl", ")", ",", "s", ")", "for", "(", "cl", ",", "s", ")", "in", "similarities", "]", ")", "# List of similar clusters, remove non-existing ones.", "clusters", "=", "[", "c", "for", "c", "in", "clusters_sim", ".", "keys", "(", ")", "if", "c", "in", "cluster_ids", "]", "# The similarity view will use these values.", "self", ".", "_current_similarity_values", "=", "clusters_sim", "# Set the rows of the similarity view.", "# TODO: instead of the self._current_similarity_values hack,", "# give the possibility to specify the values here (?).", "self", ".", "similarity_view", ".", "set_rows", "(", "[", "c", "for", "c", "in", "clusters", "if", "c", "not", "in", "selection", "]", ")" ]
Update the similarity view with matches for the specified clusters.
[ "Update", "the", "similarity", "view", "with", "matches", "for", "the", "specified", "clusters", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/supervisor.py#L385-L412
train
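The ordering and filtering logic above reduces to a few lines, sketched here with made-up values: keep the similarity pairs in their original order, drop clusters that no longer exist, then hide the ones already selected.

```python
from collections import OrderedDict

similarities = [(7, .9), (3, .8), (42, .7)]   # (cluster, similarity) pairs
cluster_ids = {2, 3, 7}                       # existing clusters
selection = [7]                               # currently selected

clusters_sim = OrderedDict((int(cl), s) for cl, s in similarities)
clusters = [c for c in clusters_sim if c in cluster_ids]
rows = [c for c in clusters if c not in selection]
print(rows)   # -> [3]
```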
kwikteam/phy
phy/cluster/supervisor.py
Supervisor.on_cluster
def on_cluster(self, up): """Update the cluster views after clustering actions.""" similar = self.similarity_view.selected # Reinitialize the cluster view if clusters have changed. if up.added: self._update_cluster_view() # Select all new clusters in view 1. if up.history == 'undo': # Select the clusters that were selected before the undone # action. clusters_0, clusters_1 = up.undo_state[0]['selection'] # Select rows in the tables. self.cluster_view.select(clusters_0, up=up) self.similarity_view.select(clusters_1, up=up) elif up.added: if up.description == 'assign': # NOTE: we change the order such that the last selected # cluster (with a new color) is the split cluster. added = list(up.added[1:]) + [up.added[0]] else: added = up.added # Select the new clusters in the cluster view. self.cluster_view.select(added, up=up) if similar: self.similarity_view.next() elif up.metadata_changed: # Select next in similarity view if all moved are in that view. if set(up.metadata_changed) <= set(similar): next_cluster = self.similarity_view.get_next_id() self._update_similarity_view() if next_cluster is not None: # Select the cluster in the similarity view. self.similarity_view.select([next_cluster]) # Otherwise, select next in cluster view. else: self._update_cluster_view() # Determine if there is a next cluster set from a # previous clustering action. cluster = up.metadata_changed[0] next_cluster = self.cluster_meta.get('next_cluster', cluster) logger.debug("Get next_cluster for %d: %s.", cluster, next_cluster) # If there is not, fallback on the next cluster in the list. if next_cluster is None: self.cluster_view.select([cluster], do_emit=False) self.cluster_view.next() else: self.cluster_view.select([next_cluster])
python
def on_cluster(self, up): """Update the cluster views after clustering actions.""" similar = self.similarity_view.selected # Reinitialize the cluster view if clusters have changed. if up.added: self._update_cluster_view() # Select all new clusters in view 1. if up.history == 'undo': # Select the clusters that were selected before the undone # action. clusters_0, clusters_1 = up.undo_state[0]['selection'] # Select rows in the tables. self.cluster_view.select(clusters_0, up=up) self.similarity_view.select(clusters_1, up=up) elif up.added: if up.description == 'assign': # NOTE: we change the order such that the last selected # cluster (with a new color) is the split cluster. added = list(up.added[1:]) + [up.added[0]] else: added = up.added # Select the new clusters in the cluster view. self.cluster_view.select(added, up=up) if similar: self.similarity_view.next() elif up.metadata_changed: # Select next in similarity view if all moved are in that view. if set(up.metadata_changed) <= set(similar): next_cluster = self.similarity_view.get_next_id() self._update_similarity_view() if next_cluster is not None: # Select the cluster in the similarity view. self.similarity_view.select([next_cluster]) # Otherwise, select next in cluster view. else: self._update_cluster_view() # Determine if there is a next cluster set from a # previous clustering action. cluster = up.metadata_changed[0] next_cluster = self.cluster_meta.get('next_cluster', cluster) logger.debug("Get next_cluster for %d: %s.", cluster, next_cluster) # If there is not, fallback on the next cluster in the list. if next_cluster is None: self.cluster_view.select([cluster], do_emit=False) self.cluster_view.next() else: self.cluster_view.select([next_cluster])
[ "def", "on_cluster", "(", "self", ",", "up", ")", ":", "similar", "=", "self", ".", "similarity_view", ".", "selected", "# Reinitialize the cluster view if clusters have changed.", "if", "up", ".", "added", ":", "self", ".", "_update_cluster_view", "(", ")", "# Select all new clusters in view 1.", "if", "up", ".", "history", "==", "'undo'", ":", "# Select the clusters that were selected before the undone", "# action.", "clusters_0", ",", "clusters_1", "=", "up", ".", "undo_state", "[", "0", "]", "[", "'selection'", "]", "# Select rows in the tables.", "self", ".", "cluster_view", ".", "select", "(", "clusters_0", ",", "up", "=", "up", ")", "self", ".", "similarity_view", ".", "select", "(", "clusters_1", ",", "up", "=", "up", ")", "elif", "up", ".", "added", ":", "if", "up", ".", "description", "==", "'assign'", ":", "# NOTE: we change the order such that the last selected", "# cluster (with a new color) is the split cluster.", "added", "=", "list", "(", "up", ".", "added", "[", "1", ":", "]", ")", "+", "[", "up", ".", "added", "[", "0", "]", "]", "else", ":", "added", "=", "up", ".", "added", "# Select the new clusters in the cluster view.", "self", ".", "cluster_view", ".", "select", "(", "added", ",", "up", "=", "up", ")", "if", "similar", ":", "self", ".", "similarity_view", ".", "next", "(", ")", "elif", "up", ".", "metadata_changed", ":", "# Select next in similarity view if all moved are in that view.", "if", "set", "(", "up", ".", "metadata_changed", ")", "<=", "set", "(", "similar", ")", ":", "next_cluster", "=", "self", ".", "similarity_view", ".", "get_next_id", "(", ")", "self", ".", "_update_similarity_view", "(", ")", "if", "next_cluster", "is", "not", "None", ":", "# Select the cluster in the similarity view.", "self", ".", "similarity_view", ".", "select", "(", "[", "next_cluster", "]", ")", "# Otherwise, select next in cluster view.", "else", ":", "self", ".", "_update_cluster_view", "(", ")", "# Determine if there is a next cluster set from a", "# previous clustering action.", "cluster", "=", "up", ".", "metadata_changed", "[", "0", "]", "next_cluster", "=", "self", ".", "cluster_meta", ".", "get", "(", "'next_cluster'", ",", "cluster", ")", "logger", ".", "debug", "(", "\"Get next_cluster for %d: %s.\"", ",", "cluster", ",", "next_cluster", ")", "# If there is not, fallback on the next cluster in the list.", "if", "next_cluster", "is", "None", ":", "self", ".", "cluster_view", ".", "select", "(", "[", "cluster", "]", ",", "do_emit", "=", "False", ")", "self", ".", "cluster_view", ".", "next", "(", ")", "else", ":", "self", ".", "cluster_view", ".", "select", "(", "[", "next_cluster", "]", ")" ]
Update the cluster views after clustering actions.
[ "Update", "the", "cluster", "views", "after", "clustering", "actions", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/supervisor.py#L438-L488
train
kwikteam/phy
phy/cluster/supervisor.py
Supervisor.select
def select(self, *cluster_ids): """Select a list of clusters.""" # HACK: allow for `select(1, 2, 3)` in addition to `select([1, 2, 3])` # This makes it more convenient to select multiple clusters with # the snippet: `:c 1 2 3` instead of `:c 1,2,3`. if cluster_ids and isinstance(cluster_ids[0], (tuple, list)): cluster_ids = list(cluster_ids[0]) + list(cluster_ids[1:]) # Remove non-existing clusters from the selection. cluster_ids = self._keep_existing_clusters(cluster_ids) # Update the cluster view selection. self.cluster_view.select(cluster_ids)
python
def select(self, *cluster_ids): """Select a list of clusters.""" # HACK: allow for `select(1, 2, 3)` in addition to `select([1, 2, 3])` # This makes it more convenient to select multiple clusters with # the snippet: `:c 1 2 3` instead of `:c 1,2,3`. if cluster_ids and isinstance(cluster_ids[0], (tuple, list)): cluster_ids = list(cluster_ids[0]) + list(cluster_ids[1:]) # Remove non-existing clusters from the selection. cluster_ids = self._keep_existing_clusters(cluster_ids) # Update the cluster view selection. self.cluster_view.select(cluster_ids)
[ "def", "select", "(", "self", ",", "*", "cluster_ids", ")", ":", "# HACK: allow for `select(1, 2, 3)` in addition to `select([1, 2, 3])`", "# This makes it more convenient to select multiple clusters with", "# the snippet: `:c 1 2 3` instead of `:c 1,2,3`.", "if", "cluster_ids", "and", "isinstance", "(", "cluster_ids", "[", "0", "]", ",", "(", "tuple", ",", "list", ")", ")", ":", "cluster_ids", "=", "list", "(", "cluster_ids", "[", "0", "]", ")", "+", "list", "(", "cluster_ids", "[", "1", ":", "]", ")", "# Remove non-existing clusters from the selection.", "cluster_ids", "=", "self", ".", "_keep_existing_clusters", "(", "cluster_ids", ")", "# Update the cluster view selection.", "self", ".", "cluster_view", ".", "select", "(", "cluster_ids", ")" ]
Select a list of clusters.
[ "Select", "a", "list", "of", "clusters", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/supervisor.py#L556-L566
train
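The argument-flattening HACK is easy to isolate; the standalone version below accepts both calling conventions, mirroring the snippet behavior described in the comment.

```python
def select(*cluster_ids):
    # Allow select(1, 2, 3) as well as select([1, 2, 3]).
    if cluster_ids and isinstance(cluster_ids[0], (tuple, list)):
        cluster_ids = list(cluster_ids[0]) + list(cluster_ids[1:])
    return list(cluster_ids)

print(select(1, 2, 3))     # -> [1, 2, 3]
print(select([1, 2, 3]))   # -> [1, 2, 3]
print(select([1, 2], 3))   # -> [1, 2, 3]
```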
kwikteam/phy
phy/cluster/supervisor.py
Supervisor.merge
def merge(self, cluster_ids=None, to=None): """Merge the selected clusters.""" if cluster_ids is None: cluster_ids = self.selected if len(cluster_ids or []) <= 1: return self.clustering.merge(cluster_ids, to=to) self._global_history.action(self.clustering)
python
def merge(self, cluster_ids=None, to=None): """Merge the selected clusters.""" if cluster_ids is None: cluster_ids = self.selected if len(cluster_ids or []) <= 1: return self.clustering.merge(cluster_ids, to=to) self._global_history.action(self.clustering)
[ "def", "merge", "(", "self", ",", "cluster_ids", "=", "None", ",", "to", "=", "None", ")", ":", "if", "cluster_ids", "is", "None", ":", "cluster_ids", "=", "self", ".", "selected", "if", "len", "(", "cluster_ids", "or", "[", "]", ")", "<=", "1", ":", "return", "self", ".", "clustering", ".", "merge", "(", "cluster_ids", ",", "to", "=", "to", ")", "self", ".", "_global_history", ".", "action", "(", "self", ".", "clustering", ")" ]
Merge the selected clusters.
[ "Merge", "the", "selected", "clusters", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/supervisor.py#L575-L582
train
kwikteam/phy
phy/cluster/supervisor.py
Supervisor.split
def split(self, spike_ids=None, spike_clusters_rel=0): """Split the selected spikes.""" if spike_ids is None: spike_ids = self.emit('request_split', single=True) spike_ids = np.asarray(spike_ids, dtype=np.int64) assert spike_ids.dtype == np.int64 assert spike_ids.ndim == 1 if len(spike_ids) == 0: msg = ("You first need to select spikes in the feature " "view with a few Ctrl+Click around the spikes " "that you want to split.") self.emit('error', msg) return self.clustering.split(spike_ids, spike_clusters_rel=spike_clusters_rel) self._global_history.action(self.clustering)
python
def split(self, spike_ids=None, spike_clusters_rel=0):
    """Split the selected spikes."""
    if spike_ids is None:
        spike_ids = self.emit('request_split', single=True)
        spike_ids = np.asarray(spike_ids, dtype=np.int64)
        assert spike_ids.dtype == np.int64
        assert spike_ids.ndim == 1
    if len(spike_ids) == 0:
        msg = ("You first need to select spikes in the feature "
               "view with a few Ctrl+Click around the spikes "
               "that you want to split.")
        self.emit('error', msg)
        return
    self.clustering.split(spike_ids,
                          spike_clusters_rel=spike_clusters_rel)
    self._global_history.action(self.clustering)
[ "def", "split", "(", "self", ",", "spike_ids", "=", "None", ",", "spike_clusters_rel", "=", "0", ")", ":", "if", "spike_ids", "is", "None", ":", "spike_ids", "=", "self", ".", "emit", "(", "'request_split'", ",", "single", "=", "True", ")", "spike_ids", "=", "np", ".", "asarray", "(", "spike_ids", ",", "dtype", "=", "np", ".", "int64", ")", "assert", "spike_ids", ".", "dtype", "==", "np", ".", "int64", "assert", "spike_ids", ".", "ndim", "==", "1", "if", "len", "(", "spike_ids", ")", "==", "0", ":", "msg", "=", "(", "\"You first need to select spikes in the feature \"", "\"view with a few Ctrl+Click around the spikes \"", "\"that you want to split.\"", ")", "self", ".", "emit", "(", "'error'", ",", "msg", ")", "return", "self", ".", "clustering", ".", "split", "(", "spike_ids", ",", "spike_clusters_rel", "=", "spike_clusters_rel", ")", "self", ".", "_global_history", ".", "action", "(", "self", ".", "clustering", ")" ]
Split the selected spikes.
[ "Split", "the", "selected", "spikes", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/supervisor.py#L584-L599
train
kwikteam/phy
phy/cluster/supervisor.py
Supervisor.get_labels
def get_labels(self, field):
    """Return the labels of all clusters, for a given field."""
    return {c: self.cluster_meta.get(field, c)
            for c in self.clustering.cluster_ids}
python
def get_labels(self, field):
    """Return the labels of all clusters, for a given field."""
    return {c: self.cluster_meta.get(field, c)
            for c in self.clustering.cluster_ids}
[ "def", "get_labels", "(", "self", ",", "field", ")", ":", "return", "{", "c", ":", "self", ".", "cluster_meta", ".", "get", "(", "field", ",", "c", ")", "for", "c", "in", "self", ".", "clustering", ".", "cluster_ids", "}" ]
Return the labels of all clusters, for a given field.
[ "Return", "the", "labels", "of", "all", "clusters", "for", "a", "given", "field", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/supervisor.py#L610-L613
train
kwikteam/phy
phy/cluster/supervisor.py
Supervisor.label
def label(self, name, value, cluster_ids=None):
    """Assign a label to clusters.

    Example: `quality 3`

    """
    if cluster_ids is None:
        cluster_ids = self.cluster_view.selected
    if not hasattr(cluster_ids, '__len__'):
        cluster_ids = [cluster_ids]
    if len(cluster_ids) == 0:
        return
    self.cluster_meta.set(name, cluster_ids, value)
    self._global_history.action(self.cluster_meta)
python
def label(self, name, value, cluster_ids=None):
    """Assign a label to clusters.

    Example: `quality 3`

    """
    if cluster_ids is None:
        cluster_ids = self.cluster_view.selected
    if not hasattr(cluster_ids, '__len__'):
        cluster_ids = [cluster_ids]
    if len(cluster_ids) == 0:
        return
    self.cluster_meta.set(name, cluster_ids, value)
    self._global_history.action(self.cluster_meta)
[ "def", "label", "(", "self", ",", "name", ",", "value", ",", "cluster_ids", "=", "None", ")", ":", "if", "cluster_ids", "is", "None", ":", "cluster_ids", "=", "self", ".", "cluster_view", ".", "selected", "if", "not", "hasattr", "(", "cluster_ids", ",", "'__len__'", ")", ":", "cluster_ids", "=", "[", "cluster_ids", "]", "if", "len", "(", "cluster_ids", ")", "==", "0", ":", "return", "self", ".", "cluster_meta", ".", "set", "(", "name", ",", "cluster_ids", ",", "value", ")", "self", ".", "_global_history", ".", "action", "(", "self", ".", "cluster_meta", ")" ]
Assign a label to clusters. Example: `quality 3`
[ "Assign", "a", "label", "to", "clusters", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/supervisor.py#L615-L628
train
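A hedged sketch of calling `label`, again assuming a hypothetical Supervisor instance `sup`:

# Hypothetical sketch: `sup` is assumed to be a live Supervisor instance.
sup.label('quality', 3)                      # labels the current selection
sup.label('quality', 3, cluster_ids=[2, 5])  # labels explicit cluster ids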
kwikteam/phy
phy/cluster/supervisor.py
Supervisor.move
def move(self, group, cluster_ids=None):
    """Assign a group to some clusters.

    Example: `good`

    """
    if isinstance(cluster_ids, string_types):
        logger.warn("The list of clusters should be a list of integers, "
                    "not a string.")
        return
    self.label('group', group, cluster_ids=cluster_ids)
python
def move(self, group, cluster_ids=None):
    """Assign a group to some clusters.

    Example: `good`

    """
    if isinstance(cluster_ids, string_types):
        logger.warn("The list of clusters should be a list of integers, "
                    "not a string.")
        return
    self.label('group', group, cluster_ids=cluster_ids)
[ "def", "move", "(", "self", ",", "group", ",", "cluster_ids", "=", "None", ")", ":", "if", "isinstance", "(", "cluster_ids", ",", "string_types", ")", ":", "logger", ".", "warn", "(", "\"The list of clusters should be a list of integers, \"", "\"not a string.\"", ")", "return", "self", ".", "label", "(", "'group'", ",", "group", ",", "cluster_ids", "=", "cluster_ids", ")" ]
Assign a group to some clusters. Example: `good`
[ "Assign", "a", "group", "to", "some", "clusters", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/supervisor.py#L630-L640
train
kwikteam/phy
phy/cluster/supervisor.py
Supervisor.next
def next(self):
    """Select the next cluster."""
    if not self.selected:
        self.cluster_view.next()
    else:
        self.similarity_view.next()
python
def next(self):
    """Select the next cluster."""
    if not self.selected:
        self.cluster_view.next()
    else:
        self.similarity_view.next()
[ "def", "next", "(", "self", ")", ":", "if", "not", "self", ".", "selected", ":", "self", ".", "cluster_view", ".", "next", "(", ")", "else", ":", "self", ".", "similarity_view", ".", "next", "(", ")" ]
Select the next cluster.
[ "Select", "the", "next", "cluster", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/supervisor.py#L670-L675
train
kwikteam/phy
phy/cluster/supervisor.py
Supervisor.save
def save(self):
    """Save the manual clustering back to disk."""
    spike_clusters = self.clustering.spike_clusters
    groups = {c: self.cluster_meta.get('group', c) or 'unsorted'
              for c in self.clustering.cluster_ids}
    # List of tuples (field_name, dictionary).
    # NOTE: the exclusion set must be a 1-tuple; a bare string here would
    # make `in` perform a substring test instead of a membership test.
    labels = [(field, self.get_labels(field))
              for field in self.cluster_meta.fields
              if field not in ('next_cluster',)]
    # TODO: add option in add_field to declare a field unsavable.
    self.emit('request_save', spike_clusters, groups, *labels)
    # Cache the spikes_per_cluster array.
    self._save_spikes_per_cluster()
python
def save(self):
    """Save the manual clustering back to disk."""
    spike_clusters = self.clustering.spike_clusters
    groups = {c: self.cluster_meta.get('group', c) or 'unsorted'
              for c in self.clustering.cluster_ids}
    # List of tuples (field_name, dictionary).
    # NOTE: the exclusion set must be a 1-tuple; a bare string here would
    # make `in` perform a substring test instead of a membership test.
    labels = [(field, self.get_labels(field))
              for field in self.cluster_meta.fields
              if field not in ('next_cluster',)]
    # TODO: add option in add_field to declare a field unsavable.
    self.emit('request_save', spike_clusters, groups, *labels)
    # Cache the spikes_per_cluster array.
    self._save_spikes_per_cluster()
[ "def", "save", "(", "self", ")", ":", "spike_clusters", "=", "self", ".", "clustering", ".", "spike_clusters", "groups", "=", "{", "c", ":", "self", ".", "cluster_meta", ".", "get", "(", "'group'", ",", "c", ")", "or", "'unsorted'", "for", "c", "in", "self", ".", "clustering", ".", "cluster_ids", "}", "# List of tuples (field_name, dictionary).", "labels", "=", "[", "(", "field", ",", "self", ".", "get_labels", "(", "field", ")", ")", "for", "field", "in", "self", ".", "cluster_meta", ".", "fields", "if", "field", "not", "in", "(", "'next_cluster'", ")", "]", "# TODO: add option in add_field to declare a field unsavable.", "self", ".", "emit", "(", "'request_save'", ",", "spike_clusters", ",", "groups", ",", "*", "labels", ")", "# Cache the spikes_per_cluster array.", "self", ".", "_save_spikes_per_cluster", "(", ")" ]
Save the manual clustering back to disk.
[ "Save", "the", "manual", "clustering", "back", "to", "disk", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/supervisor.py#L692-L704
train
kwikteam/phy
phy/cluster/_utils.py
create_cluster_meta
def create_cluster_meta(cluster_groups):
    """Return a ClusterMeta instance with cluster group support."""
    meta = ClusterMeta()
    meta.add_field('group')

    cluster_groups = cluster_groups or {}
    data = {c: {'group': v} for c, v in cluster_groups.items()}

    meta.from_dict(data)
    return meta
python
def create_cluster_meta(cluster_groups):
    """Return a ClusterMeta instance with cluster group support."""
    meta = ClusterMeta()
    meta.add_field('group')

    cluster_groups = cluster_groups or {}
    data = {c: {'group': v} for c, v in cluster_groups.items()}

    meta.from_dict(data)
    return meta
[ "def", "create_cluster_meta", "(", "cluster_groups", ")", ":", "meta", "=", "ClusterMeta", "(", ")", "meta", ".", "add_field", "(", "'group'", ")", "cluster_groups", "=", "cluster_groups", "or", "{", "}", "data", "=", "{", "c", ":", "{", "'group'", ":", "v", "}", "for", "c", ",", "v", "in", "cluster_groups", ".", "items", "(", ")", "}", "meta", ".", "from_dict", "(", "data", ")", "return", "meta" ]
Return a ClusterMeta instance with cluster group support.
[ "Return", "a", "ClusterMeta", "instance", "with", "cluster", "group", "support", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/_utils.py#L35-L44
train
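A small sketch of `create_cluster_meta`, assuming `phy` is importable; it exercises only the behavior visible in the record:

from phy.cluster._utils import create_cluster_meta

meta = create_cluster_meta({2: 'good', 3: 'noise'})
assert meta.get('group', 2) == 'good'
assert meta.get('group', 7) is None  # unset clusters fall back to the default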
kwikteam/phy
phy/cluster/_utils.py
ClusterMeta.add_field
def add_field(self, name, default_value=None):
    """Add a field with an optional default value."""
    self._fields[name] = default_value

    def func(cluster):
        return self.get(name, cluster)

    setattr(self, name, func)
python
def add_field(self, name, default_value=None):
    """Add a field with an optional default value."""
    self._fields[name] = default_value

    def func(cluster):
        return self.get(name, cluster)

    setattr(self, name, func)
[ "def", "add_field", "(", "self", ",", "name", ",", "default_value", "=", "None", ")", ":", "self", ".", "_fields", "[", "name", "]", "=", "default_value", "def", "func", "(", "cluster", ")", ":", "return", "self", ".", "get", "(", "name", ",", "cluster", ")", "setattr", "(", "self", ",", "name", ",", "func", ")" ]
Add a field with an optional default value.
[ "Add", "a", "field", "with", "an", "optional", "default", "value", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/_utils.py#L116-L123
train
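A sketch of `add_field`, showing the generated per-field accessor; assumes `phy` is importable:

from phy.cluster._utils import ClusterMeta

meta = ClusterMeta()
meta.add_field('quality', default_value=0)
assert meta.get('quality', 4) == 0  # default until a value is set
assert meta.quality(4) == 0         # accessor created by setattr in add_field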
kwikteam/phy
phy/cluster/_utils.py
ClusterMeta.set
def set(self, field, clusters, value, add_to_stack=True):
    """Set the value of one or several clusters."""
    # Add the field if it doesn't exist.
    if field not in self._fields:
        self.add_field(field)
    assert field in self._fields

    clusters = _as_list(clusters)

    for cluster in clusters:
        if cluster not in self._data:
            self._data[cluster] = {}
        self._data[cluster][field] = value

    up = UpdateInfo(description='metadata_' + field,
                    metadata_changed=clusters,
                    metadata_value=value,
                    )
    undo_state = self.emit('request_undo_state', up)

    if add_to_stack:
        self._undo_stack.add((clusters, field, value, up, undo_state))
        self.emit('cluster', up)

    return up
python
def set(self, field, clusters, value, add_to_stack=True):
    """Set the value of one or several clusters."""
    # Add the field if it doesn't exist.
    if field not in self._fields:
        self.add_field(field)
    assert field in self._fields

    clusters = _as_list(clusters)

    for cluster in clusters:
        if cluster not in self._data:
            self._data[cluster] = {}
        self._data[cluster][field] = value

    up = UpdateInfo(description='metadata_' + field,
                    metadata_changed=clusters,
                    metadata_value=value,
                    )
    undo_state = self.emit('request_undo_state', up)

    if add_to_stack:
        self._undo_stack.add((clusters, field, value, up, undo_state))
        self.emit('cluster', up)

    return up
[ "def", "set", "(", "self", ",", "field", ",", "clusters", ",", "value", ",", "add_to_stack", "=", "True", ")", ":", "# Add the field if it doesn't exist.", "if", "field", "not", "in", "self", ".", "_fields", ":", "self", ".", "add_field", "(", "field", ")", "assert", "field", "in", "self", ".", "_fields", "clusters", "=", "_as_list", "(", "clusters", ")", "for", "cluster", "in", "clusters", ":", "if", "cluster", "not", "in", "self", ".", "_data", ":", "self", ".", "_data", "[", "cluster", "]", "=", "{", "}", "self", ".", "_data", "[", "cluster", "]", "[", "field", "]", "=", "value", "up", "=", "UpdateInfo", "(", "description", "=", "'metadata_'", "+", "field", ",", "metadata_changed", "=", "clusters", ",", "metadata_value", "=", "value", ",", ")", "undo_state", "=", "self", ".", "emit", "(", "'request_undo_state'", ",", "up", ")", "if", "add_to_stack", ":", "self", ".", "_undo_stack", ".", "add", "(", "(", "clusters", ",", "field", ",", "value", ",", "up", ",", "undo_state", ")", ")", "self", ".", "emit", "(", "'cluster'", ",", "up", ")", "return", "up" ]
Set the value of one or several clusters.
[ "Set", "the", "value", "of", "one", "of", "several", "clusters", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/_utils.py#L140-L163
train
kwikteam/phy
phy/cluster/_utils.py
ClusterMeta.get
def get(self, field, cluster):
    """Retrieve the value of one cluster."""
    if _is_list(cluster):
        return [self.get(field, c) for c in cluster]
    assert field in self._fields
    default = self._fields[field]
    return self._data.get(cluster, {}).get(field, default)
python
def get(self, field, cluster):
    """Retrieve the value of one cluster."""
    if _is_list(cluster):
        return [self.get(field, c) for c in cluster]
    assert field in self._fields
    default = self._fields[field]
    return self._data.get(cluster, {}).get(field, default)
[ "def", "get", "(", "self", ",", "field", ",", "cluster", ")", ":", "if", "_is_list", "(", "cluster", ")", ":", "return", "[", "self", ".", "get", "(", "field", ",", "c", ")", "for", "c", "in", "cluster", "]", "assert", "field", "in", "self", ".", "_fields", "default", "=", "self", ".", "_fields", "[", "field", "]", "return", "self", ".", "_data", ".", "get", "(", "cluster", ",", "{", "}", ")", ".", "get", "(", "field", ",", "default", ")" ]
Retrieve the value of one cluster.
[ "Retrieve", "the", "value", "of", "one", "cluster", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/_utils.py#L165-L171
train
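A sketch combining `set` (shown two records above) with `get`; assumes `phy` is importable:

from phy.cluster._utils import ClusterMeta

meta = ClusterMeta()
up = meta.set('group', [2, 3], 'good')  # the field is auto-created
assert up.description == 'metadata_group'
assert meta.get('group', [2, 3, 4]) == ['good', 'good', None]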
kwikteam/phy
phy/cluster/_utils.py
ClusterMeta.set_from_descendants
def set_from_descendants(self, descendants):
    """Update metadata of some clusters given the metadata of
    their ascendants."""
    for field in self.fields:

        # This gives a set of metadata values of all the parents
        # of any new cluster.
        candidates = defaultdict(set)
        for old, new in descendants:
            candidates[new].add(self.get(field, old))

        # Loop over all new clusters.
        for new, vals in candidates.items():
            vals = list(vals)
            default = self._fields[field]
            # If all the parents have the same value, assign it to
            # the new cluster if it is not the default.
            if len(vals) == 1 and vals[0] != default:
                self.set(field, new, vals[0])
python
def set_from_descendants(self, descendants):
    """Update metadata of some clusters given the metadata of
    their ascendants."""
    for field in self.fields:

        # This gives a set of metadata values of all the parents
        # of any new cluster.
        candidates = defaultdict(set)
        for old, new in descendants:
            candidates[new].add(self.get(field, old))

        # Loop over all new clusters.
        for new, vals in candidates.items():
            vals = list(vals)
            default = self._fields[field]
            # If all the parents have the same value, assign it to
            # the new cluster if it is not the default.
            if len(vals) == 1 and vals[0] != default:
                self.set(field, new, vals[0])
[ "def", "set_from_descendants", "(", "self", ",", "descendants", ")", ":", "for", "field", "in", "self", ".", "fields", ":", "# This gives a set of metadata values of all the parents", "# of any new cluster.", "candidates", "=", "defaultdict", "(", "set", ")", "for", "old", ",", "new", "in", "descendants", ":", "candidates", "[", "new", "]", ".", "add", "(", "self", ".", "get", "(", "field", ",", "old", ")", ")", "# Loop over all new clusters.", "for", "new", ",", "vals", "in", "candidates", ".", "items", "(", ")", ":", "vals", "=", "list", "(", "vals", ")", "default", "=", "self", ".", "_fields", "[", "field", "]", "# If all the parents have the same value, assign it to", "# the new cluster if it is not the default.", "if", "len", "(", "vals", ")", "==", "1", "and", "vals", "[", "0", "]", "!=", "default", ":", "self", ".", "set", "(", "field", ",", "new", ",", "vals", "[", "0", "]", ")" ]
Update metadata of some clusters given the metadata of their ascendants.
[ "Update", "metadata", "of", "some", "clusters", "given", "the", "metadata", "of", "their", "ascendants", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/_utils.py#L173-L191
train
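A sketch of the inheritance rule in `set_from_descendants`: a new cluster takes a parent value only when all parents agree and the value is not the default. Assumes `phy` is importable:

from phy.cluster._utils import ClusterMeta

meta = ClusterMeta()
meta.set('group', [2, 3], 'good')
# Clusters 2 and 3 are merged into 10; both parents agree on 'good'.
meta.set_from_descendants([(2, 10), (3, 10)])
assert meta.get('group', 10) == 'good'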
kwikteam/phy
phy/cluster/_utils.py
ClusterMeta.undo
def undo(self):
    """Undo the last metadata change.

    Returns
    -------

    up : UpdateInfo instance

    """
    args = self._undo_stack.back()
    if args is None:
        return

    self._data = deepcopy(self._data_base)
    for clusters, field, value, up, undo_state in self._undo_stack:
        if clusters is not None:
            self.set(field, clusters, value, add_to_stack=False)

    # Return the UpdateInfo instance of the undo action.
    up, undo_state = args[-2:]
    up.history = 'undo'
    up.undo_state = undo_state

    self.emit('cluster', up)
    return up
python
def undo(self):
    """Undo the last metadata change.

    Returns
    -------

    up : UpdateInfo instance

    """
    args = self._undo_stack.back()
    if args is None:
        return

    self._data = deepcopy(self._data_base)
    for clusters, field, value, up, undo_state in self._undo_stack:
        if clusters is not None:
            self.set(field, clusters, value, add_to_stack=False)

    # Return the UpdateInfo instance of the undo action.
    up, undo_state = args[-2:]
    up.history = 'undo'
    up.undo_state = undo_state

    self.emit('cluster', up)
    return up
[ "def", "undo", "(", "self", ")", ":", "args", "=", "self", ".", "_undo_stack", ".", "back", "(", ")", "if", "args", "is", "None", ":", "return", "self", ".", "_data", "=", "deepcopy", "(", "self", ".", "_data_base", ")", "for", "clusters", ",", "field", ",", "value", ",", "up", ",", "undo_state", "in", "self", ".", "_undo_stack", ":", "if", "clusters", "is", "not", "None", ":", "self", ".", "set", "(", "field", ",", "clusters", ",", "value", ",", "add_to_stack", "=", "False", ")", "# Return the UpdateInfo instance of the undo action.", "up", ",", "undo_state", "=", "args", "[", "-", "2", ":", "]", "up", ".", "history", "=", "'undo'", "up", ".", "undo_state", "=", "undo_state", "self", ".", "emit", "(", "'cluster'", ",", "up", ")", "return", "up" ]
Undo the last metadata change.

Returns
-------

up : UpdateInfo instance
[ "Undo", "the", "last", "metadata", "change", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/_utils.py#L194-L217
train
kwikteam/phy
phy/cluster/_utils.py
ClusterMeta.redo
def redo(self):
    """Redo the next metadata change.

    Returns
    -------

    up : UpdateInfo instance

    """
    args = self._undo_stack.forward()
    if args is None:
        return

    clusters, field, value, up, undo_state = args
    self.set(field, clusters, value, add_to_stack=False)

    # Return the UpdateInfo instance of the redo action.
    up.history = 'redo'

    self.emit('cluster', up)
    return up
python
def redo(self):
    """Redo the next metadata change.

    Returns
    -------

    up : UpdateInfo instance

    """
    args = self._undo_stack.forward()
    if args is None:
        return

    clusters, field, value, up, undo_state = args
    self.set(field, clusters, value, add_to_stack=False)

    # Return the UpdateInfo instance of the redo action.
    up.history = 'redo'

    self.emit('cluster', up)
    return up
[ "def", "redo", "(", "self", ")", ":", "args", "=", "self", ".", "_undo_stack", ".", "forward", "(", ")", "if", "args", "is", "None", ":", "return", "clusters", ",", "field", ",", "value", ",", "up", ",", "undo_state", "=", "args", "self", ".", "set", "(", "field", ",", "clusters", ",", "value", ",", "add_to_stack", "=", "False", ")", "# Return the UpdateInfo instance of the redo action.", "up", ".", "history", "=", "'redo'", "self", ".", "emit", "(", "'cluster'", ",", "up", ")", "return", "up" ]
Redo the next metadata change.

Returns
-------

up : UpdateInfo instance
[ "Redo", "the", "next", "metadata", "change", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/cluster/_utils.py#L219-L237
train
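A sketch of the metadata undo/redo round trip; assumes `phy` is importable and relies on the stack semantics shown in `set`, `undo`, and `redo`:

from phy.cluster._utils import ClusterMeta

meta = ClusterMeta()
meta.set('group', 2, 'good')
meta.set('group', 2, 'noise')
meta.undo()
assert meta.get('group', 2) == 'good'
meta.redo()
assert meta.get('group', 2) == 'noise'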
kwikteam/phy
phy/plot/utils.py
_get_boxes
def _get_boxes(pos, size=None, margin=0, keep_aspect_ratio=True):
    """Generate non-overlapping boxes in NDC from a set of positions."""
    # Get x, y.
    pos = np.asarray(pos, dtype=np.float64)
    x, y = pos.T
    x = x[:, np.newaxis]
    y = y[:, np.newaxis]

    w, h = size if size is not None else _get_box_size(x, y, margin=margin)

    x0, y0 = x - w, y - h
    x1, y1 = x + w, y + h

    # Renormalize the whole thing by keeping the aspect ratio.
    x0min, y0min, x1max, y1max = x0.min(), y0.min(), x1.max(), y1.max()
    if not keep_aspect_ratio:
        b = (x0min, y0min, x1max, y1max)
    else:
        dx = x1max - x0min
        dy = y1max - y0min
        if dx > dy:
            b = (x0min, (y1max + y0min) / 2. - dx / 2.,
                 x1max, (y1max + y0min) / 2. + dx / 2.)
        else:
            b = ((x1max + x0min) / 2. - dy / 2., y0min,
                 (x1max + x0min) / 2. + dy / 2., y1max)
    r = Range(from_bounds=b,
              to_bounds=(-1, -1, 1, 1))
    return np.c_[r.apply(np.c_[x0, y0]), r.apply(np.c_[x1, y1])]
python
def _get_boxes(pos, size=None, margin=0, keep_aspect_ratio=True):
    """Generate non-overlapping boxes in NDC from a set of positions."""
    # Get x, y.
    pos = np.asarray(pos, dtype=np.float64)
    x, y = pos.T
    x = x[:, np.newaxis]
    y = y[:, np.newaxis]

    w, h = size if size is not None else _get_box_size(x, y, margin=margin)

    x0, y0 = x - w, y - h
    x1, y1 = x + w, y + h

    # Renormalize the whole thing by keeping the aspect ratio.
    x0min, y0min, x1max, y1max = x0.min(), y0.min(), x1.max(), y1.max()
    if not keep_aspect_ratio:
        b = (x0min, y0min, x1max, y1max)
    else:
        dx = x1max - x0min
        dy = y1max - y0min
        if dx > dy:
            b = (x0min, (y1max + y0min) / 2. - dx / 2.,
                 x1max, (y1max + y0min) / 2. + dx / 2.)
        else:
            b = ((x1max + x0min) / 2. - dy / 2., y0min,
                 (x1max + x0min) / 2. + dy / 2., y1max)
    r = Range(from_bounds=b,
              to_bounds=(-1, -1, 1, 1))
    return np.c_[r.apply(np.c_[x0, y0]), r.apply(np.c_[x1, y1])]
[ "def", "_get_boxes", "(", "pos", ",", "size", "=", "None", ",", "margin", "=", "0", ",", "keep_aspect_ratio", "=", "True", ")", ":", "# Get x, y.", "pos", "=", "np", ".", "asarray", "(", "pos", ",", "dtype", "=", "np", ".", "float64", ")", "x", ",", "y", "=", "pos", ".", "T", "x", "=", "x", "[", ":", ",", "np", ".", "newaxis", "]", "y", "=", "y", "[", ":", ",", "np", ".", "newaxis", "]", "w", ",", "h", "=", "size", "if", "size", "is", "not", "None", "else", "_get_box_size", "(", "x", ",", "y", ",", "margin", "=", "margin", ")", "x0", ",", "y0", "=", "x", "-", "w", ",", "y", "-", "h", "x1", ",", "y1", "=", "x", "+", "w", ",", "y", "+", "h", "# Renormalize the whole thing by keeping the aspect ratio.", "x0min", ",", "y0min", ",", "x1max", ",", "y1max", "=", "x0", ".", "min", "(", ")", ",", "y0", ".", "min", "(", ")", ",", "x1", ".", "max", "(", ")", ",", "y1", ".", "max", "(", ")", "if", "not", "keep_aspect_ratio", ":", "b", "=", "(", "x0min", ",", "y0min", ",", "x1max", ",", "y1max", ")", "else", ":", "dx", "=", "x1max", "-", "x0min", "dy", "=", "y1max", "-", "y0min", "if", "dx", ">", "dy", ":", "b", "=", "(", "x0min", ",", "(", "y1max", "+", "y0min", ")", "/", "2.", "-", "dx", "/", "2.", ",", "x1max", ",", "(", "y1max", "+", "y0min", ")", "/", "2.", "+", "dx", "/", "2.", ")", "else", ":", "b", "=", "(", "(", "x1max", "+", "x0min", ")", "/", "2.", "-", "dy", "/", "2.", ",", "y0min", ",", "(", "x1max", "+", "x0min", ")", "/", "2.", "+", "dy", "/", "2.", ",", "y1max", ")", "r", "=", "Range", "(", "from_bounds", "=", "b", ",", "to_bounds", "=", "(", "-", "1", ",", "-", "1", ",", "1", ",", "1", ")", ")", "return", "np", ".", "c_", "[", "r", ".", "apply", "(", "np", ".", "c_", "[", "x0", ",", "y0", "]", ")", ",", "r", ".", "apply", "(", "np", ".", "c_", "[", "x1", ",", "y1", "]", ")", "]" ]
Generate non-overlapping boxes in NDC from a set of positions.
[ "Generate", "non", "-", "overlapping", "boxes", "in", "NDC", "from", "a", "set", "of", "positions", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/utils.py#L76-L105
train
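A quick sketch of `_get_boxes` on two probe-like positions; assumes `phy` is importable, and only the output shape (one `(x0, y0, x1, y1)` row per position) is asserted:

import numpy as np
from phy.plot.utils import _get_boxes

pos = np.array([[-1., 0.], [1., 0.]])
boxes = _get_boxes(pos)
assert boxes.shape == (2, 4)  # NDC box corners, one row per position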
kwikteam/phy
phy/plot/utils.py
_get_texture
def _get_texture(arr, default, n_items, from_bounds):
    """Prepare data to be uploaded as a texture.

    The from_bounds must be specified.

    """
    if not hasattr(default, '__len__'):  # pragma: no cover
        default = [default]
    n_cols = len(default)
    if arr is None:  # pragma: no cover
        arr = np.tile(default, (n_items, 1))
    assert arr.shape == (n_items, n_cols)
    # Convert to 3D texture.
    arr = arr[np.newaxis, ...].astype(np.float64)
    assert arr.shape == (1, n_items, n_cols)
    # NOTE: we need to cast the texture to [0., 1.] (float texture).
    # This is easy as soon as we assume that the signal bounds are in
    # [-1, 1].
    assert len(from_bounds) == 2
    m, M = map(float, from_bounds)
    assert np.all(arr >= m)
    assert np.all(arr <= M)
    arr = (arr - m) / (M - m)
    assert np.all(arr >= 0)
    assert np.all(arr <= 1.)
    return arr
python
def _get_texture(arr, default, n_items, from_bounds):
    """Prepare data to be uploaded as a texture.

    The from_bounds must be specified.

    """
    if not hasattr(default, '__len__'):  # pragma: no cover
        default = [default]
    n_cols = len(default)
    if arr is None:  # pragma: no cover
        arr = np.tile(default, (n_items, 1))
    assert arr.shape == (n_items, n_cols)
    # Convert to 3D texture.
    arr = arr[np.newaxis, ...].astype(np.float64)
    assert arr.shape == (1, n_items, n_cols)
    # NOTE: we need to cast the texture to [0., 1.] (float texture).
    # This is easy as soon as we assume that the signal bounds are in
    # [-1, 1].
    assert len(from_bounds) == 2
    m, M = map(float, from_bounds)
    assert np.all(arr >= m)
    assert np.all(arr <= M)
    arr = (arr - m) / (M - m)
    assert np.all(arr >= 0)
    assert np.all(arr <= 1.)
    return arr
[ "def", "_get_texture", "(", "arr", ",", "default", ",", "n_items", ",", "from_bounds", ")", ":", "if", "not", "hasattr", "(", "default", ",", "'__len__'", ")", ":", "# pragma: no cover", "default", "=", "[", "default", "]", "n_cols", "=", "len", "(", "default", ")", "if", "arr", "is", "None", ":", "# pragma: no cover", "arr", "=", "np", ".", "tile", "(", "default", ",", "(", "n_items", ",", "1", ")", ")", "assert", "arr", ".", "shape", "==", "(", "n_items", ",", "n_cols", ")", "# Convert to 3D texture.", "arr", "=", "arr", "[", "np", ".", "newaxis", ",", "...", "]", ".", "astype", "(", "np", ".", "float64", ")", "assert", "arr", ".", "shape", "==", "(", "1", ",", "n_items", ",", "n_cols", ")", "# NOTE: we need to cast the texture to [0., 1.] (float texture).", "# This is easy as soon as we assume that the signal bounds are in", "# [-1, 1].", "assert", "len", "(", "from_bounds", ")", "==", "2", "m", ",", "M", "=", "map", "(", "float", ",", "from_bounds", ")", "assert", "np", ".", "all", "(", "arr", ">=", "m", ")", "assert", "np", ".", "all", "(", "arr", "<=", "M", ")", "arr", "=", "(", "arr", "-", "m", ")", "/", "(", "M", "-", "m", ")", "assert", "np", ".", "all", "(", "arr", ">=", "0", ")", "assert", "np", ".", "all", "(", "arr", "<=", "1.", ")", "return", "arr" ]
Prepare data to be uploaded as a texture. The from_bounds must be specified.
[ "Prepare", "data", "to", "be", "uploaded", "as", "a", "texture", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/utils.py#L122-L147
train
kwikteam/phy
phy/plot/utils.py
_get_array
def _get_array(val, shape, default=None, dtype=np.float64):
    """Ensure an object is an array with the specified shape."""
    assert val is not None or default is not None
    if hasattr(val, '__len__') and len(val) == 0:  # pragma: no cover
        val = None
    # Do nothing if the array is already correct.
    if (isinstance(val, np.ndarray) and
            val.shape == shape and
            val.dtype == dtype):
        return val
    out = np.zeros(shape, dtype=dtype)
    # This solves `ValueError: could not broadcast input array from shape (n)
    # into shape (n, 1)`.
    if val is not None and isinstance(val, np.ndarray):
        if val.size == out.size:
            val = val.reshape(out.shape)
    out.flat[:] = val if val is not None else default
    assert out.shape == shape
    return out
python
def _get_array(val, shape, default=None, dtype=np.float64):
    """Ensure an object is an array with the specified shape."""
    assert val is not None or default is not None
    if hasattr(val, '__len__') and len(val) == 0:  # pragma: no cover
        val = None
    # Do nothing if the array is already correct.
    if (isinstance(val, np.ndarray) and
            val.shape == shape and
            val.dtype == dtype):
        return val
    out = np.zeros(shape, dtype=dtype)
    # This solves `ValueError: could not broadcast input array from shape (n)
    # into shape (n, 1)`.
    if val is not None and isinstance(val, np.ndarray):
        if val.size == out.size:
            val = val.reshape(out.shape)
    out.flat[:] = val if val is not None else default
    assert out.shape == shape
    return out
[ "def", "_get_array", "(", "val", ",", "shape", ",", "default", "=", "None", ",", "dtype", "=", "np", ".", "float64", ")", ":", "assert", "val", "is", "not", "None", "or", "default", "is", "not", "None", "if", "hasattr", "(", "val", ",", "'__len__'", ")", "and", "len", "(", "val", ")", "==", "0", ":", "# pragma: no cover", "val", "=", "None", "# Do nothing if the array is already correct.", "if", "(", "isinstance", "(", "val", ",", "np", ".", "ndarray", ")", "and", "val", ".", "shape", "==", "shape", "and", "val", ".", "dtype", "==", "dtype", ")", ":", "return", "val", "out", "=", "np", ".", "zeros", "(", "shape", ",", "dtype", "=", "dtype", ")", "# This solves `ValueError: could not broadcast input array from shape (n)", "# into shape (n, 1)`.", "if", "val", "is", "not", "None", "and", "isinstance", "(", "val", ",", "np", ".", "ndarray", ")", ":", "if", "val", ".", "size", "==", "out", ".", "size", ":", "val", "=", "val", ".", "reshape", "(", "out", ".", "shape", ")", "out", ".", "flat", "[", ":", "]", "=", "val", "if", "val", "is", "not", "None", "else", "default", "assert", "out", ".", "shape", "==", "shape", "return", "out" ]
Ensure an object is an array with the specified shape.
[ "Ensure", "an", "object", "is", "an", "array", "with", "the", "specified", "shape", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/utils.py#L150-L168
train
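A sketch of the broadcasting behavior of `_get_array`; assumes NumPy and `phy` are importable:

import numpy as np
from phy.plot.utils import _get_array

# A scalar is broadcast to the requested shape.
assert _get_array(3., (2, 1)).tolist() == [[3.0], [3.0]]
# A flat (n,) array is reshaped into the requested (n, 1) shape.
assert _get_array(np.arange(3), (3, 1)).tolist() == [[0.0], [1.0], [2.0]]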
kwikteam/phy
phy/plot/utils.py
_get_index
def _get_index(n_items, item_size, n):
    """Prepare an index attribute for GPU uploading."""
    index = np.arange(n_items)
    index = np.repeat(index, item_size)
    index = index.astype(np.float64)
    assert index.shape == (n,)
    return index
python
def _get_index(n_items, item_size, n):
    """Prepare an index attribute for GPU uploading."""
    index = np.arange(n_items)
    index = np.repeat(index, item_size)
    index = index.astype(np.float64)
    assert index.shape == (n,)
    return index
[ "def", "_get_index", "(", "n_items", ",", "item_size", ",", "n", ")", ":", "index", "=", "np", ".", "arange", "(", "n_items", ")", "index", "=", "np", ".", "repeat", "(", "index", ",", "item_size", ")", "index", "=", "index", ".", "astype", "(", "np", ".", "float64", ")", "assert", "index", ".", "shape", "==", "(", "n", ",", ")", "return", "index" ]
Prepare an index attribute for GPU uploading.
[ "Prepare", "an", "index", "attribute", "for", "GPU", "uploading", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/utils.py#L225-L231
train
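A sketch of `_get_index`; assumes `phy` is importable:

from phy.plot.utils import _get_index

# Two items of three vertices each: one repeated item index per vertex.
index = _get_index(n_items=2, item_size=3, n=6)
assert index.tolist() == [0.0, 0.0, 0.0, 1.0, 1.0, 1.0]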
kwikteam/phy
phy/plot/utils.py
_load_shader
def _load_shader(filename):
    """Load a shader file."""
    curdir = op.dirname(op.realpath(__file__))
    glsl_path = op.join(curdir, 'glsl')
    path = op.join(glsl_path, filename)
    with open(path, 'r') as f:
        return f.read()
python
def _load_shader(filename):
    """Load a shader file."""
    curdir = op.dirname(op.realpath(__file__))
    glsl_path = op.join(curdir, 'glsl')
    path = op.join(glsl_path, filename)
    with open(path, 'r') as f:
        return f.read()
[ "def", "_load_shader", "(", "filename", ")", ":", "curdir", "=", "op", ".", "dirname", "(", "op", ".", "realpath", "(", "__file__", ")", ")", "glsl_path", "=", "op", ".", "join", "(", "curdir", ",", "'glsl'", ")", "path", "=", "op", ".", "join", "(", "glsl_path", ",", "filename", ")", "with", "open", "(", "path", ",", "'r'", ")", "as", "f", ":", "return", "f", ".", "read", "(", ")" ]
Load a shader file.
[ "Load", "a", "shader", "file", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/plot/utils.py#L242-L248
train
kwikteam/phy
phy/utils/_color.py
_random_color
def _random_color(h_range=(0., 1.),
                  s_range=(.5, 1.),
                  v_range=(.5, 1.),
                  ):
    """Generate a random RGB color."""
    h, s, v = uniform(*h_range), uniform(*s_range), uniform(*v_range)
    r, g, b = hsv_to_rgb(np.array([[[h, s, v]]])).flat
    return r, g, b
python
def _random_color(h_range=(0., 1.),
                  s_range=(.5, 1.),
                  v_range=(.5, 1.),
                  ):
    """Generate a random RGB color."""
    h, s, v = uniform(*h_range), uniform(*s_range), uniform(*v_range)
    r, g, b = hsv_to_rgb(np.array([[[h, s, v]]])).flat
    return r, g, b
[ "def", "_random_color", "(", "h_range", "=", "(", "0.", ",", "1.", ")", ",", "s_range", "=", "(", ".5", ",", "1.", ")", ",", "v_range", "=", "(", ".5", ",", "1.", ")", ",", ")", ":", "h", ",", "s", ",", "v", "=", "uniform", "(", "*", "h_range", ")", ",", "uniform", "(", "*", "s_range", ")", ",", "uniform", "(", "*", "v_range", ")", "r", ",", "g", ",", "b", "=", "hsv_to_rgb", "(", "np", ".", "array", "(", "[", "[", "[", "h", ",", "s", ",", "v", "]", "]", "]", ")", ")", ".", "flat", "return", "r", ",", "g", ",", "b" ]
Generate a random RGB color.
[ "Generate", "a", "random", "RGB", "color", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/utils/_color.py#L18-L25
train
kwikteam/phy
phy/utils/_color.py
_is_bright
def _is_bright(rgb):
    """Return whether a RGB color is bright or not."""
    r, g, b = rgb
    gray = 0.299 * r + 0.587 * g + 0.114 * b
    return gray >= .5
python
def _is_bright(rgb):
    """Return whether a RGB color is bright or not."""
    r, g, b = rgb
    gray = 0.299 * r + 0.587 * g + 0.114 * b
    return gray >= .5
[ "def", "_is_bright", "(", "rgb", ")", ":", "r", ",", "g", ",", "b", "=", "rgb", "gray", "=", "0.299", "*", "r", "+", "0.587", "*", "g", "+", "0.114", "*", "b", "return", "gray", ">=", ".5" ]
Return whether a RGB color is bright or not.
[ "Return", "whether", "a", "RGB", "color", "is", "bright", "or", "not", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/utils/_color.py#L28-L32
train
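A sketch of the luma threshold in `_is_bright`; assumes `phy` is importable:

from phy.utils._color import _is_bright

assert _is_bright((1., 1., 1.))      # white: 0.299 + 0.587 + 0.114 = 1.0
assert not _is_bright((0., 0., .5))  # dark blue: 0.114 * 0.5 = 0.057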
kwikteam/phy
phy/utils/_types.py
_bunchify
def _bunchify(b):
    """Ensure all dict elements are Bunch."""
    assert isinstance(b, dict)
    b = Bunch(b)
    for k in b:
        if isinstance(b[k], dict):
            b[k] = Bunch(b[k])
    return b
python
def _bunchify(b):
    """Ensure all dict elements are Bunch."""
    assert isinstance(b, dict)
    b = Bunch(b)
    for k in b:
        if isinstance(b[k], dict):
            b[k] = Bunch(b[k])
    return b
[ "def", "_bunchify", "(", "b", ")", ":", "assert", "isinstance", "(", "b", ",", "dict", ")", "b", "=", "Bunch", "(", "b", ")", "for", "k", "in", "b", ":", "if", "isinstance", "(", "b", "[", "k", "]", ",", "dict", ")", ":", "b", "[", "k", "]", "=", "Bunch", "(", "b", "[", "k", "]", ")", "return", "b" ]
Ensure all dict elements are Bunch.
[ "Ensure", "all", "dict", "elements", "are", "Bunch", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/utils/_types.py#L34-L41
train
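A sketch of `_bunchify`; assumes `phy` is importable and that `Bunch` lives in the same module, as the record's code suggests:

from phy.utils._types import Bunch, _bunchify

b = _bunchify({'params': {'n': 3}})
assert isinstance(b.params, Bunch)  # nested dicts are converted too
assert b.params.n == 3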
kwikteam/phy
phy/utils/_types.py
_as_list
def _as_list(obj):
    """Ensure an object is a list."""
    if obj is None:
        return None
    elif isinstance(obj, string_types):
        return [obj]
    elif isinstance(obj, tuple):
        return list(obj)
    elif not hasattr(obj, '__len__'):
        return [obj]
    else:
        return obj
python
def _as_list(obj):
    """Ensure an object is a list."""
    if obj is None:
        return None
    elif isinstance(obj, string_types):
        return [obj]
    elif isinstance(obj, tuple):
        return list(obj)
    elif not hasattr(obj, '__len__'):
        return [obj]
    else:
        return obj
[ "def", "_as_list", "(", "obj", ")", ":", "if", "obj", "is", "None", ":", "return", "None", "elif", "isinstance", "(", "obj", ",", "string_types", ")", ":", "return", "[", "obj", "]", "elif", "isinstance", "(", "obj", ",", "tuple", ")", ":", "return", "list", "(", "obj", ")", "elif", "not", "hasattr", "(", "obj", ",", "'__len__'", ")", ":", "return", "[", "obj", "]", "else", ":", "return", "obj" ]
Ensure an object is a list.
[ "Ensure", "an", "object", "is", "a", "list", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/utils/_types.py#L67-L78
train
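A sketch of the normalization rules in `_as_list`; assumes `phy` is importable:

from phy.utils._types import _as_list

assert _as_list(None) is None
assert _as_list('abc') == ['abc']  # strings are wrapped, not iterated
assert _as_list((1, 2)) == [1, 2]
assert _as_list(3) == [3]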
kwikteam/phy
phy/utils/_types.py
_as_array
def _as_array(arr, dtype=None):
    """Convert an object to a numerical NumPy array.

    Avoid a copy if possible.

    """
    if arr is None:
        return None
    if isinstance(arr, np.ndarray) and dtype is None:
        return arr
    if isinstance(arr, integer_types + (float,)):
        arr = [arr]
    out = np.asarray(arr)
    if dtype is not None:
        if out.dtype != dtype:
            out = out.astype(dtype)
    if out.dtype not in _ACCEPTED_ARRAY_DTYPES:
        raise ValueError("'arr' seems to have an invalid dtype: "
                         "{0:s}".format(str(out.dtype)))
    return out
python
def _as_array(arr, dtype=None):
    """Convert an object to a numerical NumPy array.

    Avoid a copy if possible.

    """
    if arr is None:
        return None
    if isinstance(arr, np.ndarray) and dtype is None:
        return arr
    if isinstance(arr, integer_types + (float,)):
        arr = [arr]
    out = np.asarray(arr)
    if dtype is not None:
        if out.dtype != dtype:
            out = out.astype(dtype)
    if out.dtype not in _ACCEPTED_ARRAY_DTYPES:
        raise ValueError("'arr' seems to have an invalid dtype: "
                         "{0:s}".format(str(out.dtype)))
    return out
[ "def", "_as_array", "(", "arr", ",", "dtype", "=", "None", ")", ":", "if", "arr", "is", "None", ":", "return", "None", "if", "isinstance", "(", "arr", ",", "np", ".", "ndarray", ")", "and", "dtype", "is", "None", ":", "return", "arr", "if", "isinstance", "(", "arr", ",", "integer_types", "+", "(", "float", ",", ")", ")", ":", "arr", "=", "[", "arr", "]", "out", "=", "np", ".", "asarray", "(", "arr", ")", "if", "dtype", "is", "not", "None", ":", "if", "out", ".", "dtype", "!=", "dtype", ":", "out", "=", "out", ".", "astype", "(", "dtype", ")", "if", "out", ".", "dtype", "not", "in", "_ACCEPTED_ARRAY_DTYPES", ":", "raise", "ValueError", "(", "\"'arr' seems to have an invalid dtype: \"", "\"{0:s}\"", ".", "format", "(", "str", "(", "out", ".", "dtype", ")", ")", ")", "return", "out" ]
Convert an object to a numerical NumPy array. Avoid a copy if possible.
[ "Convert", "an", "object", "to", "a", "numerical", "NumPy", "array", "." ]
7e9313dc364304b7d2bd03b92938347343703003
https://github.com/kwikteam/phy/blob/7e9313dc364304b7d2bd03b92938347343703003/phy/utils/_types.py#L85-L104
train
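A sketch of `_as_array`; assumes NumPy and `phy` are importable:

import numpy as np
from phy.utils._types import _as_array

assert _as_array(None) is None
assert _as_array(3).tolist() == [3]  # scalars become 1-element arrays
a = np.arange(3)
assert _as_array(a) is a             # no copy when no dtype is requested
assert _as_array(a, np.float64).dtype == np.float64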