code stringlengths 114 1.05M | path stringlengths 3 312 | quality_prob float64 0.5 0.99 | learning_prob float64 0.2 1 | filename stringlengths 3 168 | kind stringclasses 1
value |
|---|---|---|---|---|---|
from abc import ABCMeta
from six import add_metaclass
from abc import abstractmethod
from pyNN.random import RandomDistribution
from pyNN.random import NumpyRNG
from spinn_front_end_common.utilities.utility_objs\
.provenance_data_item import ProvenanceDataItem
from spynnaker.pyNN.utilities import utility_calls
import numpy
import math
import re
@add_metaclass(ABCMeta)
class AbstractConnector(object):
    """ Abstract class which PyNN Connectors extend.

    Holds the projection details and provides shared helpers for\
    computing delay/weight statistics and for generating weight and\
    delay values from scalars, lists, RandomDistributions or distance\
    expressions.
    """

    # numpy record dtype of a generated synaptic block
    NUMPY_SYNAPSES_DTYPE = [("source", "uint32"), ("target", "uint16"),
                            ("weight", "float64"), ("delay", "float64"),
                            ("synapse_type", "uint8")]

    def __init__(self, safe=True, space=None, verbose=False):
        self._safe = safe
        self._space = space
        self._verbose = verbose

        # Projection details, filled in by set_projection_information
        self._pre_population = None
        self._post_population = None
        self._n_pre_neurons = None
        self._n_post_neurons = None
        self._rng = None

        # Number of delays raised to the minimum delay (for provenance)
        self._n_clipped_delays = 0
        self._min_delay = 0

    def set_projection_information(
            self, pre_population, post_population, rng, machine_time_step):
        """ Store the details of the projection in which this connector is\
            used

        :param pre_population: the pre-synaptic population
        :param post_population: the post-synaptic population
        :param rng: a random number generator, or None to create a NumpyRNG
        :param machine_time_step: simulation time step in microseconds
        """
        self._pre_population = pre_population
        self._post_population = post_population
        self._n_pre_neurons = pre_population.size
        self._n_post_neurons = post_population.size
        self._rng = rng
        if self._rng is None:
            self._rng = NumpyRNG()
        # The minimum delay is one machine time step, converted from us to ms
        self._min_delay = machine_time_step / 1000.0

    def _check_parameter(self, values, name, allow_lists=True):
        """ Check that the type of the values is supported

        :param values: a scalar, RandomDistribution or indexable sequence
        :param name: name of the parameter, used in error messages
        :param allow_lists: if False, indexable sequences are rejected
        :raise Exception: if the format of values is not recognised
        :raise NotImplementedError: if values is a list but lists are not\
            allowed
        """
        if (not numpy.isscalar(values) and
                not isinstance(values, RandomDistribution) and
                not hasattr(values, "__getitem__")):
            raise Exception("Parameter {} format unsupported".format(name))
        if not allow_lists and hasattr(values, "__getitem__"):
            # Bug fix: the original format string had two placeholders but
            # was given only one argument, so raising it crashed with an
            # IndexError instead of the intended message
            raise NotImplementedError(
                "Lists of {} are not supported by the implementation of"
                " {} on this platform".format(name, self.__class__.__name__))

    def _check_parameters(self, weights, delays, allow_lists=True):
        """ Check the types of the weights and delays are supported; lists\
            can be disallowed if desired
        """
        # Bug fix: allow_lists was previously ignored rather than being
        # passed on to the individual parameter checks
        self._check_parameter(weights, "weights", allow_lists)
        self._check_parameter(delays, "delays", allow_lists)

    @staticmethod
    def _get_delay_maximum(delays, n_connections):
        """ Get the maximum delay given a float, RandomDistribution or list\
            of delays
        """
        if isinstance(delays, RandomDistribution):
            max_estimated_delay = utility_calls.get_maximum_probable_value(
                delays, n_connections)
            if delays.boundaries is not None:
                # The estimate cannot exceed the distribution's upper bound
                return min(max(delays.boundaries), max_estimated_delay)
            return max_estimated_delay
        elif numpy.isscalar(delays):
            return delays
        elif hasattr(delays, "__getitem__"):
            return max(delays)
        raise Exception("Unrecognised delay format")

    @abstractmethod
    def get_delay_maximum(self):
        """ Get the maximum delay specified by the user in ms, or None if\
            unbounded
        """

    @staticmethod
    def _get_n_connections_from_pre_vertex_with_delay_maximum(
            delays, n_total_connections, n_connections, connection_slices,
            min_delay, max_delay):
        """ Get the expected number of delays that will fall within\
            min_delay and max_delay given a float, RandomDistribution or\
            list of delays
        """
        if isinstance(delays, RandomDistribution):
            prob_in_range = utility_calls.get_probability_within_range(
                delays, min_delay, max_delay)
            return int(math.ceil(utility_calls.get_probable_maximum_selected(
                n_total_connections, n_connections, prob_in_range)))
        elif numpy.isscalar(delays):
            # A single delay value is either entirely in or out of the range
            if min_delay <= delays <= max_delay:
                return int(math.ceil(n_connections))
            return 0
        elif hasattr(delays, "__getitem__"):
            n_delayed = sum([len([
                delay for delay in delays[connection_slice]
                if min_delay <= delay <= max_delay])
                for connection_slice in connection_slices])
            n_total = sum([
                len(delays[connection_slice])
                for connection_slice in connection_slices])
            prob_delayed = float(n_delayed) / float(n_total)
            return int(math.ceil(utility_calls.get_probable_maximum_selected(
                n_total_connections, n_delayed, prob_delayed)))
        raise Exception("Unrecognised delay format")

    @abstractmethod
    def get_n_connections_from_pre_vertex_maximum(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice,
            min_delay=None, max_delay=None):
        """ Get the maximum number of connections between those from each of\
            the neurons in the pre_vertex_slice to neurons in the\
            post_vertex_slice, for connections with a delay between min_delay\
            and max_delay (inclusive) if both specified\
            (otherwise all connections)
        """

    @abstractmethod
    def get_n_connections_to_post_vertex_maximum(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        """ Get the maximum number of connections between those to each of\
            the neurons in the post_vertex_slice from neurons in the\
            pre_vertex_slice
        """

    @staticmethod
    def _get_weight_mean(weights, connection_slices):
        """ Get the mean of the (absolute) weights
        """
        if isinstance(weights, RandomDistribution):
            return abs(utility_calls.get_mean(weights))
        elif numpy.isscalar(weights):
            return abs(weights)
        elif hasattr(weights, "__getitem__"):
            return numpy.mean([
                numpy.abs(weights[connection_slice])
                for connection_slice in connection_slices])
        raise Exception("Unrecognised weight format")

    @abstractmethod
    def get_weight_mean(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        """ Get the mean of the weights for this connection
        """

    @staticmethod
    def _get_weight_maximum(weights, n_connections, connection_slices):
        """ Get the maximum of the (absolute) weights
        """
        if isinstance(weights, RandomDistribution):
            mean_weight = utility_calls.get_mean(weights)
            if mean_weight < 0:
                # Negative distribution: the largest magnitude is at the
                # lower end of the distribution
                min_weight = utility_calls.get_minimum_probable_value(
                    weights, n_connections)
                if weights.boundaries is not None:
                    return abs(max(min_weight, min(weights.boundaries)))
                return abs(min_weight)
            else:
                max_weight = utility_calls.get_maximum_probable_value(
                    weights, n_connections)
                if weights.boundaries is not None:
                    return abs(min(max_weight, max(weights.boundaries)))
                return abs(max_weight)
        elif numpy.isscalar(weights):
            return abs(weights)
        elif hasattr(weights, "__getitem__"):
            return numpy.amax([
                numpy.abs(weights[connection_slice])
                for connection_slice in connection_slices])
        raise Exception("Unrecognised weight format")

    @abstractmethod
    def get_weight_maximum(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        """ Get the maximum of the weights for this connection
        """

    @staticmethod
    def _get_weight_variance(weights, connection_slices):
        """ Get the variance of the (absolute) weights
        """
        if isinstance(weights, RandomDistribution):
            return utility_calls.get_variance(weights)
        elif numpy.isscalar(weights):
            return 0.0
        elif hasattr(weights, "__getitem__"):
            return numpy.var([
                numpy.abs(weights[connection_slice])
                for connection_slice in connection_slices])
        raise Exception("Unrecognised weight format")

    @abstractmethod
    def get_weight_variance(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        """ Get the variance of the weights for this connection
        """

    def _expand_distances(self, d_expression):
        """ Check if a distance expression contains at least one term d[x]. \
            If yes, then the distances are expanded to distances in the\
            separate coordinates rather than the overall distance over all\
            coordinates, and we assume the user has specified an expression\
            such as d[0] + d[2].
        """
        regexpr = re.compile(r'.*d\[\d*\].*')
        if regexpr.match(d_expression):
            return True
        return False

    def _generate_values(self, values, n_connections, connection_slices):
        """ Generate n_connections values from the given specification,\
            which may be a RandomDistribution, a scalar, an indexable\
            sequence, a distance expression string or a callable of distance

        :raise Exception: if no space was provided for a distance-based\
            specification, or the format of values is not recognised
        """
        if isinstance(values, RandomDistribution):
            if n_connections == 1:
                # Wrap in an array so a single draw also comes back as an
                # array rather than a bare value
                return numpy.array([values.next(n_connections)])
            return values.next(n_connections)
        elif numpy.isscalar(values):
            return numpy.repeat([values], n_connections)
        elif hasattr(values, "__getitem__"):
            return numpy.concatenate([
                values[connection_slice]
                for connection_slice in connection_slices])
        elif isinstance(values, basestring) or callable(values):
            if self._space is None:
                raise Exception(
                    "No space object specified in projection {}-{}".format(
                        self._pre_population, self._post_population))
            expand_distances = True
            if isinstance(values, basestring):
                expand_distances = self._expand_distances(values)
            # d is referenced by the user's distance expression via eval
            d = self._space.distances(
                self._pre_population.positions,
                self._post_population.positions,
                expand_distances)
            if isinstance(values, basestring):
                # NOTE: eval of a user-supplied expression; only acceptable
                # because PyNN scripts are trusted code
                return eval(values)
            return values(d)
        # Bug fix: previously fell through returning None silently
        raise Exception("Unrecognised values format")

    def _generate_weights(self, values, n_connections, connection_slices):
        """ Generate weight values, checking that they do not mix signs,\
            and returning their absolute values
        """
        weights = self._generate_values(
            values, n_connections, connection_slices)
        if self._safe:
            if numpy.amin(weights) < 0 < numpy.amax(weights):
                raise Exception(
                    "Weights must be either all positive or all negative"
                    " in projection {}->{}".format(
                        self._pre_population.label,
                        self._post_population.label))
        return numpy.abs(weights)

    def _clip_delays(self, delays):
        """ Clip delay values to the minimum delay, keeping track of how\
            many have been clipped
        """
        # count values that could be clipped
        self._n_clipped_delays = numpy.sum(delays < self._min_delay)

        # clip values
        if numpy.isscalar(delays):
            if delays < self._min_delay:
                delays = self._min_delay
        else:
            if delays.size > 0:
                delays[delays < self._min_delay] = self._min_delay
        return delays

    def _generate_delays(self, values, n_connections, connection_slices):
        """ Generate valid (clipped) delay values
        """
        delays = self._generate_values(
            values, n_connections, connection_slices)
        return self._clip_delays(delays)

    def _generate_lists_on_host(self, values):
        """ Checks if the connector should generate lists on host rather\
            than trying to generate the connectivity data on the machine,\
            based on the types of the weights and/or delays
        """
        # Scalars are fine on the machine
        if numpy.isscalar(values):
            return True

        # Only certain types of random distributions are supported for
        # generation on the machine
        if isinstance(values, RandomDistribution):
            return values.name in (
                "uniform", "uniform_int", "poisson", "normal", "exponential")

        return False

    @abstractmethod
    def generate_on_machine(self):
        """ Determines if the connector generation is supported on the\
            machine or if the connector must be generated on the host
        """

    @abstractmethod
    def create_synaptic_block(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice,
            synapse_type):
        """ Create a synaptic block from the data
        """

    def get_provenance_data(self):
        """ Get provenance items, including how often delays were clipped
        """
        data_items = list()
        name = "{}_{}_{}".format(
            self._pre_population.label, self._post_population.label,
            self.__class__.__name__)
        data_items.append(ProvenanceDataItem(
            [name, "Times_synaptic_delays_got_clipped"],
            self._n_clipped_delays,
            report=self._n_clipped_delays > 0,
            message=(
                "The delays in the connector {} from {} to {} was clipped "
                "to {} a total of {} times. This can be avoided by reducing "
                "the timestep or increasing the minimum delay to one "
                "timestep".format(
                    self.__class__.__name__, self._pre_population.label,
                    self._post_population.label, self._min_delay,
                    self._n_clipped_delays))))
        return data_items
from pyNN.random import RandomDistribution
from spynnaker.pyNN.models.neural_projections.connectors.abstract_connector \
import AbstractConnector
import numpy
import logging
logger = logging.getLogger(__file__)
class FixedNumberPreConnector(AbstractConnector):
    """ Connects a fixed number of pre-synaptic neurons selected at random,
        to all post-synaptic neurons
    """

    def __init__(
            self, n, weights=0.0, delays=1, allow_self_connections=True,
            space=None, safe=True, verbose=False):
        """
        :param `int` n:
            number of random pre-synaptic neurons connected to output
        :param `bool` allow_self_connections:
            if the connector is used to connect a
            Population to itself, this flag determines whether a neuron is
            allowed to connect to itself, or only to other neurons in the
            Population.
        :param weights:
            may either be a float, a !RandomDistribution object, a list/
            1D array with at least as many items as connections to be
            created. Units nA.
        :param delays:
            If `None`, all synaptic delays will be set
            to the global minimum delay.
        :param `pyNN.Space` space:
            a Space object, needed if you wish to specify distance-
            dependent weights or delays - not implemented
        """
        AbstractConnector.__init__(self, safe, space, verbose)
        self._n_pre = n
        self._weights = weights
        self._delays = delays
        self._allow_self_connections = allow_self_connections

        # The chosen pre-synaptic neurons; generated lazily, once
        self._pre_neurons = None

        self._check_parameters(weights, delays, allow_lists=False)
        if isinstance(n, RandomDistribution):
            raise NotImplementedError(
                "RandomDistribution is not supported for n in the"
                " implementation of FixedNumberPreConnector on this platform")

    def get_delay_maximum(self):
        return self._get_delay_maximum(
            self._delays, self._n_pre * self._n_post_neurons)

    def _get_pre_neurons(self):
        """ Choose (once) the n random pre-synaptic neurons, sorted
        """
        if self._pre_neurons is None:
            # NOTE(review): this uses the global numpy RNG rather than
            # self._rng, so the choice is not reproducible through the
            # projection's rng - confirm whether this is intended
            self._pre_neurons = numpy.random.choice(
                self._n_pre_neurons, self._n_pre, False)
            self._pre_neurons.sort()
        return self._pre_neurons

    def _pre_neurons_in_slice(self, pre_vertex_slice):
        """ Get the chosen pre-neurons that fall within the given slice
        """
        pre_neurons = self._get_pre_neurons()
        return pre_neurons[
            (pre_neurons >= pre_vertex_slice.lo_atom) &
            (pre_neurons <= pre_vertex_slice.hi_atom)]

    def _is_connected(self, pre_vertex_slice):
        """ True if any chosen pre-neuron lies within the given slice
        """
        return self._pre_neurons_in_slice(pre_vertex_slice).size > 0

    def get_n_connections_from_pre_vertex_maximum(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice,
            min_delay=None, max_delay=None):
        if not self._is_connected(pre_vertex_slice):
            return 0
        if min_delay is None or max_delay is None:
            # Each chosen pre-neuron connects to every post-neuron
            return post_vertex_slice.n_atoms
        pre_neurons = self._pre_neurons_in_slice(pre_vertex_slice)
        return self._get_n_connections_from_pre_vertex_with_delay_maximum(
            self._delays, self._n_pre * self._n_post_neurons,
            len(pre_neurons) * post_vertex_slice.n_atoms, None,
            min_delay, max_delay)

    def get_n_connections_to_post_vertex_maximum(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        if not self._is_connected(pre_vertex_slice):
            return 0
        return self._n_pre

    def get_weight_mean(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        if not self._is_connected(pre_vertex_slice):
            return 0.0
        # Weights are a scalar or distribution here (lists are disallowed
        # in __init__), so no connection slices are needed; the previously
        # computed-but-unused locals have been removed
        return self._get_weight_mean(self._weights, None)

    def get_weight_maximum(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        if not self._is_connected(pre_vertex_slice):
            return 0.0
        pre_neurons = self._pre_neurons_in_slice(pre_vertex_slice)
        n_connections = len(pre_neurons) * post_vertex_slice.n_atoms
        return self._get_weight_maximum(
            self._weights, n_connections, None)

    def get_weight_variance(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        if not self._is_connected(pre_vertex_slice):
            return 0.0
        return self._get_weight_variance(self._weights, None)

    def generate_on_machine(self):
        return (
            not self._generate_lists_on_host(self._weights) and
            not self._generate_lists_on_host(self._delays))

    def create_synaptic_block(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice,
            synapse_type):
        if not self._is_connected(pre_vertex_slice):
            return numpy.zeros(0, dtype=AbstractConnector.NUMPY_SYNAPSES_DTYPE)
        pre_neurons_in_slice = self._pre_neurons_in_slice(pre_vertex_slice)
        n_connections = len(pre_neurons_in_slice) * post_vertex_slice.n_atoms
        if (not self._allow_self_connections and
                pre_vertex_slice is post_vertex_slice):
            # One self-connection per pre-neuron in the slice is skipped
            n_connections -= len(pre_neurons_in_slice)
        block = numpy.zeros(
            n_connections, dtype=AbstractConnector.NUMPY_SYNAPSES_DTYPE)
        if (not self._allow_self_connections and
                pre_vertex_slice is post_vertex_slice):
            block["source"] = [
                pre_index for pre_index in pre_neurons_in_slice
                for post_index in range(
                    post_vertex_slice.lo_atom, post_vertex_slice.hi_atom + 1)
                if pre_index != post_index]
            block["target"] = [
                post_index for pre_index in pre_neurons_in_slice
                for post_index in range(
                    post_vertex_slice.lo_atom, post_vertex_slice.hi_atom + 1)
                if pre_index != post_index]
        else:
            block["source"] = numpy.repeat(
                pre_neurons_in_slice, post_vertex_slice.n_atoms)
            block["target"] = numpy.tile(numpy.arange(
                post_vertex_slice.lo_atom, post_vertex_slice.hi_atom + 1),
                len(pre_neurons_in_slice))
        block["weight"] = self._generate_weights(
            self._weights, n_connections, None)
        block["delay"] = self._generate_delays(
            self._delays, n_connections, None)
        block["synapse_type"] = synapse_type
        return block
from spynnaker.pyNN.models.neural_projections.connectors.abstract_connector \
import AbstractConnector
import logging
import numpy
logger = logging.getLogger(__name__)
class FromListConnector(AbstractConnector):
    """ Make connections according to a list.

    :param: conn_list:
        a list of tuples, one tuple for each connection. Each
        tuple should contain::
            (pre_idx, post_idx, weight, delay)
        where pre_idx is the index (i.e. order in the Population,
        not the ID) of the presynaptic neuron, and post_idx is
        the index of the postsynaptic neuron.
    """

    # numpy record dtype used to store the connection list
    CONN_LIST_DTYPE = [
        ("source", "uint32"), ("target", "uint32"),
        ("weight", "float64"), ("delay", "float64")]

    def __init__(self, conn_list, safe=True, verbose=False):
        """
        Creates a new FromListConnector.
        """
        AbstractConnector.__init__(self, safe, None, verbose)
        if conn_list is None or len(conn_list) == 0:
            self._conn_list = numpy.zeros(0, dtype=self.CONN_LIST_DTYPE)
        else:
            # numpy needs a list of tuples to build a record array
            temp_conn_list = conn_list
            if not isinstance(conn_list[0], tuple):
                temp_conn_list = [tuple(items) for items in conn_list]
            self._conn_list = numpy.array(
                temp_conn_list, dtype=self.CONN_LIST_DTYPE)

    def _get_mask(self, pre_vertex_slice, post_vertex_slice,
                  min_delay=None, max_delay=None):
        """ Get a boolean mask selecting the connections whose source and\
            target fall within the given slices, optionally restricted to\
            delays between min_delay and max_delay (inclusive).

        Extracted helper: this mask was previously duplicated in every
        statistics method.
        """
        mask = ((self._conn_list["source"] >= pre_vertex_slice.lo_atom) &
                (self._conn_list["source"] <= pre_vertex_slice.hi_atom) &
                (self._conn_list["target"] >= post_vertex_slice.lo_atom) &
                (self._conn_list["target"] <= post_vertex_slice.hi_atom))
        if min_delay is not None and max_delay is not None:
            mask &= ((self._conn_list["delay"] >= min_delay) &
                     (self._conn_list["delay"] <= max_delay))
        return mask

    def get_delay_maximum(self):
        # Bug fix: numpy.max raises on an empty array; an empty connection
        # list has no delays, so report 0 (consistent with the 0 returned
        # by the other statistics methods for empty selections)
        if self._conn_list.size == 0:
            return 0
        return numpy.max(self._conn_list["delay"])

    def get_n_connections_from_pre_vertex_maximum(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice,
            min_delay=None, max_delay=None):
        sources = self._conn_list["source"][self._get_mask(
            pre_vertex_slice, post_vertex_slice, min_delay, max_delay)]
        if sources.size == 0:
            return 0
        # Maximum fan-out of any single source neuron in the selection
        return numpy.max(numpy.bincount(sources))

    def get_n_connections_to_post_vertex_maximum(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        targets = self._conn_list["target"][self._get_mask(
            pre_vertex_slice, post_vertex_slice)]
        if targets.size == 0:
            return 0
        # Maximum fan-in of any single target neuron in the selection
        return numpy.max(numpy.bincount(targets))

    def get_weight_mean(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        weights = self._conn_list["weight"][self._get_mask(
            pre_vertex_slice, post_vertex_slice)]
        if weights.size == 0:
            return 0
        return numpy.mean(weights)

    def get_weight_maximum(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        weights = self._conn_list["weight"][self._get_mask(
            pre_vertex_slice, post_vertex_slice)]
        if weights.size == 0:
            return 0
        return numpy.max(weights)

    def get_weight_variance(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        weights = self._conn_list["weight"][self._get_mask(
            pre_vertex_slice, post_vertex_slice)]
        if weights.size == 0:
            return 0
        return numpy.var(weights)

    def generate_on_machine(self):
        # Explicit lists must always be generated on host
        return False

    def create_synaptic_block(
            self, pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice,
            synapse_type):
        items = self._conn_list[self._get_mask(
            pre_vertex_slice, post_vertex_slice)]
        block = numpy.zeros(
            items.size, dtype=AbstractConnector.NUMPY_SYNAPSES_DTYPE)
        block["source"] = items["source"]
        block["target"] = items["target"]
        block["weight"] = items["weight"]
        # Delays below the minimum are clipped (and counted for provenance)
        block["delay"] = self._clip_delays(items["delay"])
        block["synapse_type"] = synapse_type
        return block
from spynnaker.pyNN.utilities import conf
from spynnaker.pyNN.utilities import constants
from spynnaker.pyNN.utilities import utility_calls
from spynnaker.pyNN import exceptions
from spynnaker.pyNN.models.neuron import master_pop_table_generators
from spynnaker.pyNN.utilities.running_stats import RunningStats
from spynnaker.pyNN.models.spike_source.spike_source_poisson \
import SpikeSourcePoisson
from spynnaker.pyNN.models.utility_models.delay_extension_vertex \
import DelayExtensionVertex
from spynnaker.pyNN.models.neuron.synapse_io.synapse_io_row_based \
import SynapseIORowBased
from spynnaker.pyNN.models.neural_projections.projection_partitionable_edge \
import ProjectionPartitionableEdge
from spynnaker.pyNN.models.neuron.synapse_dynamics.synapse_dynamics_static \
import SynapseDynamicsStatic
from pacman.model.partitionable_graph.abstract_partitionable_vertex \
import AbstractPartitionableVertex
from pacman.model.graph_mapper.slice import Slice
from data_specification.enums.data_type import DataType
from spinn_front_end_common.utilities import helpful_functions
from scipy import special
from collections import defaultdict
from pyNN.random import RandomDistribution
import math
import sys
import numpy
# TODO: Make sure these values are correct (particularly CPU cycles)
# Base (per-core) resource usage estimates for the synapse machinery;
# the SDRAM value feeds into _get_synapse_params_size below, and the
# names indicate the others cover DTCM and CPU-cycle overheads -
# presumably consumed by resource estimation elsewhere (TODO confirm)
_SYNAPSES_BASE_DTCM_USAGE_IN_BYTES = 28
_SYNAPSES_BASE_SDRAM_USAGE_IN_BYTES = 0
_SYNAPSES_BASE_N_CPU_CYCLES_PER_NEURON = 10
_SYNAPSES_BASE_N_CPU_CYCLES = 8
class SynapticManager(object):
""" Deals with synapses
"""
def __init__(self, synapse_type, machine_time_step, ring_buffer_sigma,
spikes_per_second, population_table_type=None,
synapse_io=None):
self._synapse_type = synapse_type
self._ring_buffer_sigma = ring_buffer_sigma
self._spikes_per_second = spikes_per_second
self._machine_time_step = machine_time_step
# Get the type of population table
self._population_table_type = population_table_type
if population_table_type is None:
population_table_type = ("MasterPopTableAs" + conf.config.get(
"MasterPopTable", "generator"))
algorithms = helpful_functions.get_valid_components(
master_pop_table_generators, "master_pop_table_as")
self._population_table_type = algorithms[population_table_type]()
# Get the synapse IO
self._synapse_io = synapse_io
if synapse_io is None:
self._synapse_io = SynapseIORowBased(machine_time_step)
if self._ring_buffer_sigma is None:
self._ring_buffer_sigma = conf.config.getfloat(
"Simulation", "ring_buffer_sigma")
if self._spikes_per_second is None:
self._spikes_per_second = conf.config.getfloat(
"Simulation", "spikes_per_second")
self._spikes_per_tick = max(
1.0,
self._spikes_per_second /
(1000000.0 / float(self._machine_time_step)))
# Prepare for dealing with STDP - there can only be one (non-static)
# synapse dynamics per vertex at present
self._synapse_dynamics = SynapseDynamicsStatic()
# Keep the details once computed to allow reading back
self._weight_scales = dict()
self._delay_key_index = dict()
self._retrieved_blocks = dict()
# A list of connection holders to be filled in pre-run, indexed by
# the edge the connection is for
self._pre_run_connection_holders = defaultdict(list)
@property
def synapse_dynamics(self):
return self._synapse_dynamics
@synapse_dynamics.setter
def synapse_dynamics(self, synapse_dynamics):
# We can always override static dynamics or None
if isinstance(self._synapse_dynamics, SynapseDynamicsStatic):
self._synapse_dynamics = synapse_dynamics
# We can ignore a static dynamics trying to overwrite a plastic one
elif isinstance(synapse_dynamics, SynapseDynamicsStatic):
pass
# Otherwise, the dynamics must be equal
elif not synapse_dynamics.is_same_as(self._synapse_dynamics):
raise exceptions.SynapticConfigurationException(
"Synapse dynamics must match exactly when using multiple edges"
"to the same population")
@property
def synapse_type(self):
return self._synapse_type
@property
def ring_buffer_sigma(self):
return self._ring_buffer_sigma
@ring_buffer_sigma.setter
def ring_buffer_sigma(self, ring_buffer_sigma):
self._ring_buffer_sigma = ring_buffer_sigma
@property
def spikes_per_second(self):
return self._spikes_per_second
@spikes_per_second.setter
def spikes_per_second(self, spikes_per_second):
self._spikes_per_second = spikes_per_second
@property
def maximum_delay_supported_in_ms(self):
return self._synapse_io.get_maximum_delay_supported_in_ms()
@property
def vertex_executable_suffix(self):
return self._synapse_dynamics.get_vertex_executable_suffix()
def add_pre_run_connection_holder(
self, connection_holder, edge, synapse_info):
self._pre_run_connection_holders[(edge, synapse_info)].append(
connection_holder)
def get_n_cpu_cycles(self, vertex_slice, graph):
# TODO: Calculate this correctly
return 0
def get_dtcm_usage_in_bytes(self, vertex_slice, graph):
# TODO: Calculate this correctly
return 0
def _get_synapse_params_size(self, vertex_slice):
per_neuron_usage = (
self._synapse_type.get_sdram_usage_per_neuron_in_bytes())
return (_SYNAPSES_BASE_SDRAM_USAGE_IN_BYTES +
(per_neuron_usage * vertex_slice.n_atoms) +
(4 * self._synapse_type.get_n_synapse_types()))
def _get_exact_synaptic_blocks_size(
self, post_slices, post_slice_index, post_vertex_slice,
graph_mapper, subvertex, subvertex_in_edges):
""" Get the exact size all of the synaptic blocks
"""
memory_size = 0
# Go through the subedges and add up the memory
for subedge in subvertex_in_edges:
edge = graph_mapper.get_partitionable_edge_from_partitioned_edge(
subedge)
if isinstance(edge, ProjectionPartitionableEdge):
# Add on the size of the tables to be generated
pre_vertex_slice = graph_mapper.get_subvertex_slice(
subedge.pre_subvertex)
pre_slices = graph_mapper.get_subvertex_slices(edge.pre_vertex)
pre_slice_index = graph_mapper.get_subvertex_index(
subedge.pre_subvertex)
memory_size += self._get_size_of_synapse_information(
edge.synapse_information, pre_slices, pre_slice_index,
post_slices, post_slice_index, pre_vertex_slice,
post_vertex_slice, edge.n_delay_stages)
return memory_size
def _get_estimate_synaptic_blocks_size(self, post_vertex_slice, in_edges):
""" Get an estimate of the synaptic blocks memory size
"""
memory_size = 0
for in_edge in in_edges:
if isinstance(in_edge, ProjectionPartitionableEdge):
# Get an estimate of the number of post sub-vertices by
# assuming that all of them are the same size as this one
post_slices = [Slice(
lo_atom, min(
in_edge.post_vertex.n_atoms,
lo_atom + post_vertex_slice.n_atoms - 1))
for lo_atom in range(
0, in_edge.post_vertex.n_atoms,
post_vertex_slice.n_atoms)]
post_slice_index = int(math.floor(
float(post_vertex_slice.lo_atom) /
float(post_vertex_slice.n_atoms)))
# Get an estimate of the number of pre-sub-vertices - clearly
# this will not be correct if the SDRAM usage is high!
# TODO: Can be removed once we move to population-based keys
n_atoms_per_subvertex = sys.maxint
if isinstance(in_edge.pre_vertex, AbstractPartitionableVertex):
n_atoms_per_subvertex = \
in_edge.pre_vertex.get_max_atoms_per_core()
if in_edge.pre_vertex.n_atoms < n_atoms_per_subvertex:
n_atoms_per_subvertex = in_edge.pre_vertex.n_atoms
pre_slices = [Slice(
lo_atom, min(
in_edge.pre_vertex.n_atoms,
lo_atom + n_atoms_per_subvertex - 1))
for lo_atom in range(
0, in_edge.pre_vertex.n_atoms, n_atoms_per_subvertex)]
pre_slice_index = 0
for pre_vertex_slice in pre_slices:
memory_size += self._get_size_of_synapse_information(
in_edge.synapse_information, pre_slices,
pre_slice_index, post_slices, post_slice_index,
pre_vertex_slice, post_vertex_slice,
in_edge.n_delay_stages)
pre_slice_index += 1
return memory_size
def _get_size_of_synapse_information(
self, synapse_information, pre_slices, pre_slice_index,
post_slices, post_slice_index, pre_vertex_slice, post_vertex_slice,
n_delay_stages):
memory_size = 0
for synapse_info in synapse_information:
undelayed_size, delayed_size = \
self._synapse_io.get_sdram_usage_in_bytes(
synapse_info, pre_slices,
pre_slice_index, post_slices, post_slice_index,
pre_vertex_slice, post_vertex_slice,
n_delay_stages, self._population_table_type)
memory_size = self._population_table_type\
.get_next_allowed_address(memory_size)
memory_size += undelayed_size
memory_size = self._population_table_type\
.get_next_allowed_address(memory_size)
memory_size += delayed_size
return memory_size
def _get_synapse_dynamics_parameter_size(self, vertex_slice, in_edges):
""" Get the size of the synapse dynamics region
"""
return self._synapse_dynamics.get_parameters_sdram_usage_in_bytes(
vertex_slice.n_atoms, self._synapse_type.get_n_synapse_types())
def get_sdram_usage_in_bytes(self, vertex_slice, in_edges):
return (
self._get_synapse_params_size(vertex_slice) +
self._get_synapse_dynamics_parameter_size(vertex_slice, in_edges) +
self._get_estimate_synaptic_blocks_size(vertex_slice, in_edges) +
self._population_table_type.get_master_population_table_size(
vertex_slice, in_edges))
def _reserve_memory_regions(
self, spec, vertex, subvertex, vertex_slice, graph, sub_graph,
all_syn_block_sz, graph_mapper):
spec.reserve_memory_region(
region=constants.POPULATION_BASED_REGIONS.SYNAPSE_PARAMS.value,
size=self._get_synapse_params_size(vertex_slice),
label='SynapseParams')
in_edges = graph.incoming_edges_to_vertex(vertex)
master_pop_table_sz = \
self._population_table_type.get_exact_master_population_table_size(
subvertex, sub_graph, graph_mapper)
if master_pop_table_sz > 0:
spec.reserve_memory_region(
region=constants.POPULATION_BASED_REGIONS.POPULATION_TABLE
.value,
size=master_pop_table_sz, label='PopTable')
if all_syn_block_sz > 0:
spec.reserve_memory_region(
region=constants.POPULATION_BASED_REGIONS.SYNAPTIC_MATRIX
.value,
size=all_syn_block_sz, label='SynBlocks')
synapse_dynamics_sz = self._get_synapse_dynamics_parameter_size(
vertex_slice, in_edges)
if synapse_dynamics_sz != 0:
spec.reserve_memory_region(
region=constants.POPULATION_BASED_REGIONS.SYNAPSE_DYNAMICS
.value,
size=synapse_dynamics_sz, label='synapseDynamicsParams')
def get_number_of_mallocs_used_by_dsg(self):
return 4
    @staticmethod
    def _ring_buffer_expected_upper_bound(
            weight_mean, weight_std_dev, spikes_per_second,
            machine_timestep, n_synapses_in, sigma):
        """ Provide an expected upper bound on accumulated values in a ring\
            buffer element.

        Requires an assessment of maximum Poisson input rate, and assumes
        knowledge of mean and SD of the weight distribution, fan-in and
        timestep.  All arguments should be assumed real values except
        n_synapses_in, which will be an integer.

        :param weight_mean: Mean of weight distribution (in either nA or\
            microSiemens as required)
        :param weight_std_dev: SD of weight distribution
        :param spikes_per_second: Maximum expected Poisson rate in Hz
        :param machine_timestep: in us
        :param n_synapses_in: No of connected synapses
        :param sigma: How many SD above the mean to go for upper bound; a\
            good starting choice is 5.0.  Given length of simulation we can\
            set this for approximate number of saturation events.
        """
        # E[ number of spikes ] in a timestep
        # x /1000000.0 = conversion between microsecond to second
        average_spikes_per_timestep = (
            float(n_synapses_in * spikes_per_second) *
            (float(machine_timestep) / 1000000.0))
        # Exact variance contribution from inherent Poisson variation
        poisson_variance = average_spikes_per_timestep * (weight_mean ** 2)
        # Upper end of range for Poisson summation required below
        # upper_bound needs to be an integer
        upper_bound = int(round(average_spikes_per_timestep +
                                constants.POSSION_SIGMA_SUMMATION_LIMIT *
                                math.sqrt(average_spikes_per_timestep)))
        # Closed-form exact solution for summation that gives the variance
        # contributed by weight distribution variation when modulated by
        # Poisson PDF.  Requires scipy.special for gamma and incomplete gamma
        # functions.  Beware: incomplete gamma doesn't work the same as
        # Mathematica because (1) it's regularised and needs a further
        # multiplication and (2) it's actually the complement that is needed
        # i.e. 'gammaincc']
        weight_variance = 0.0
        if weight_std_dev > 0:
            lngamma = special.gammaln(1 + upper_bound)
            gammai = special.gammaincc(1 + upper_bound,
                                       average_spikes_per_timestep)
            big_ratio = (math.log(average_spikes_per_timestep) * upper_bound -
                         lngamma)
            # Guard against overflow/underflow of exp(big_ratio) when
            # computing the log-domain variance below
            if -701.0 < big_ratio < 701.0 and big_ratio != 0.0:
                log_weight_variance = (
                    -average_spikes_per_timestep +
                    math.log(average_spikes_per_timestep) +
                    2.0 * math.log(weight_std_dev) +
                    math.log(math.exp(average_spikes_per_timestep) * gammai -
                             math.exp(big_ratio)))
                weight_variance = math.exp(log_weight_variance)
        # upper bound calculation -> mean + n * SD
        return ((average_spikes_per_timestep * weight_mean) +
                (sigma * math.sqrt(poisson_variance + weight_variance)))
    def _get_ring_buffer_to_input_left_shifts(
            self, subvertex, sub_graph, graph_mapper, post_slices,
            post_slice_index, post_vertex_slice, machine_timestep,
            weight_scale):
        """ Get the scaling of the ring buffer to provide as much accuracy as\
            possible without too much overflow

        :return: a list of left-shift powers, one per synapse type
        """
        weight_scale_squared = weight_scale * weight_scale
        n_synapse_types = self._synapse_type.get_n_synapse_types()
        # Per-synapse-type accumulators across all incoming subedges
        running_totals = [RunningStats() for _ in range(n_synapse_types)]
        total_weights = numpy.zeros(n_synapse_types)
        biggest_weight = numpy.zeros(n_synapse_types)
        weights_signed = False
        max_rate = self._spikes_per_second
        for subedge in sub_graph.incoming_subedges_from_subvertex(subvertex):
            pre_vertex_slice = graph_mapper.get_subvertex_slice(
                subedge.pre_subvertex)
            edge = graph_mapper.get_partitionable_edge_from_partitioned_edge(
                subedge)
            pre_slices = [
                graph_mapper.get_subvertex_slice(subv)
                for subv in graph_mapper.get_subvertices_from_vertex(
                    edge.pre_vertex)]
            pre_slice_index = pre_slices.index(pre_vertex_slice)
            if isinstance(edge, ProjectionPartitionableEdge):
                for synapse_info in edge.synapse_information:
                    synapse_type = synapse_info.synapse_type
                    synapse_dynamics = synapse_info.synapse_dynamics
                    connector = synapse_info.connector
                    # Weight statistics are scaled into post-synaptic units
                    weight_mean = abs(synapse_dynamics.get_weight_mean(
                        connector, pre_slices, pre_slice_index,
                        post_slices, post_slice_index, pre_vertex_slice,
                        post_vertex_slice) * weight_scale)
                    n_connections = \
                        connector.get_n_connections_to_post_vertex_maximum(
                            pre_slices, pre_slice_index, post_slices,
                            post_slice_index, pre_vertex_slice,
                            post_vertex_slice)
                    weight_variance = abs(synapse_dynamics.get_weight_variance(
                        connector, pre_slices, pre_slice_index,
                        post_slices, post_slice_index, pre_vertex_slice,
                        post_vertex_slice) * weight_scale_squared)
                    running_totals[synapse_type].add_items(
                        weight_mean, weight_variance, n_connections)
                    weight_max = (synapse_dynamics.get_weight_maximum(
                        connector, pre_slices, pre_slice_index,
                        post_slices, post_slice_index, pre_vertex_slice,
                        post_vertex_slice) * weight_scale)
                    biggest_weight[synapse_type] = max(
                        biggest_weight[synapse_type], weight_max)
                    spikes_per_tick = self._spikes_per_tick
                    # Poisson sources may spike faster than the default
                    # assumed rate; take their actual (maximum) rate
                    if isinstance(edge.pre_vertex, SpikeSourcePoisson):
                        rate = edge.pre_vertex.rate
                        if hasattr(rate, "__getitem__"):
                            rate = max(rate)
                        elif isinstance(rate, RandomDistribution):
                            rate = utility_calls.get_maximum_probable_value(
                                rate, pre_vertex_slice.n_atoms)
                        if rate > max_rate:
                            max_rate = rate
                        spikes_per_tick = max(
                            1.0,
                            rate /
                            (1000000.0 / float(self._machine_time_step)))
                    # Absolute worst case: every connection fires its
                    # maximum weight at the maximum rate
                    total_weights[synapse_type] += spikes_per_tick * (
                        weight_max * n_connections)
                    if synapse_dynamics.are_weights_signed():
                        weights_signed = True
        # Take the smaller of the statistical upper bound and the absolute
        # worst case, but never less than the single biggest weight
        max_weights = numpy.zeros(n_synapse_types)
        for synapse_type in range(n_synapse_types):
            stats = running_totals[synapse_type]
            max_weights[synapse_type] = min(
                self._ring_buffer_expected_upper_bound(
                    stats.mean, stats.standard_deviation,
                    max_rate, machine_timestep, stats.n_items,
                    self._ring_buffer_sigma),
                total_weights[synapse_type])
            max_weights[synapse_type] = max(
                max_weights[synapse_type], biggest_weight[synapse_type])
        # Convert these to powers
        max_weight_powers = [0 if w <= 0
                             else int(math.ceil(max(0, math.log(w, 2))))
                             for w in max_weights]
        # If 2^max_weight_power equals the max weight, we have to add another
        # power, as range is 0 - (just under 2^max_weight_power)!
        max_weight_powers = [w + 1 if (2 ** w) >= a else w
                             for w, a in zip(max_weight_powers, max_weights)]
        # If we have synapse dynamics that uses signed weights,
        # Add another bit of shift to prevent overflows
        if weights_signed:
            max_weight_powers = [m + 1 for m in max_weight_powers]
        return max_weight_powers
@staticmethod
def _get_weight_scale(ring_buffer_to_input_left_shift):
""" Return the amount to scale the weights by to convert them from \
floating point values to 16-bit fixed point numbers which can be \
shifted left by ring_buffer_to_input_left_shift to produce an\
s1615 fixed point number
"""
return float(math.pow(2, 16 - (ring_buffer_to_input_left_shift + 1)))
    def _write_synapse_parameters(
            self, spec, subvertex, subgraph, graph_mapper, post_slices,
            post_slice_index, post_vertex_slice, input_type):
        """ Write the synapse parameters region (per-neuron synapse type\
            parameters followed by ring buffer left shifts).

        :return: numpy array of weight scales, one per synapse type
        """
        # Get the ring buffer shifts and scaling factors
        weight_scale = input_type.get_global_weight_scale()
        ring_buffer_shifts = self._get_ring_buffer_to_input_left_shifts(
            subvertex, subgraph, graph_mapper, post_slices, post_slice_index,
            post_vertex_slice, self._machine_time_step, weight_scale)
        spec.switch_write_focus(
            region=constants.POPULATION_BASED_REGIONS.SYNAPSE_PARAMS.value)
        # Per-neuron synapse type parameters first
        utility_calls.write_parameters_per_neuron(
            spec, post_vertex_slice,
            self._synapse_type.get_synapse_type_parameters())
        # Then one left shift per synapse type
        spec.write_array(ring_buffer_shifts)
        # Combine the fixed-point conversion factor with the global scale
        weight_scales = numpy.array([
            self._get_weight_scale(r) * weight_scale
            for r in ring_buffer_shifts])
        return weight_scales
def _write_padding(
self, spec, synaptic_matrix_region, next_block_start_address):
next_block_allowed_address = self._population_table_type\
.get_next_allowed_address(next_block_start_address)
if next_block_allowed_address != next_block_start_address:
# Pad out data file with the added alignment bytes:
spec.comment("\nWriting population table required"
" padding\n")
spec.switch_write_focus(synaptic_matrix_region)
spec.set_register_value(
register_id=15,
data=next_block_allowed_address - next_block_start_address)
spec.write_value(
data=0xDD, repeats=15, repeats_is_register=True,
data_type=DataType.UINT8)
return next_block_allowed_address
return next_block_start_address
    def _write_synaptic_matrix_and_master_population_table(
            self, spec, post_slices, post_slice_index, subvertex,
            post_vertex_slice, all_syn_block_sz, weight_scales,
            master_pop_table_region, synaptic_matrix_region, routing_info,
            graph_mapper, partitioned_graph):
        """ Simultaneously generates both the master population table and
            the synaptic matrix.

        Each projection subedge contributes a (possibly delayed) block of
        rows to the synaptic matrix; the master population table records
        where each block starts so the machine can find it by routing key.
        Raises an Exception if more than all_syn_block_sz bytes are written.
        """
        spec.comment(
            "\nWriting Synaptic Matrix and Master Population Table:\n")
        # Track writes inside the synaptic matrix region:
        next_block_start_address = 0
        n_synapse_types = self._synapse_type.get_n_synapse_types()
        # Get the edges
        in_subedges = \
            partitioned_graph.incoming_subedges_from_subvertex(subvertex)
        # Set up the master population table
        self._population_table_type.initialise_table(
            spec, master_pop_table_region)
        # For each subedge into the subvertex, create a synaptic list
        for subedge in in_subedges:
            edge = graph_mapper.get_partitionable_edge_from_partitioned_edge(
                subedge)
            if isinstance(edge, ProjectionPartitionableEdge):
                spec.comment("\nWriting matrix for subedge:{}\n".format(
                    subedge.label))
                pre_vertex_slice = graph_mapper.get_subvertex_slice(
                    subedge.pre_subvertex)
                pre_slices = graph_mapper.get_subvertex_slices(edge.pre_vertex)
                pre_slice_index = graph_mapper.get_subvertex_index(
                    subedge.pre_subvertex)
                for synapse_info in edge.synapse_information:
                    # Generate both the direct and delayed row data
                    (row_data, row_length, delayed_row_data,
                     delayed_row_length, delayed_source_ids, delay_stages) = \
                        self._synapse_io.get_synapses(
                            synapse_info, pre_slices, pre_slice_index,
                            post_slices, post_slice_index, pre_vertex_slice,
                            post_vertex_slice, edge.n_delay_stages,
                            self._population_table_type, n_synapse_types,
                            weight_scales)
                    # Delays beyond the basic range must be handled by a
                    # delay extension vertex on the pre side
                    if edge.delay_edge is not None:
                        edge.delay_edge.pre_vertex.add_delays(
                            pre_vertex_slice, delayed_source_ids, delay_stages)
                    elif delayed_source_ids.size != 0:
                        raise Exception("Found delayed source ids but no delay"
                                        " edge for edge {}".format(edge.label))
                    # Satisfy any pre-run connection holders by reading the
                    # generated rows back into connection lists
                    if ((edge, synapse_info) in
                            self._pre_run_connection_holders):
                        holders = self._pre_run_connection_holders[
                            edge, synapse_info]
                        for connection_holder in holders:
                            connections = self._synapse_io.read_synapses(
                                synapse_info, pre_vertex_slice,
                                post_vertex_slice, row_length,
                                delayed_row_length, n_synapse_types,
                                weight_scales, row_data, delayed_row_data,
                                edge.n_delay_stages)
                            connection_holder.add_connections(connections)
                            connection_holder.finish()
                    # Write the undelayed block and record it in the table
                    if len(row_data) > 0:
                        next_block_start_address = self._write_padding(
                            spec, synaptic_matrix_region,
                            next_block_start_address)
                        spec.switch_write_focus(synaptic_matrix_region)
                        spec.write_array(row_data)
                        partition = partitioned_graph.get_partition_of_subedge(
                            subedge)
                        keys_and_masks = \
                            routing_info.get_keys_and_masks_from_partition(
                                partition)
                        self._population_table_type\
                            .update_master_population_table(
                                spec, next_block_start_address, row_length,
                                keys_and_masks, master_pop_table_region)
                        next_block_start_address += len(row_data) * 4
                    del row_data
                    if next_block_start_address > all_syn_block_sz:
                        raise Exception(
                            "Too much synaptic memory has been written:"
                            " {} of {} ".format(
                                next_block_start_address, all_syn_block_sz))
                    # Write the delayed block, keyed by the delay extension
                    if len(delayed_row_data) > 0:
                        next_block_start_address = self._write_padding(
                            spec, synaptic_matrix_region,
                            next_block_start_address)
                        spec.switch_write_focus(synaptic_matrix_region)
                        spec.write_array(delayed_row_data)
                        keys_and_masks = self._delay_key_index[
                            (edge.pre_vertex, pre_vertex_slice.lo_atom,
                             pre_vertex_slice.hi_atom)]
                        self._population_table_type\
                            .update_master_population_table(
                                spec, next_block_start_address,
                                delayed_row_length, keys_and_masks,
                                master_pop_table_region)
                        next_block_start_address += len(delayed_row_data) * 4
                    del delayed_row_data
                    if next_block_start_address > all_syn_block_sz:
                        raise Exception(
                            "Too much synaptic memory has been written:"
                            " {} of {} ".format(
                                next_block_start_address, all_syn_block_sz))
        self._population_table_type.finish_master_pop_table(
            spec, master_pop_table_region)
    def write_data_spec(
            self, spec, vertex, post_vertex_slice, subvertex, placement,
            partitioned_graph, graph, routing_info, graph_mapper, input_type):
        """ Write all synapse-related regions of the data specification for\
            one subvertex: synapse parameters, synaptic matrix, master\
            population table and synapse dynamics.
        """
        # Create an index of delay keys into this subvertex
        for subedge in partitioned_graph.incoming_subedges_from_subvertex(
                subvertex):
            edge = graph_mapper.get_partitionable_edge_from_partitioned_edge(
                subedge)
            if isinstance(edge.pre_vertex, DelayExtensionVertex):
                pre_vertex_slice = graph_mapper.get_subvertex_slice(
                    subedge.pre_subvertex)
                partition = partitioned_graph.get_partition_of_subedge(subedge)
                self._delay_key_index[
                    (edge.pre_vertex.source_vertex, pre_vertex_slice.lo_atom,
                     pre_vertex_slice.hi_atom)] = \
                    routing_info.get_keys_and_masks_from_partition(partition)
        post_slices = graph_mapper.get_subvertex_slices(vertex)
        post_slice_index = graph_mapper.get_subvertex_index(subvertex)
        # Reserve the memory
        subvert_in_edges = partitioned_graph.incoming_subedges_from_subvertex(
            subvertex)
        all_syn_block_sz = self._get_exact_synaptic_blocks_size(
            post_slices, post_slice_index, post_vertex_slice, graph_mapper,
            subvertex, subvert_in_edges)
        self._reserve_memory_regions(
            spec, vertex, subvertex, post_vertex_slice, graph,
            partitioned_graph, all_syn_block_sz, graph_mapper)
        # Write the synapse parameters, getting the per-type weight scales
        weight_scales = self._write_synapse_parameters(
            spec, subvertex, partitioned_graph, graph_mapper, post_slices,
            post_slice_index, post_vertex_slice, input_type)
        self._write_synaptic_matrix_and_master_population_table(
            spec, post_slices, post_slice_index, subvertex, post_vertex_slice,
            all_syn_block_sz, weight_scales,
            constants.POPULATION_BASED_REGIONS.POPULATION_TABLE.value,
            constants.POPULATION_BASED_REGIONS.SYNAPTIC_MATRIX.value,
            routing_info, graph_mapper, partitioned_graph)
        self._synapse_dynamics.write_parameters(
            spec, constants.POPULATION_BASED_REGIONS.SYNAPSE_DYNAMICS.value,
            self._machine_time_step, weight_scales)
        # Keep the scales so read-back can convert weights correctly
        self._weight_scales[placement] = weight_scales
    def get_connections_from_machine(
            self, transceiver, placement, subedge, graph_mapper,
            routing_infos, synapse_info, partitioned_graph):
        """ Read the synaptic connections for one subedge back from the\
            machine and return them as a connection list, or None if the\
            subedge is not a projection edge.
        """
        edge = graph_mapper.get_partitionable_edge_from_partitioned_edge(
            subedge)
        if not isinstance(edge, ProjectionPartitionableEdge):
            return None
        # Get details for extraction
        pre_vertex_slice = graph_mapper.get_subvertex_slice(
            subedge.pre_subvertex)
        post_vertex_slice = graph_mapper.get_subvertex_slice(
            subedge.post_subvertex)
        n_synapse_types = self._synapse_type.get_n_synapse_types()
        # Get the key for the pre_subvertex
        partition = partitioned_graph.get_partition_of_subedge(subedge)
        key = routing_infos.get_keys_and_masks_from_partition(
            partition)[0].key
        # Get the key for the delayed pre_subvertex
        delayed_key = None
        if edge.delay_edge is not None:
            delayed_key = self._delay_key_index[
                (edge.pre_vertex, pre_vertex_slice.lo_atom,
                 pre_vertex_slice.hi_atom)][0].key
        # Get the block for the connections from the pre_subvertex
        master_pop_table_address = \
            helpful_functions.locate_memory_region_for_placement(
                placement,
                constants.POPULATION_BASED_REGIONS.POPULATION_TABLE.value,
                transceiver)
        synaptic_matrix_address = \
            helpful_functions.locate_memory_region_for_placement(
                placement,
                constants.POPULATION_BASED_REGIONS.SYNAPTIC_MATRIX.value,
                transceiver)
        data, max_row_length = self._retrieve_synaptic_block(
            transceiver, placement, master_pop_table_address,
            synaptic_matrix_address, key, pre_vertex_slice.n_atoms,
            synapse_info.index)
        # Get the block for the connections from the delayed pre_subvertex
        delayed_data = None
        delayed_max_row_length = 0
        if delayed_key is not None:
            delayed_data, delayed_max_row_length = \
                self._retrieve_synaptic_block(
                    transceiver, placement, master_pop_table_address,
                    synaptic_matrix_address, delayed_key,
                    pre_vertex_slice.n_atoms * edge.n_delay_stages,
                    synapse_info.index)
        # Convert the blocks into connections
        return self._synapse_io.read_synapses(
            synapse_info, pre_vertex_slice, post_vertex_slice,
            max_row_length, delayed_max_row_length, n_synapse_types,
            self._weight_scales[placement], data, delayed_data,
            edge.n_delay_stages)
def _retrieve_synaptic_block(
self, transceiver, placement, master_pop_table_address,
synaptic_matrix_address, key, n_rows, index):
""" Read in a synaptic block from a given processor and subvertex on\
the machine
"""
# See if we have already got this block
if (placement, key, index) in self._retrieved_blocks:
return self._retrieved_blocks[(placement, key, index)]
items = \
self._population_table_type.extract_synaptic_matrix_data_location(
key, master_pop_table_address, transceiver,
placement.x, placement.y)
if index >= len(items):
return None, None
max_row_length, synaptic_block_offset = items[index]
block = None
if max_row_length > 0 and synaptic_block_offset is not None:
# calculate the synaptic block size in bytes
synaptic_block_size = self._synapse_io.get_block_n_bytes(
max_row_length, n_rows)
# read in and return the synaptic block
block = transceiver.read_memory(
placement.x, placement.y,
synaptic_matrix_address + synaptic_block_offset,
synaptic_block_size)
self._retrieved_blocks[(placement, key, index)] = (
block, max_row_length)
return block, max_row_length
# inherited from AbstractProvidesIncomingPartitionConstraints
def get_incoming_partition_constraints(self):
return self._population_table_type.get_edge_constraints() | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neuron/synaptic_manager.py | 0.580709 | 0.40439 | synaptic_manager.py | pypi |
from pacman.model.partitionable_graph.abstract_partitionable_vertex \
import AbstractPartitionableVertex
from pacman.model.constraints.key_allocator_constraints\
.key_allocator_contiguous_range_constraint \
import KeyAllocatorContiguousRangeContraint
# front end common imports
from spinn_front_end_common.abstract_models.\
abstract_provides_incoming_partition_constraints import \
AbstractProvidesIncomingPartitionConstraints
from spinn_front_end_common.abstract_models.\
abstract_provides_outgoing_partition_constraints import \
AbstractProvidesOutgoingPartitionConstraints
from spinn_front_end_common.utilities import constants as \
common_constants
from spinn_front_end_common.interface.buffer_management\
.buffer_models.receives_buffers_to_host_basic_impl \
import ReceiveBuffersToHostBasicImpl
from spinn_front_end_common.abstract_models.abstract_data_specable_vertex \
import AbstractDataSpecableVertex
# spynnaker imports
from spynnaker.pyNN.models.neuron.synaptic_manager import SynapticManager
from spynnaker.pyNN.utilities import utility_calls
from spynnaker.pyNN.models.common import recording_utils
from spynnaker.pyNN.models.abstract_models.abstract_population_initializable \
import AbstractPopulationInitializable
from spynnaker.pyNN.models.abstract_models.abstract_population_settable \
import AbstractPopulationSettable
from spinn_front_end_common.abstract_models.abstract_changable_after_run \
import AbstractChangableAfterRun
from spynnaker.pyNN.models.common.abstract_spike_recordable \
import AbstractSpikeRecordable
from spynnaker.pyNN.models.common.abstract_v_recordable \
import AbstractVRecordable
from spynnaker.pyNN.models.common.abstract_gsyn_recordable \
import AbstractGSynRecordable
from spynnaker.pyNN.models.common.spike_recorder import SpikeRecorder
from spynnaker.pyNN.models.common.v_recorder import VRecorder
from spynnaker.pyNN.models.common.gsyn_recorder import GsynRecorder
from spynnaker.pyNN.utilities import constants
from spynnaker.pyNN.utilities.conf import config
from spynnaker.pyNN.models.neuron.population_partitioned_vertex \
import PopulationPartitionedVertex
# dsg imports
from data_specification.data_specification_generator \
import DataSpecificationGenerator
from abc import ABCMeta
from six import add_metaclass
import logging
import os
logger = logging.getLogger(__name__)
# TODO: Make sure these values are correct (particularly CPU cycles)
# Fixed per-core costs of the neuron processing loop, independent of the
# number of neurons on the core
_NEURON_BASE_DTCM_USAGE_IN_BYTES = 36
_NEURON_BASE_SDRAM_USAGE_IN_BYTES = 12
_NEURON_BASE_N_CPU_CYCLES_PER_NEURON = 22
_NEURON_BASE_N_CPU_CYCLES = 10

# TODO: Make sure these values are correct (particularly CPU cycles)
# Fixed costs of the c_main framework code on each core
_C_MAIN_BASE_DTCM_USAGE_IN_BYTES = 12
_C_MAIN_BASE_SDRAM_USAGE_IN_BYTES = 72
_C_MAIN_BASE_N_CPU_CYCLES = 0
@add_metaclass(ABCMeta)
class AbstractPopulationVertex(
AbstractPartitionableVertex, AbstractDataSpecableVertex,
AbstractSpikeRecordable, AbstractVRecordable, AbstractGSynRecordable,
AbstractProvidesOutgoingPartitionConstraints,
AbstractProvidesIncomingPartitionConstraints,
AbstractPopulationInitializable, AbstractPopulationSettable,
AbstractChangableAfterRun):
""" Underlying vertex model for Neural Populations.
"""
    def __init__(
            self, n_neurons, binary, label, max_atoms_per_core,
            machine_time_step, timescale_factor, spikes_per_second,
            ring_buffer_sigma, incoming_spike_buffer_size, model_name,
            neuron_model, input_type, synapse_type, threshold_type,
            additional_input=None, constraints=None):
        """ Create the vertex.

        :param n_neurons: number of neurons in the population
        :param binary: name of the executable to load
        :param incoming_spike_buffer_size: size of the on-core input spike\
            buffer; read from config if None
        :param neuron_model, input_type, synapse_type, threshold_type:\
            components defining the neuron behaviour
        :param additional_input: optional extra input component
        """
        AbstractPartitionableVertex.__init__(
            self, n_neurons, label, max_atoms_per_core, constraints)
        AbstractDataSpecableVertex.__init__(
            self, machine_time_step, timescale_factor)
        AbstractSpikeRecordable.__init__(self)
        AbstractVRecordable.__init__(self)
        AbstractGSynRecordable.__init__(self)
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        AbstractProvidesIncomingPartitionConstraints.__init__(self)
        AbstractPopulationInitializable.__init__(self)
        AbstractPopulationSettable.__init__(self)
        AbstractChangableAfterRun.__init__(self)
        self._binary = binary
        self._label = label
        self._machine_time_step = machine_time_step
        self._timescale_factor = timescale_factor
        # Fall back to the configured buffer size when not given
        self._incoming_spike_buffer_size = incoming_spike_buffer_size
        if incoming_spike_buffer_size is None:
            self._incoming_spike_buffer_size = config.getint(
                "Simulation", "incoming_spike_buffer_size")
        self._model_name = model_name
        self._neuron_model = neuron_model
        self._input_type = input_type
        self._threshold_type = threshold_type
        self._additional_input = additional_input
        # Set up for recording
        self._spike_recorder = SpikeRecorder(machine_time_step)
        self._v_recorder = VRecorder(machine_time_step)
        self._gsyn_recorder = GsynRecorder(machine_time_step)
        self._spike_buffer_max_size = config.getint(
            "Buffers", "spike_buffer_size")
        self._v_buffer_max_size = config.getint(
            "Buffers", "v_buffer_size")
        self._gsyn_buffer_max_size = config.getint(
            "Buffers", "gsyn_buffer_size")
        self._buffer_size_before_receive = config.getint(
            "Buffers", "buffer_size_before_receive")
        self._time_between_requests = config.getint(
            "Buffers", "time_between_requests")
        self._minimum_buffer_sdram = config.getint(
            "Buffers", "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")
        self._receive_buffer_host = config.get(
            "Buffers", "receive_buffer_host")
        self._receive_buffer_port = config.getint(
            "Buffers", "receive_buffer_port")
        self._enable_buffered_recording = config.getboolean(
            "Buffers", "enable_buffered_recording")
        # Set up synapse handling
        self._synapse_manager = SynapticManager(
            synapse_type, machine_time_step, ring_buffer_sigma,
            spikes_per_second)
        # bool for if state has changed.
        self._change_requires_mapping = True
@property
def requires_mapping(self):
return self._change_requires_mapping
def mark_no_changes(self):
self._change_requires_mapping = False
    def create_subvertex(
            self, vertex_slice, resources_required, label=None,
            constraints=None):
        """ Create a partitioned vertex for a slice of this population,\
            configuring buffered recording output according to the\
            configured buffer sizes (or auto pause and resume settings).
        """
        is_recording = (
            self._gsyn_recorder.record_gsyn or self._v_recorder.record_v or
            self._spike_recorder.record
        )
        subvertex = PopulationPartitionedVertex(
            resources_required, label, is_recording, constraints)
        if not self._using_auto_pause_and_resume:
            # Fixed run length: decide per-channel whether the recorded
            # data fits in SDRAM or must be streamed back to the host
            spike_buffer_size = self._spike_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            v_buffer_size = self._v_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            gsyn_buffer_size = self._gsyn_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            spike_buffering_needed = recording_utils.needs_buffering(
                self._spike_buffer_max_size, spike_buffer_size,
                self._enable_buffered_recording)
            v_buffering_needed = recording_utils.needs_buffering(
                self._v_buffer_max_size, v_buffer_size,
                self._enable_buffered_recording)
            gsyn_buffering_needed = recording_utils.needs_buffering(
                self._gsyn_buffer_max_size, gsyn_buffer_size,
                self._enable_buffered_recording)
            if (spike_buffering_needed or v_buffering_needed or
                    gsyn_buffering_needed):
                subvertex.activate_buffering_output(
                    buffering_ip_address=self._receive_buffer_host,
                    buffering_port=self._receive_buffer_port)
        else:
            # Auto pause and resume: report the SDRAM needed per timestep
            # so the run can be split automatically
            sdram_per_ts = 0
            sdram_per_ts += self._spike_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, 1)
            sdram_per_ts += self._v_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, 1)
            sdram_per_ts += self._gsyn_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, 1)
            subvertex.activate_buffering_output(
                minimum_sdram_for_buffering=self._minimum_buffer_sdram,
                buffered_sdram_per_timestep=sdram_per_ts)
        return subvertex
@property
def maximum_delay_supported_in_ms(self):
return self._synapse_manager.maximum_delay_supported_in_ms
# @implements AbstractPopulationVertex.get_cpu_usage_for_atoms
def get_cpu_usage_for_atoms(self, vertex_slice, graph):
per_neuron_cycles = (
_NEURON_BASE_N_CPU_CYCLES_PER_NEURON +
self._neuron_model.get_n_cpu_cycles_per_neuron() +
self._input_type.get_n_cpu_cycles_per_neuron(
self._synapse_manager.synapse_type.get_n_synapse_types()) +
self._threshold_type.get_n_cpu_cycles_per_neuron())
if self._additional_input is not None:
per_neuron_cycles += \
self._additional_input.get_n_cpu_cycles_per_neuron()
return (_NEURON_BASE_N_CPU_CYCLES +
_C_MAIN_BASE_N_CPU_CYCLES +
(per_neuron_cycles * vertex_slice.n_atoms) +
self._spike_recorder.get_n_cpu_cycles(vertex_slice.n_atoms) +
self._v_recorder.get_n_cpu_cycles(vertex_slice.n_atoms) +
self._gsyn_recorder.get_n_cpu_cycles(vertex_slice.n_atoms) +
self._synapse_manager.get_n_cpu_cycles(vertex_slice, graph))
# @implements AbstractPopulationVertex.get_dtcm_usage_for_atoms
def get_dtcm_usage_for_atoms(self, vertex_slice, graph):
per_neuron_usage = (
self._neuron_model.get_dtcm_usage_per_neuron_in_bytes() +
self._input_type.get_dtcm_usage_per_neuron_in_bytes() +
self._threshold_type.get_dtcm_usage_per_neuron_in_bytes())
if self._additional_input is not None:
per_neuron_usage += \
self._additional_input.get_dtcm_usage_per_neuron_in_bytes()
return (_NEURON_BASE_DTCM_USAGE_IN_BYTES +
(per_neuron_usage * vertex_slice.n_atoms) +
self._spike_recorder.get_dtcm_usage_in_bytes() +
self._v_recorder.get_dtcm_usage_in_bytes() +
self._gsyn_recorder.get_dtcm_usage_in_bytes() +
self._synapse_manager.get_dtcm_usage_in_bytes(
vertex_slice, graph))
def _get_sdram_usage_for_neuron_params(self, vertex_slice):
per_neuron_usage = (
self._input_type.get_sdram_usage_per_neuron_in_bytes() +
self._threshold_type.get_sdram_usage_per_neuron_in_bytes())
if self._additional_input is not None:
per_neuron_usage += \
self._additional_input.get_sdram_usage_per_neuron_in_bytes()
return ((common_constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) +
ReceiveBuffersToHostBasicImpl.get_recording_data_size(3) +
(per_neuron_usage * vertex_slice.n_atoms) +
self._neuron_model.get_sdram_usage_in_bytes(
vertex_slice.n_atoms))
    # @implements AbstractPartitionableVertex.get_sdram_usage_for_atoms
    def get_sdram_usage_for_atoms(self, vertex_slice, graph):
        """ Estimate total SDRAM usage in bytes for this slice, covering\
            neuron parameters, buffer state, provenance, synapse regions,\
            malloc overheads and recording buffers.
        """
        sdram_requirement = (
            self._get_sdram_usage_for_neuron_params(vertex_slice) +
            ReceiveBuffersToHostBasicImpl.get_buffer_state_region_size(3) +
            PopulationPartitionedVertex.get_provenance_data_size(
                PopulationPartitionedVertex
                .N_ADDITIONAL_PROVENANCE_DATA_ITEMS) +
            self._synapse_manager.get_sdram_usage_in_bytes(
                vertex_slice, graph.incoming_edges_to_vertex(self)) +
            (self._get_number_of_mallocs_used_by_dsg(
                vertex_slice, graph.incoming_edges_to_vertex(self)) *
             common_constants.SARK_PER_MALLOC_SDRAM_USAGE))
        # add recording SDRAM if not automatically calculated
        if not self._using_auto_pause_and_resume:
            spike_buffer_size = self._spike_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            v_buffer_size = self._v_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            gsyn_buffer_size = self._gsyn_recorder.get_sdram_usage_in_bytes(
                vertex_slice.n_atoms, self._no_machine_time_steps)
            sdram_requirement += recording_utils.get_buffer_sizes(
                self._spike_buffer_max_size, spike_buffer_size,
                self._enable_buffered_recording)
            sdram_requirement += recording_utils.get_buffer_sizes(
                self._v_buffer_max_size, v_buffer_size,
                self._enable_buffered_recording)
            sdram_requirement += recording_utils.get_buffer_sizes(
                self._gsyn_buffer_max_size, gsyn_buffer_size,
                self._enable_buffered_recording)
        else:
            # Auto pause and resume only needs the minimum buffer space
            sdram_requirement += self._minimum_buffer_sdram
        return sdram_requirement
# @implements AbstractPopulationVertex.model_name
def model_name(self):
return self._model_name
def _get_number_of_mallocs_used_by_dsg(self, vertex_slice, in_edges):
extra_mallocs = 0
if self._gsyn_recorder.record_gsyn:
extra_mallocs += 1
if self._v_recorder.record_v:
extra_mallocs += 1
if self._spike_recorder.record:
extra_mallocs += 1
return (
2 + self._synapse_manager.get_number_of_mallocs_used_by_dsg() +
extra_mallocs)
def _get_number_of_mallocs_from_basic_model(self):
# one for system, one for neuron params
return 2
    def _reserve_memory_regions(
            self, spec, vertex_slice, spike_history_region_sz,
            v_history_region_sz, gsyn_history_region_sz, subvertex):
        """ Reserve the neuron-side DSG regions: system, neuron parameters,\
            the three recording buffer regions with their buffering state,\
            and the provenance region.
        """
        spec.comment("\nReserving memory space for data regions:\n\n")
        # Reserve memory:
        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.SYSTEM.value,
            size=((
                common_constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4) +
                subvertex.get_recording_data_size(3)), label='System')
        spec.reserve_memory_region(
            region=constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value,
            size=self._get_sdram_usage_for_neuron_params(vertex_slice),
            label='NeuronParams')
        # Recording regions are reserved together with their buffer state
        subvertex.reserve_buffer_regions(
            spec,
            constants.POPULATION_BASED_REGIONS.BUFFERING_OUT_STATE.value,
            [constants.POPULATION_BASED_REGIONS.SPIKE_HISTORY.value,
             constants.POPULATION_BASED_REGIONS.POTENTIAL_HISTORY.value,
             constants.POPULATION_BASED_REGIONS.GSYN_HISTORY.value],
            [spike_history_region_sz, v_history_region_sz,
             gsyn_history_region_sz])
        subvertex.reserve_provenance_data_region(spec)
    def _write_setup_info(
            self, spec, spike_history_region_sz, neuron_potential_region_sz,
            gsyn_region_sz, ip_tags, buffer_size_before_receive,
            time_between_requests, subvertex):
        """ Write information used to control the simulation and gathering of\
            results.

        Writes the basic setup words to the system region, then the
        recording channel sizes and buffering parameters.
        """
        # Write this to the system region (to be picked up by the simulation):
        self._write_basic_setup_info(
            spec, constants.POPULATION_BASED_REGIONS.SYSTEM.value)
        subvertex.write_recording_data(
            spec, ip_tags,
            [spike_history_region_sz, neuron_potential_region_sz,
             gsyn_region_sz], buffer_size_before_receive,
            time_between_requests)
    def _write_neuron_parameters(
            self, spec, key, vertex_slice):
        """ Write the neuron parameters region: routing key information,\
            neuron count, spike buffer size, then global, per-neuron,\
            input type, additional input and threshold parameters.

        :param key: base routing key for this subvertex, or None if spikes\
            are not routed
        """
        n_atoms = (vertex_slice.hi_atom - vertex_slice.lo_atom) + 1
        spec.comment("\nWriting Neuron Parameters for {} Neurons:\n".format(
            n_atoms))
        # Set the focus to the memory region 2 (neuron parameters):
        spec.switch_write_focus(
            region=constants.POPULATION_BASED_REGIONS.NEURON_PARAMS.value)
        # Write whether the key is to be used, and then the key, or 0 if it
        # isn't to be used
        if key is None:
            spec.write_value(data=0)
            spec.write_value(data=0)
        else:
            spec.write_value(data=1)
            spec.write_value(data=key)
        # Write the number of neurons in the block:
        spec.write_value(data=n_atoms)
        # Write the size of the incoming spike buffer
        spec.write_value(data=self._incoming_spike_buffer_size)
        # Write the global parameters
        global_params = self._neuron_model.get_global_parameters()
        for param in global_params:
            spec.write_value(data=param.get_value(),
                             data_type=param.get_dataspec_datatype())
        # Write the neuron parameters
        utility_calls.write_parameters_per_neuron(
            spec, vertex_slice, self._neuron_model.get_neural_parameters())
        # Write the input type parameters
        utility_calls.write_parameters_per_neuron(
            spec, vertex_slice, self._input_type.get_input_type_parameters())
        # Write the additional input parameters
        if self._additional_input is not None:
            utility_calls.write_parameters_per_neuron(
                spec, vertex_slice, self._additional_input.get_parameters())
        # Write the threshold type parameters
        utility_calls.write_parameters_per_neuron(
            spec, vertex_slice,
            self._threshold_type.get_threshold_parameters())
    # @implements AbstractDataSpecableVertex.generate_data_spec
    def generate_data_spec(
            self, subvertex, placement, partitioned_graph, graph, routing_info,
            hostname, graph_mapper, report_folder, ip_tags,
            reverse_ip_tags, write_text_specs, application_run_time_folder):
        """ Generate the data specification for one placed subvertex.

        Computes the recording buffer sizes, reserves the memory regions,
        writes the setup and neuron parameter regions, delegates the
        synaptic regions to the synapse manager, and returns the name of
        the file the specification was written to.
        """
        # Create new DataSpec for this processor:
        data_writer, report_writer = self.get_data_spec_file_writers(
            placement.x, placement.y, placement.p, hostname, report_folder,
            write_text_specs, application_run_time_folder)
        spec = DataSpecificationGenerator(data_writer, report_writer)
        spec.comment("\n*** Spec for block of {} neurons ***\n".format(
            self.model_name))
        vertex_slice = graph_mapper.get_subvertex_slice(subvertex)

        # Get recording sizes - the order is important here as spikes will
        # require less space than voltage and voltage less than gsyn. This
        # order ensures that the buffer size before receive is optimum for
        # all recording channels
        # TODO: Maybe split the buffer size before receive by channel?
        spike_buffer_size = self._spike_recorder.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms, self._no_machine_time_steps)
        v_buffer_size = self._v_recorder.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms, self._no_machine_time_steps)
        gsyn_buffer_size = self._gsyn_recorder.get_sdram_usage_in_bytes(
            vertex_slice.n_atoms, self._no_machine_time_steps)
        spike_history_sz = recording_utils.get_buffer_sizes(
            self._spike_buffer_max_size, spike_buffer_size,
            self._enable_buffered_recording)
        v_history_sz = recording_utils.get_buffer_sizes(
            self._v_buffer_max_size, v_buffer_size,
            self._enable_buffered_recording)
        gsyn_history_sz = recording_utils.get_buffer_sizes(
            self._gsyn_buffer_max_size, gsyn_buffer_size,
            self._enable_buffered_recording)
        spike_buffering_needed = recording_utils.needs_buffering(
            self._spike_buffer_max_size, spike_buffer_size,
            self._enable_buffered_recording)
        v_buffering_needed = recording_utils.needs_buffering(
            self._v_buffer_max_size, v_buffer_size,
            self._enable_buffered_recording)
        gsyn_buffering_needed = recording_utils.needs_buffering(
            self._gsyn_buffer_max_size, gsyn_buffer_size,
            self._enable_buffered_recording)

        # If no channel needs buffering, allow receives of the largest whole
        # recording region (plus headroom) in one go
        buffer_size_before_receive = self._buffer_size_before_receive
        if (not spike_buffering_needed and not v_buffering_needed and
                not gsyn_buffering_needed):
            buffer_size_before_receive = max((
                spike_history_sz, v_history_sz, gsyn_history_sz)) + 256

        # Reserve memory regions
        self._reserve_memory_regions(
            spec, vertex_slice, spike_history_sz, v_history_sz,
            gsyn_history_sz, subvertex)

        # Declare random number generators and distributions:
        # TODO add random distribution stuff
        # self.write_random_distribution_declarations(spec)

        # Get the key - use only the first edge
        key = None
        for partition in partitioned_graph.\
                outgoing_edges_partitions_from_vertex(subvertex).values():
            keys_and_masks = \
                routing_info.get_keys_and_masks_from_partition(partition)

            # NOTE: using the first key assigned as the key. Should in future
            # get the list of keys and use one per neuron, to allow arbitrary
            # key and mask assignments
            key = keys_and_masks[0].key

        # Write the regions
        self._write_setup_info(
            spec, spike_history_sz, v_history_sz, gsyn_history_sz, ip_tags,
            buffer_size_before_receive, self._time_between_requests, subvertex)
        self._write_neuron_parameters(spec, key, vertex_slice)

        # allow the synaptic matrix to write its data spec-able data
        self._synapse_manager.write_data_spec(
            spec, self, vertex_slice, subvertex, placement, partitioned_graph,
            graph, routing_info, graph_mapper, self._input_type)

        # End the writing of this specification:
        spec.end_specification()
        data_writer.close()

        return data_writer.filename
# @implements AbstractDataSpecableVertex.get_binary_file_name
def get_binary_file_name(self):
# Split binary name into title and extension
binary_title, binary_extension = os.path.splitext(self._binary)
# Reunite title and extension and return
return (binary_title + self._synapse_manager.vertex_executable_suffix +
binary_extension)
    # @implements AbstractSpikeRecordable.is_recording_spikes
    def is_recording_spikes(self):
        """ True if spike recording is currently enabled """
        return self._spike_recorder.record

    # @implements AbstractSpikeRecordable.set_recording_spikes
    def set_recording_spikes(self):
        """ Enable spike recording.

        Re-mapping is only flagged when recording was previously disabled,
        since enabling it changes the SDRAM requirements.
        """
        self._change_requires_mapping = not self._spike_recorder.record
        self._spike_recorder.record = True

    # @implements AbstractSpikeRecordable.get_spikes
    def get_spikes(self, placements, graph_mapper, buffer_manager):
        """ Read the recorded spikes back from the machine via the\
            spike recorder.
        """
        return self._spike_recorder.get_spikes(
            self._label, buffer_manager,
            constants.POPULATION_BASED_REGIONS.SPIKE_HISTORY.value,
            constants.POPULATION_BASED_REGIONS.BUFFERING_OUT_STATE.value,
            placements, graph_mapper, self)
    # @implements AbstractVRecordable.is_recording_v
    def is_recording_v(self):
        """ True if membrane voltage recording is currently enabled """
        return self._v_recorder.record_v

    # @implements AbstractVRecordable.set_recording_v
    def set_recording_v(self):
        """ Enable membrane voltage recording.

        Re-mapping is only flagged when recording was previously disabled.
        """
        self._change_requires_mapping = not self._v_recorder.record_v
        self._v_recorder.record_v = True

    # @implements AbstractVRecordable.get_v
    def get_v(self, n_machine_time_steps, placements, graph_mapper,
              buffer_manager):
        """ Read the recorded membrane voltages back from the machine via\
            the voltage recorder.
        """
        return self._v_recorder.get_v(
            self._label, buffer_manager,
            constants.POPULATION_BASED_REGIONS.POTENTIAL_HISTORY.value,
            constants.POPULATION_BASED_REGIONS.BUFFERING_OUT_STATE.value,
            placements, graph_mapper, self)
    # @implements AbstractGSynRecordable.is_recording_gsyn
    def is_recording_gsyn(self):
        """ True if synaptic conductance (gsyn) recording is enabled """
        return self._gsyn_recorder.record_gsyn

    # @implements AbstractGSynRecordable.set_recording_gsyn
    def set_recording_gsyn(self):
        """ Enable synaptic conductance recording.

        Re-mapping is only flagged when recording was previously disabled.
        """
        self._change_requires_mapping = not self._gsyn_recorder.record_gsyn
        self._gsyn_recorder.record_gsyn = True

    # @implements AbstractGSynRecordable.get_gsyn
    def get_gsyn(self, n_machine_time_steps, placements, graph_mapper,
                 buffer_manager):
        """ Read the recorded synaptic conductances back from the machine\
            via the gsyn recorder.
        """
        return self._gsyn_recorder.get_gsyn(
            self._label, buffer_manager,
            constants.POPULATION_BASED_REGIONS.GSYN_HISTORY.value,
            constants.POPULATION_BASED_REGIONS.BUFFERING_OUT_STATE.value,
            placements, graph_mapper, self)
def initialize(self, variable, value):
initialize_attr = getattr(
self._neuron_model, "initialize_%s" % variable, None)
if initialize_attr is None or not callable(initialize_attr):
raise Exception("Vertex does not support initialisation of"
" parameter {}".format(variable))
initialize_attr(value)
self._change_requires_mapping = True
    @property
    def synapse_type(self):
        """ The synapse type, as held by the synapse manager """
        return self._synapse_manager.synapse_type

    @property
    def input_type(self):
        """ The input type component of this vertex """
        return self._input_type
def get_value(self, key):
""" Get a property of the overall model
"""
for obj in [self._neuron_model, self._input_type,
self._threshold_type, self._synapse_manager.synapse_type,
self._additional_input]:
if hasattr(obj, key):
return getattr(obj, key)
raise Exception("Population {} does not have parameter {}".format(
self.vertex, key))
def set_value(self, key, value):
""" Set a property of the overall model
"""
for obj in [self._neuron_model, self._input_type,
self._threshold_type, self._synapse_manager.synapse_type,
self._additional_input]:
if hasattr(obj, key):
setattr(obj, key, value)
self._change_requires_mapping = True
return
raise Exception("Type {} does not have parameter {}".format(
self._model_name, key))
    @property
    def weight_scale(self):
        """ The global weight scale, as defined by the input type """
        return self._input_type.get_global_weight_scale()

    @property
    def ring_buffer_sigma(self):
        # delegated to the synapse manager
        return self._synapse_manager.ring_buffer_sigma

    @ring_buffer_sigma.setter
    def ring_buffer_sigma(self, ring_buffer_sigma):
        self._synapse_manager.ring_buffer_sigma = ring_buffer_sigma

    @property
    def spikes_per_second(self):
        # delegated to the synapse manager
        return self._synapse_manager.spikes_per_second

    @spikes_per_second.setter
    def spikes_per_second(self, spikes_per_second):
        self._synapse_manager.spikes_per_second = spikes_per_second

    @property
    def synapse_dynamics(self):
        # delegated to the synapse manager
        return self._synapse_manager.synapse_dynamics

    @synapse_dynamics.setter
    def synapse_dynamics(self, synapse_dynamics):
        self._synapse_manager.synapse_dynamics = synapse_dynamics
    def add_pre_run_connection_holder(
            self, connection_holder, edge, synapse_info):
        """ Register a holder to collect connection data before the run\
            (delegated to the synapse manager).
        """
        self._synapse_manager.add_pre_run_connection_holder(
            connection_holder, edge, synapse_info)

    def get_connections_from_machine(
            self, transceiver, placement, subedge, graph_mapper,
            routing_infos, synapse_info, partitioned_graph):
        """ Read synaptic connection data back from the machine (delegated\
            to the synapse manager).
        """
        return self._synapse_manager.get_connections_from_machine(
            transceiver, placement, subedge, graph_mapper,
            routing_infos, synapse_info, partitioned_graph)
def is_data_specable(self):
return True
    def get_incoming_partition_constraints(self, partition, graph_mapper):
        """ Gets the constraints for partitions going into this vertex

        :param partition: partition that goes into this vertex (not used\
            here - the constraints are the same for all partitions)
        :param graph_mapper: the graph mapper object (not used here)
        :return: list of constraints
        """
        return self._synapse_manager.get_incoming_partition_constraints()
    def get_outgoing_partition_constraints(self, partition, graph_mapper):
        """ Gets the constraints for partitions going out of this vertex

        :param partition: the partition that leaves this vertex
        :param graph_mapper: the graph mapper object
        :return: list of constraints
        """
        # NOTE(review): outgoing keys are constrained to a contiguous
        # range - presumably so neuron ids map directly onto key offsets;
        # confirm before relaxing
        return [KeyAllocatorContiguousRangeContraint()]
def __str__(self):
return "{} with {} atoms".format(self._label, self.n_atoms)
def __repr__(self):
return self.__str__() | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neuron/abstract_population_vertex.py | 0.469034 | 0.32122 | abstract_population_vertex.py | pypi |
from pacman.model.partitioned_graph.partitioned_vertex import PartitionedVertex
# spinn front end common imports
from spinn_front_end_common.utilities.utility_objs\
.provenance_data_item import ProvenanceDataItem
from spinn_front_end_common.interface.provenance\
.provides_provenance_data_from_machine_impl \
import ProvidesProvenanceDataFromMachineImpl
from spinn_front_end_common.interface.buffer_management.buffer_models\
.receives_buffers_to_host_basic_impl import ReceiveBuffersToHostBasicImpl
from spinn_front_end_common.abstract_models.abstract_recordable \
import AbstractRecordable
# spynnaker imports
from spynnaker.pyNN.utilities import constants
from enum import Enum
class PopulationPartitionedVertex(
        PartitionedVertex, ReceiveBuffersToHostBasicImpl,
        ProvidesProvenanceDataFromMachineImpl, AbstractRecordable):
    """ A partitioned vertex for a population, which buffers recorded data\
        back to the host and translates the provenance words left by the\
        standard neuron models.
    """

    # indices of the provenance words appended by the standard neuron models
    EXTRA_PROVENANCE_DATA_ENTRIES = Enum(
        value="EXTRA_PROVENANCE_DATA_ENTRIES",
        names=[("PRE_SYNAPTIC_EVENT_COUNT", 0),
               ("SATURATION_COUNT", 1),
               ("BUFFER_OVERFLOW_COUNT", 2),
               ("CURRENT_TIMER_TIC", 3)])

    N_ADDITIONAL_PROVENANCE_DATA_ITEMS = 4

    def __init__(
            self, resources_required, label, is_recording, constraints=None):
        PartitionedVertex.__init__(
            self, resources_required, label, constraints)
        ReceiveBuffersToHostBasicImpl.__init__(self)
        ProvidesProvenanceDataFromMachineImpl.__init__(
            self, constants.POPULATION_BASED_REGIONS.PROVENANCE_DATA.value,
            self.N_ADDITIONAL_PROVENANCE_DATA_ITEMS)
        AbstractRecordable.__init__(self)
        self._is_recording = is_recording

    def is_recording(self):
        """ True if any recording is enabled on this vertex """
        return self._is_recording

    def get_provenance_data_from_machine(self, transceiver, placement):
        """ Read the provenance region for this placement and translate\
            the extra words into named provenance items.
        """
        raw_data = self._read_provenance_data(transceiver, placement)
        items = self._read_basic_provenance_items(raw_data, placement)
        raw_data = self._get_remaining_provenance_data_items(raw_data)

        entries = self.EXTRA_PROVENANCE_DATA_ENTRIES
        n_saturations = raw_data[entries.SATURATION_COUNT.value]
        n_buffer_overflows = raw_data[entries.BUFFER_OVERFLOW_COUNT.value]
        n_pre_synaptic_events = raw_data[
            entries.PRE_SYNAPTIC_EVENT_COUNT.value]
        last_timer_tick = raw_data[entries.CURRENT_TIMER_TIC.value]

        label, x, y, p, names = self._get_placement_details(placement)

        # translate into provenance data items
        items.extend([
            ProvenanceDataItem(
                self._add_name(
                    names, "Times_synaptic_weights_have_saturated"),
                n_saturations,
                report=n_saturations > 0,
                message=(
                    "The weights from the synapses for {} on {}, {}, {} "
                    "saturated "
                    "{} times. If this causes issues you can increase the "
                    "spikes_per_second and / or ring_buffer_sigma "
                    "values located within the .spynnaker.cfg file.".format(
                        label, x, y, p, n_saturations))),
            ProvenanceDataItem(
                self._add_name(
                    names, "Times_the_input_buffer_lost_packets"),
                n_buffer_overflows,
                report=n_buffer_overflows > 0,
                message=(
                    "The input buffer for {} on {}, {}, {} lost packets on "
                    "{} "
                    "occasions. This is often a sign that the system is "
                    "running "
                    "too quickly for the number of neurons per core. Please "
                    "increase the timer_tic or time_scale_factor or decrease "
                    "the "
                    "number of neurons per core.".format(
                        label, x, y, p, n_buffer_overflows))),
            ProvenanceDataItem(
                self._add_name(names, "Total_pre_synaptic_events"),
                n_pre_synaptic_events),
            ProvenanceDataItem(
                self._add_name(names, "Last_timer_tic_the_core_ran_to"),
                last_timer_tick)])
        return items
from abc import ABCMeta
from six import add_metaclass
from abc import abstractmethod
import logging
logger = logging.getLogger(__name__)
@add_metaclass(ABCMeta)
class AbstractMasterPopTableFactory(object):
    """ Base class for generators of the master population table, which\
        maps an incoming routing key to the location(s) of the matching\
        synaptic matrix data in SDRAM.
    """

    def __init__(self):
        pass

    @abstractmethod
    def extract_synaptic_matrix_data_location(
            self, incoming_key, master_pop_base_mem_address, txrx, chip_x,
            chip_y):
        """ Locate the synaptic matrix data for a given incoming key by\
            reading the master population table back from the machine.

        :param incoming_key: the source key which the synaptic matrix needs\
            to be mapped to
        :param master_pop_base_mem_address: the base address of the master pop
        :param txrx: the transceiver object from spinnman
        :param chip_y: the y coordinate of the chip of this master pop
        :param chip_x: the x coordinate of the chip of this master pop
        :type incoming_key: int
        :type master_pop_base_mem_address: int
        :type chip_y: int
        :type chip_x: int
        :type txrx: spinnman transceiver object
        :return: a synaptic matrix memory position.
        """

    @abstractmethod
    def update_master_population_table(
            self, spec, block_start_addr, row_length, keys_and_masks,
            master_pop_table_region):
        """ Update a spec with a master pop entry in some form

        :param spec: the spec to write the master pop entry to
        :param block_start_addr: the start address of the master pop table
        :param row_length: the row length of this entry
        :param keys_and_masks: list of key_and_mask objects containing the\
            keys and masks for a given edge that will require being\
            received to be stored in the master pop table
        :type keys_and_masks: list of\
            :py:class:`pacman.model.routing_info.key_and_mask.KeyAndMask`
        :param master_pop_table_region: the region to which the master pop\
            table is being stored
        :return: None
        """

    @abstractmethod
    def finish_master_pop_table(self, spec, master_pop_table_region):
        """ Complete the master pop table in the spec once all entries\
            have been added.

        :param spec: the spec to write the master pop entry to
        :param master_pop_table_region: the region to which the master pop\
            table is being stored
        :return: None
        """

    @abstractmethod
    def get_edge_constraints(self):
        """ Gets the constraints for edges coming in to a vertex that uses\
            this master pop table implementation.

        :return: a list of constraints
        :rtype: list of\
            :py:class:`pacman.model.constraints.abstract_constraint.AbstractConstraint`
        :raise None: this method does not raise any known exceptions
        """

    @abstractmethod
    def get_master_population_table_size(self, vertex_slice, in_edges):
        """ Get the size of the master population table in SDRAM
        """
from spynnaker.pyNN.models.neural_projections.projection_partitioned_edge import \
ProjectionPartitionedEdge
from spynnaker.pyNN.models.neuron.master_pop_table_generators\
.abstract_master_pop_table_factory import AbstractMasterPopTableFactory
import struct
from spynnaker.pyNN.models.neural_projections.projection_partitionable_edge \
import ProjectionPartitionableEdge
from pacman.model.partitionable_graph.abstract_partitionable_vertex\
import AbstractPartitionableVertex
# general imports
import logging
import numpy
import sys
import math
logger = logging.getLogger(__name__)
class _MasterPopEntry(object):
""" internal class that contains a master pop entry
"""
MASTER_POP_ENTRY_SIZE_BYTES = 12
MASTER_POP_ENTRY_SIZE_WORDS = 3
ADDRESS_LIST_ENTRY_SIZE_BYTES = 4
ADDRESS_LIST_ENTRY_SIZE_WORDS = 1
def __init__(self, routing_key, mask):
self._routing_key = routing_key
self._mask = mask
self._addresses_and_row_lengths = list()
def append(self, address, row_length):
self._addresses_and_row_lengths.append((address, row_length))
@property
def routing_key(self):
"""
:return: the key combo of this entry
"""
return self._routing_key
@property
def mask(self):
"""
:return: the mask of the key for this master pop entry
"""
return self._mask
@property
def addresses_and_row_lengths(self):
"""
:return: the memory address that this master pop entry points at
(synaptic matrix)
"""
return self._addresses_and_row_lengths
class MasterPopTableAsBinarySearch(AbstractMasterPopTableFactory):
    """ Master population table implementation that stores its entries\
        sorted by routing key, so that the core can locate an entry with a\
        binary search.
    """

    # Switched ordering of count and start as numpy will switch them back
    # when asked for view("<4")
    MASTER_POP_ENTRY_DTYPE = [
        ("key", "<u4"), ("mask", "<u4"), ("start", "<u2"), ("count", "<u2")]

    ADDRESS_LIST_DTYPE = "<u4"

    def __init__(self):
        AbstractMasterPopTableFactory.__init__(self)

        # key -> _MasterPopEntry; built between initialise_table and
        # finish_master_pop_table, None outside that window
        self._entries = None

        # total number of (address, row length) pairs over all entries
        self._n_addresses = 0

    def get_master_population_table_size(self, vertex_slice, in_edges):
        """ Get an upper bound on the table size before partitioning.

        :param vertex_slice: the slice of the partitionable vertex that the\
            partitioned vertex will be holding
        :param in_edges: the in coming edges for the partitioned vertex this\
            master pop is associated with.
        :return: the size the master pop table will take in SDRAM (in bytes)
        """

        # Entry for each sub-edge - but don't know the subedges yet, so
        # assume multiple entries for each edge
        n_subvertices = 0
        n_entries = 0
        for in_edge in in_edges:

            if isinstance(in_edge, ProjectionPartitionableEdge):

                # TODO: Fix this to be more accurate!
                # May require modification to the master population table
                # Get the number of atoms per core incoming
                # NOTE: sys.maxint is Python 2 only, like the rest of this
                # module
                max_atoms = sys.maxint
                edge_pre_vertex = in_edge.pre_vertex
                if isinstance(edge_pre_vertex, AbstractPartitionableVertex):
                    max_atoms = in_edge.pre_vertex.get_max_atoms_per_core()
                if in_edge.pre_vertex.n_atoms < max_atoms:
                    max_atoms = in_edge.pre_vertex.n_atoms

                # Get the number of likely subvertices
                n_edge_subvertices = int(math.ceil(
                    float(in_edge.pre_vertex.n_atoms) / float(max_atoms)))
                n_subvertices += n_edge_subvertices
                n_entries += (
                    n_edge_subvertices * len(in_edge.synapse_information))

        # Multiply by 2 to get an upper bound; the final 8 bytes are the
        # two count words written at the start of the region
        return (
            (n_subvertices * 2 * _MasterPopEntry.MASTER_POP_ENTRY_SIZE_BYTES) +
            (n_entries * 2 * _MasterPopEntry.ADDRESS_LIST_ENTRY_SIZE_BYTES) +
            8)

    def get_exact_master_population_table_size(
            self, subvertex, partitioned_graph, graph_mapper):
        """ Get the table size once the subedges are known.

        :return: the size the master pop table will take in SDRAM (in bytes)
        """
        in_edges = partitioned_graph.incoming_subedges_from_subvertex(
            subvertex)

        n_subvertices = len(in_edges)
        n_entries = 0
        for in_edge in in_edges:
            if isinstance(in_edge, ProjectionPartitionedEdge):
                edge = graph_mapper.\
                    get_partitionable_edge_from_partitioned_edge(in_edge)
                n_entries += len(edge.synapse_information)

        # Multiply by 2 to get an upper bound
        return (
            (n_subvertices * 2 * _MasterPopEntry.MASTER_POP_ENTRY_SIZE_BYTES) +
            (n_entries * 2 * _MasterPopEntry.ADDRESS_LIST_ENTRY_SIZE_BYTES) +
            8)

    def get_allowed_row_length(self, row_length):
        """ Validate a row length against this table format.

        :param row_length: the row length being considered
        :return: the row length available
        :raise Exception: if the row length does not fit in the 8 bits\
            available in an address list entry
        """
        if row_length > 255:
            raise Exception("Only rows of up to 255 entries are allowed")
        return row_length

    def get_next_allowed_address(self, next_address):
        """ Get the next usable synaptic matrix address; this format\
            imposes no alignment restriction.

        :param next_address: The next address that would be used
        :return: The next address that can be used following next_address
        """
        return next_address

    def initialise_table(self, spec, master_population_table_region):
        """ Initialises the master pop data structure

        :param spec: the dsg writer
        :param master_population_table_region: the region in memory that the\
            master pop table will be written in
        :return: None
        """
        self._entries = dict()

        # BUGFIX: this previously assigned a never-used attribute
        # (_n_entries) and left _n_addresses untouched, so initialising a
        # new table without an intervening finish_master_pop_table carried
        # the old address count over
        self._n_addresses = 0

    def update_master_population_table(
            self, spec, block_start_addr, row_length, keys_and_masks,
            master_pop_table_region):
        """ Adds a entry in the binary search to deal with the synaptic matrix

        :param spec: the writer for dsg
        :param block_start_addr: where the synaptic matrix block starts
        :param row_length: how long in bytes each synaptic entry is
        :param keys_and_masks: the keys and masks for this master pop entry
        :param master_pop_table_region: the region id for the master pop
        :return: None
        """
        # NOTE: only the first key and mask are used - one entry per key
        key_and_mask = keys_and_masks[0]
        if key_and_mask.key not in self._entries:
            self._entries[key_and_mask.key] = _MasterPopEntry(
                key_and_mask.key, key_and_mask.mask)

        # the address is stored in words; // keeps integer division
        # identical on Python 2 and 3
        self._entries[key_and_mask.key].append(
            block_start_addr // 4, row_length)
        self._n_addresses += 1

    def finish_master_pop_table(self, spec, master_pop_table_region):
        """ Completes any operations required after all entries have been added

        :param spec: the writer for the dsg
        :param master_pop_table_region: the region to which the master pop\
            resides in
        :return: None
        """
        spec.switch_write_focus(region=master_pop_table_region)

        # sort entries by key so the machine code can binary search them
        entries = sorted(
            self._entries.values(),
            key=lambda pop_table_entry: pop_table_entry.routing_key)

        # write no master pop entries and the address list size
        n_entries = len(entries)
        spec.write_value(n_entries)
        spec.write_value(self._n_addresses)

        # Generate the table and list as arrays
        pop_table = numpy.zeros(
            n_entries, dtype=self.MASTER_POP_ENTRY_DTYPE)
        address_list = numpy.zeros(
            self._n_addresses, dtype=self.ADDRESS_LIST_DTYPE)
        start = 0
        for i, entry in enumerate(entries):
            pop_table[i]["key"] = entry.routing_key
            pop_table[i]["mask"] = entry.mask
            pop_table[i]["start"] = start
            count = len(entry.addresses_and_row_lengths)
            pop_table[i]["count"] = count
            for j, (address, row_length) in enumerate(
                    entry.addresses_and_row_lengths):
                # pack the word address into the top 24 bits and the row
                # length into the bottom 8
                address_list[start + j] = (address << 8) | row_length
            start += count

        # Write the arrays
        spec.write_array(pop_table.view("<u4"))
        spec.write_array(address_list)

        # reset the build state so the instance can be reused
        del self._entries
        self._entries = None
        self._n_addresses = 0

    def extract_synaptic_matrix_data_location(
            self, incoming_key_combo, master_pop_base_mem_address, txrx,
            chip_x, chip_y):
        """ Read the table back from the machine and return the matrix\
            locations for an incoming key.

        :return: a list of (row length, byte address) pairs, or an empty\
            list if the key matches no entry
        """

        # get entries in master pop
        count_data = txrx.read_memory(
            chip_x, chip_y, master_pop_base_mem_address, 8)
        # NOTE: buffer() is Python 2 only, like the rest of this module
        n_entries, n_addresses = struct.unpack("<II", buffer(count_data))
        n_entry_bytes = (
            n_entries * _MasterPopEntry.MASTER_POP_ENTRY_SIZE_BYTES)
        n_address_bytes = (
            n_addresses * _MasterPopEntry.ADDRESS_LIST_ENTRY_SIZE_BYTES)

        # read in master pop structure
        full_data = txrx.read_memory(
            chip_x, chip_y, master_pop_base_mem_address + 8,
            n_entry_bytes + n_address_bytes)

        # convert into a numpy arrays
        entry_list = numpy.frombuffer(
            full_data, 'uint8', n_entry_bytes, 0).view(
                dtype=self.MASTER_POP_ENTRY_DTYPE)
        address_list = numpy.frombuffer(
            full_data, 'uint8', n_address_bytes, n_entry_bytes).view(
                dtype=self.ADDRESS_LIST_DTYPE)

        entry = self._locate_entry(entry_list, incoming_key_combo)
        if entry is None:
            return []
        addresses = list()
        for i in range(entry["start"], entry["start"] + entry["count"]):
            address_and_row_length = address_list[i]
            # unpack (address << 8) | row_length; the address was stored in
            # words, so * 4 converts it back to bytes
            addresses.append((
                (address_and_row_length & 0xFF),
                (address_and_row_length >> 8) * 4))
        return addresses

    def _locate_entry(self, entries, key):
        """ Binary-search the sorted entry array for the given key.

        :param key: the key to search the master pop table for a given entry
        :return: the entry for this given key, or None if no entry matches
        :rtype: _MasterPopEntry
        """
        imin = 0
        imax = len(entries)

        while imin < imax:
            imid = (imax + imin) // 2
            entry = entries[imid]
            if key & entry["mask"] == entry["key"]:
                return entry
            if key > entry["key"]:
                imin = imid + 1
            else:
                imax = imid
        return None

    def get_edge_constraints(self):
        """ Returns any constraints placed on the edges because of having\
            this master pop table implemented in the cores.

        :return: an empty list - this implementation needs no constraints
        """
        return list()
from spynnaker.pyNN.models.neuron.plasticity.stdp.common \
import plasticity_helpers
from spynnaker.pyNN.models.neuron.plasticity.stdp.timing_dependence\
.abstract_timing_dependence import AbstractTimingDependence
from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure\
.synapse_structure_weight_only import SynapseStructureWeightOnly
import logging
logger = logging.getLogger(__name__)
LOOKUP_TAU_PLUS_SIZE = 256
LOOKUP_TAU_PLUS_SHIFT = 0
LOOKUP_TAU_MINUS_SIZE = 256
LOOKUP_TAU_MINUS_SHIFT = 0
LOOKUP_TAU_X_SIZE = 256
LOOKUP_TAU_X_SHIFT = 2
LOOKUP_TAU_Y_SIZE = 256
LOOKUP_TAU_Y_SHIFT = 2
class TimingDependencePfisterSpikeTriplet(AbstractTimingDependence):
    """ Spike-triplet STDP timing dependence, parameterised by four\
        exponential decay time constants.
    """

    # noinspection PyPep8Naming
    def __init__(self, tau_plus, tau_minus, tau_x, tau_y):
        AbstractTimingDependence.__init__(self)
        self._tau_plus = tau_plus
        self._tau_minus = tau_minus
        self._tau_x = tau_x
        self._tau_y = tau_y
        self._synapse_structure = SynapseStructureWeightOnly()

        # final LUT entries, kept so provenance can report any truncation
        self._tau_plus_last_entry = None
        self._tau_minus_last_entry = None
        self._tau_x_last_entry = None
        self._tau_y_last_entry = None

    @property
    def tau_plus(self):
        return self._tau_plus

    @property
    def tau_minus(self):
        return self._tau_minus

    @property
    def tau_x(self):
        return self._tau_x

    @property
    def tau_y(self):
        return self._tau_y

    def is_same_as(self, timing_dependence):
        """ True if the other timing dependence is equivalent to this one """
        if not isinstance(
                timing_dependence, TimingDependencePfisterSpikeTriplet):
            return False
        return (
            self._tau_plus, self._tau_minus, self._tau_x, self._tau_y) == (
            timing_dependence.tau_plus, timing_dependence.tau_minus,
            timing_dependence.tau_x, timing_dependence.tau_y)

    @property
    def vertex_executable_suffix(self):
        # selects the matching binary variant
        return "pfister_triplet"

    @property
    def pre_trace_n_bytes(self):
        # Triplet rule trace entries consists of two 16-bit traces - R1 and R2
        return 4

    def get_parameters_sdram_usage_in_bytes(self):
        # each LUT entry is 16 bits wide
        total_entries = (
            LOOKUP_TAU_PLUS_SIZE + LOOKUP_TAU_MINUS_SIZE +
            LOOKUP_TAU_X_SIZE + LOOKUP_TAU_Y_SIZE)
        return 2 * total_entries

    @property
    def n_weight_terms(self):
        return 2

    def write_parameters(self, spec, machine_time_step, weight_scales):
        """ Write the four exponential lookup tables, recording the last\
            entry of each for provenance.
        """
        # Check timestep is valid
        if machine_time_step != 1000:
            raise NotImplementedError(
                "STDP LUT generation currently only supports 1ms timesteps")

        # the order of these writes defines the region layout - keep it
        self._tau_plus_last_entry = plasticity_helpers.write_exp_lut(
            spec, self._tau_plus, LOOKUP_TAU_PLUS_SIZE,
            LOOKUP_TAU_PLUS_SHIFT)
        self._tau_minus_last_entry = plasticity_helpers.write_exp_lut(
            spec, self._tau_minus, LOOKUP_TAU_MINUS_SIZE,
            LOOKUP_TAU_MINUS_SHIFT)
        self._tau_x_last_entry = plasticity_helpers.write_exp_lut(
            spec, self._tau_x, LOOKUP_TAU_X_SIZE, LOOKUP_TAU_X_SHIFT)
        self._tau_y_last_entry = plasticity_helpers.write_exp_lut(
            spec, self._tau_y, LOOKUP_TAU_Y_SIZE, LOOKUP_TAU_Y_SHIFT)

    @property
    def synaptic_structure(self):
        return self._synapse_structure

    def get_provenance_data(self, pre_population_label, post_population_label):
        """ One provenance item per LUT, reporting its last entry """
        lut_entries = [
            ("tau_plus", self._tau_plus_last_entry),
            ("tau_minus", self._tau_minus_last_entry),
            ("tau_x", self._tau_x_last_entry),
            ("tau_y", self._tau_y_last_entry)]
        return [
            plasticity_helpers.get_lut_provenance(
                pre_population_label, post_population_label,
                "PfisterSpikeTripletRule", "{}_last_entry".format(name),
                name, last_entry)
            for name, last_entry in lut_entries]
from spynnaker.pyNN.models.neuron.plasticity.stdp.common \
import plasticity_helpers
from spynnaker.pyNN.models.neuron.plasticity.stdp.timing_dependence\
.abstract_timing_dependence import AbstractTimingDependence
from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure\
.synapse_structure_weight_only import SynapseStructureWeightOnly
import logging
logger = logging.getLogger(__name__)
LOOKUP_TAU_PLUS_SIZE = 256
LOOKUP_TAU_PLUS_SHIFT = 0
LOOKUP_TAU_MINUS_SIZE = 256
LOOKUP_TAU_MINUS_SHIFT = 0
class TimingDependenceSpikePair(AbstractTimingDependence):
    """ Pair-based STDP timing dependence, optionally restricted to\
        nearest-neighbour spike pairing.
    """

    def __init__(self, tau_plus=20.0, tau_minus=20.0, nearest=False):
        AbstractTimingDependence.__init__(self)
        self._tau_plus = tau_plus
        self._tau_minus = tau_minus
        self._nearest = nearest
        self._synapse_structure = SynapseStructureWeightOnly()

        # final LUT entries, kept so provenance can report any truncation
        self._tau_plus_last_entry = None
        self._tau_minus_last_entry = None

    @property
    def tau_plus(self):
        return self._tau_plus

    @property
    def tau_minus(self):
        return self._tau_minus

    @property
    def nearest(self):
        return self._nearest

    def is_same_as(self, timing_dependence):
        """ True if the other timing dependence is equivalent to this one """
        if not isinstance(timing_dependence, TimingDependenceSpikePair):
            return False
        return (self._tau_plus, self._tau_minus, self._nearest) == (
            timing_dependence._tau_plus, timing_dependence._tau_minus,
            timing_dependence._nearest)

    @property
    def vertex_executable_suffix(self):
        # selects the matching binary variant
        return "nearest_pair" if self._nearest else "pair"

    @property
    def pre_trace_n_bytes(self):
        # Pair rule requires no pre-synaptic trace when only the nearest
        # Neighbours are considered and, a single 16-bit R1 trace
        return 0 if self._nearest else 2

    def get_parameters_sdram_usage_in_bytes(self):
        # each LUT entry is 16 bits wide
        return 2 * (LOOKUP_TAU_PLUS_SIZE + LOOKUP_TAU_MINUS_SIZE)

    @property
    def n_weight_terms(self):
        return 1

    def write_parameters(self, spec, machine_time_step, weight_scales):
        """ Write the two exponential lookup tables, recording the last\
            entry of each for provenance.
        """
        # Check timestep is valid
        if machine_time_step != 1000:
            raise NotImplementedError(
                "STDP LUT generation currently only supports 1ms timesteps")

        # the order of these writes defines the region layout - keep it
        self._tau_plus_last_entry = plasticity_helpers.write_exp_lut(
            spec, self._tau_plus, LOOKUP_TAU_PLUS_SIZE,
            LOOKUP_TAU_PLUS_SHIFT)
        self._tau_minus_last_entry = plasticity_helpers.write_exp_lut(
            spec, self._tau_minus, LOOKUP_TAU_MINUS_SIZE,
            LOOKUP_TAU_MINUS_SHIFT)

    @property
    def synaptic_structure(self):
        return self._synapse_structure

    def get_provenance_data(self, pre_population_label, post_population_label):
        """ One provenance item per LUT, reporting its last entry """
        lut_entries = [
            ("tau_plus", self._tau_plus_last_entry),
            ("tau_minus", self._tau_minus_last_entry)]
        return [
            plasticity_helpers.get_lut_provenance(
                pre_population_label, post_population_label, "SpikePairRule",
                "{}_last_entry".format(name), name, last_entry)
            for name, last_entry in lut_entries]
from data_specification.enums.data_type import DataType
from spynnaker.pyNN.models.neuron.plasticity.stdp.weight_dependence\
.abstract_weight_dependence import AbstractWeightDependence
class WeightDependenceMultiplicative(AbstractWeightDependence):
    """ Configuration of the multiplicative weight dependence for STDP. """

    def __init__(self, w_min=0.0, w_max=1.0, A_plus=0.01, A_minus=0.01):
        AbstractWeightDependence.__init__(self)
        self._w_min = w_min
        self._w_max = w_max
        self._A_plus = A_plus
        self._A_minus = A_minus

    @property
    def w_min(self):
        return self._w_min

    @property
    def w_max(self):
        return self._w_max

    @property
    def A_plus(self):
        return self._A_plus

    @property
    def A_minus(self):
        return self._A_minus

    def is_same_as(self, weight_dependence):
        """ True if the other weight dependence is equivalent to this one """
        if not isinstance(weight_dependence, WeightDependenceMultiplicative):
            return False
        return (self._w_min, self._w_max, self._A_plus, self._A_minus) == (
            weight_dependence._w_min, weight_dependence._w_max,
            weight_dependence._A_plus, weight_dependence._A_minus)

    @property
    def vertex_executable_suffix(self):
        # selects the matching binary variant
        return "multiplicative"

    def get_parameters_sdram_usage_in_bytes(
            self, n_synapse_types, n_weight_terms):
        """ SDRAM needed: four 32-bit words per synapse type """
        if n_weight_terms != 1:
            raise NotImplementedError(
                "Multiplicative weight dependence only supports single terms")
        return (4 * 4) * n_synapse_types

    def write_parameters(
            self, spec, machine_time_step, weight_scales, n_weight_terms):
        """ Write w_min, w_max, A_plus and A_minus, each scaled by the\
            per-synapse-type weight scale.
        """
        if n_weight_terms != 1:
            raise NotImplementedError(
                "Multiplicative weight dependence only supports single terms")

        # One block of four scaled words per synapse type, in this fixed
        # order: w_min, w_max, A_plus, A_minus
        for weight_scale in weight_scales:
            for parameter in (
                    self._w_min, self._w_max, self._A_plus, self._A_minus):
                spec.write_value(
                    data=int(round(parameter * weight_scale)),
                    data_type=DataType.INT32)

    @property
    def weight_maximum(self):
        """ The maximum weight this rule can produce """
        return self._w_max
from data_specification.enums.data_type import DataType
from spynnaker.pyNN.models.neuron.plasticity.stdp.weight_dependence\
.abstract_weight_dependence import AbstractWeightDependence
class WeightDependenceAdditive(AbstractWeightDependence):
# noinspection PyPep8Naming
def __init__(
self, w_min=0.0, w_max=1.0, A_plus=0.01, A_minus=0.01,
A3_plus=None, A3_minus=None):
AbstractWeightDependence.__init__(self)
self._w_min = w_min
self._w_max = w_max
self._A_plus = A_plus
self._A_minus = A_minus
self._A3_plus = A3_plus
self._A3_minus = A3_minus
@property
def w_min(self):
return self._w_min
@property
def w_max(self):
return self._w_max
@property
def A_plus(self):
return self._A_plus
@property
def A_minus(self):
return self._A_minus
@property
def A3_plus(self):
return self._A3_plus
@property
def A3_minus(self):
return self._A3_minus
def is_same_as(self, weight_dependence):
if not isinstance(weight_dependence, WeightDependenceAdditive):
return False
return (
(self._w_min == weight_dependence._w_min) and
(self._w_max == weight_dependence._w_max) and
(self._A_plus == weight_dependence._A_plus) and
(self._A_minus == weight_dependence._A_minus) and
(self._A3_plus == weight_dependence._A3_plus) and
(self._A3_minus == weight_dependence._A3_minus))
@property
def vertex_executable_suffix(self):
return "additive"
def get_parameters_sdram_usage_in_bytes(
self, n_synapse_types, n_weight_terms):
if n_weight_terms == 1:
return (4 * 4) * n_synapse_types
elif n_weight_terms == 2:
return (6 * 4) * n_synapse_types
else:
raise NotImplementedError(
"Additive weight dependence only supports one or two terms")
def write_parameters(
self, spec, machine_time_step, weight_scales, n_weight_terms):
# Loop through each synapse type's weight scale
for w in weight_scales:
# Scale the weights
spec.write_value(
data=int(round(self._w_min * w)), data_type=DataType.INT32)
spec.write_value(
data=int(round(self._w_max * w)), data_type=DataType.INT32)
# Based on http://data.andrewdavison.info/docs/PyNN/_modules/pyNN
# /standardmodels/synapses.html
# Pre-multiply A+ and A- by Wmax
spec.write_value(
data=int(round(self._A_plus * self._w_max * w)),
data_type=DataType.INT32)
spec.write_value(
data=int(round(self._A_minus * self._w_max * w)),
data_type=DataType.INT32)
# If triplet term is required, write A3+ and A3-, also multiplied
# by Wmax
if n_weight_terms == 2:
spec.write_value(
data=int(round(self._A3_plus * self._w_max * w)),
data_type=DataType.INT32)
spec.write_value(
data=int(round(self._A3_minus * self._w_max * w)),
data_type=DataType.INT32)
elif n_weight_terms != 1:
raise NotImplementedError(
"Additive weight dependence only supports one or two"
" terms")
@property
def weight_maximum(self):
return self._w_max | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neuron/plasticity/stdp/weight_dependence/weight_dependence_additive.py | 0.857037 | 0.411406 | weight_dependence_additive.py | pypi |
import numpy
import math
from spynnaker.pyNN.models.neuron.synapse_dynamics.abstract_synapse_dynamics \
import AbstractSynapseDynamics
from spynnaker.pyNN.models.neural_projections.connectors.abstract_connector \
import AbstractConnector
from spynnaker.pyNN.models.neuron.synapse_dynamics\
.abstract_static_synapse_dynamics import AbstractStaticSynapseDynamics
from spynnaker.pyNN.models.neuron.synapse_io.abstract_synapse_io \
import AbstractSynapseIO
_N_HEADER_WORDS = 3
class SynapseIORowBased(AbstractSynapseIO):
    """ A SynapseRowIO implementation that uses a row for each source neuron,
        where each row consists of a fixed region, a plastic region, and a\
        fixed-plastic region (this is the bits of the plastic row that don't\
        actually change). The plastic region structure is determined by the\
        synapse dynamics of the connector.
    """
    def __init__(self, machine_time_step):
        AbstractSynapseIO.__init__(self)
        # Machine time step in microseconds - used throughout to convert
        # between milliseconds and simulation timesteps
        self._machine_time_step = machine_time_step
    def get_maximum_delay_supported_in_ms(self):
        """ The largest delay (in milliseconds) that fits in a synaptic row\
            without requiring a delay extension
        """
        # There are 16 slots, one per time step
        return 16 * (self._machine_time_step / 1000.0)
    def _n_words(self, n_bytes):
        # Number of 32-bit words needed to hold n_bytes
        # NOTE(review): math.ceil returns a float here (float argument);
        # confirm callers accept a non-integer word count
        return math.ceil(float(n_bytes) / 4.0)
    def get_sdram_usage_in_bytes(
            self, synapse_info, n_pre_slices, pre_slice_index,
            n_post_slices, post_slice_index, pre_vertex_slice,
            post_vertex_slice, n_delay_stages, population_table):
        """ Get the SDRAM usage of the synaptic matrices for the given\
            pre/post vertex slice pair, as a tuple\
            (n_bytes_undelayed, n_bytes_delayed)
        """
        # Find the maximum row length - i.e. the maximum number of bytes
        # that will be needed by any row for both rows with delay extensions
        # and rows without
        max_delay_supported = self.get_maximum_delay_supported_in_ms()
        max_delay = max_delay_supported * (n_delay_stages + 1)
        # delay point where delay extensions start
        min_delay_for_delay_extension = (
            max_delay_supported + numpy.finfo(numpy.double).tiny)
        # row length for the undelayed synaptic matrix
        max_undelayed_row_length = synapse_info.connector\
            .get_n_connections_from_pre_vertex_maximum(
                n_pre_slices, pre_slice_index, n_post_slices,
                post_slice_index, pre_vertex_slice, post_vertex_slice,
                0, max_delay_supported)
        # determine the max row length in the delay extension
        max_delayed_row_length = 0
        if n_delay_stages > 0:
            max_delayed_row_length = synapse_info.connector\
                .get_n_connections_from_pre_vertex_maximum(
                    n_pre_slices, pre_slice_index, n_post_slices,
                    post_slice_index, pre_vertex_slice, post_vertex_slice,
                    min_delay_for_delay_extension, max_delay)
        # Get the row sizes
        dynamics = synapse_info.synapse_dynamics
        undelayed_size = 0
        delayed_size = 0
        if isinstance(dynamics, AbstractStaticSynapseDynamics):
            undelayed_size = dynamics.get_n_words_for_static_connections(
                max_undelayed_row_length)
            delayed_size = dynamics.get_n_words_for_static_connections(
                max_delayed_row_length)
        else:
            undelayed_size = dynamics.get_n_words_for_plastic_connections(
                max_undelayed_row_length)
            delayed_size = dynamics.get_n_words_for_plastic_connections(
                max_delayed_row_length)
        # Adjust for the allowed row lengths from the population table
        undelayed_max_bytes = population_table.get_allowed_row_length(
            undelayed_size) * 4
        delayed_max_bytes = population_table.get_allowed_row_length(
            delayed_size) * 4
        # Add on the header words and multiply by the number of rows in the
        # block
        n_bytes_undelayed = 0
        if undelayed_max_bytes > 0:
            n_bytes_undelayed = (
                ((_N_HEADER_WORDS * 4) + undelayed_max_bytes) *
                pre_vertex_slice.n_atoms)
        n_bytes_delayed = 0
        if delayed_max_bytes > 0:
            # One row per pre-atom per delay stage
            n_bytes_delayed = (
                ((_N_HEADER_WORDS * 4) + delayed_max_bytes) *
                pre_vertex_slice.n_atoms * n_delay_stages)
        return n_bytes_undelayed, n_bytes_delayed
    @staticmethod
    def _get_max_row_length_and_row_data(
            connections, row_indices, n_rows, post_vertex_slice,
            n_synapse_types, population_table, synapse_dynamics):
        """ Convert connections into packed row data, padding every row to\
            the population-table-allowed maximum; returns\
            (max_row_length, row_data)
        """
        ff_data, ff_size = None, None
        fp_data, pp_data, fp_size, pp_size = None, None, None, None
        if isinstance(synapse_dynamics, AbstractStaticSynapseDynamics):
            # Get the static data
            ff_data, ff_size = synapse_dynamics.get_static_synaptic_data(
                connections, row_indices, n_rows, post_vertex_slice,
                n_synapse_types)
            # Blank the plastic data
            fp_data = [numpy.zeros(0, dtype="uint32") for _ in range(n_rows)]
            pp_data = [numpy.zeros(0, dtype="uint32") for _ in range(n_rows)]
            fp_size = [numpy.zeros(1, dtype="uint32") for _ in range(n_rows)]
            pp_size = [numpy.zeros(1, dtype="uint32") for _ in range(n_rows)]
        else:
            # Blank the static data
            ff_data = [numpy.zeros(0, dtype="uint32") for _ in range(n_rows)]
            ff_size = [numpy.zeros(1, dtype="uint32") for _ in range(n_rows)]
            # Get the plastic data
            fp_data, pp_data, fp_size, pp_size = \
                synapse_dynamics.get_plastic_synaptic_data(
                    connections, row_indices, n_rows, post_vertex_slice,
                    n_synapse_types)
        # Add some padding
        # (3 is the number of header words in each row)
        row_lengths = [
            3 + pp_data[i].size + fp_data[i].size + ff_data[i].size
            for i in range(n_rows)]
        max_length = max(row_lengths) - _N_HEADER_WORDS
        max_row_length = population_table.get_allowed_row_length(max_length)
        padding = [
            numpy.zeros(
                max_row_length - (row_length - _N_HEADER_WORDS),
                dtype="uint32")
            for row_length in row_lengths]
        # Join the bits into rows
        # Row layout: pp_size, pp_data, ff_size, fp_size, ff_data, fp_data
        items_to_join = [
            pp_size, pp_data, ff_size, fp_size, ff_data, fp_data, padding]
        rows = [numpy.concatenate(items) for items in zip(*items_to_join)]
        row_data = numpy.concatenate(rows)
        # Return the data
        return max_row_length, row_data
    def get_synapses(
            self, synapse_info, pre_slices, pre_slice_index,
            post_slices, post_slice_index, pre_vertex_slice,
            post_vertex_slice, n_delay_stages, population_table,
            n_synapse_types, weight_scales):
        """ Generate the synaptic matrices for the given pre/post vertex\
            slice pair, returning (row_data, max_row_length,\
            delayed_row_data, max_delayed_row_length, delayed_source_ids,\
            stages)
        """
        # Get delays in timesteps
        max_delay = self.get_maximum_delay_supported_in_ms()
        if max_delay is not None:
            max_delay *= (1000.0 / self._machine_time_step)
        # Get the actual connections
        connections = synapse_info.connector.create_synaptic_block(
            pre_slices, pre_slice_index, post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice,
            synapse_info.synapse_type)
        # Convert delays to timesteps
        connections["delay"] = numpy.rint(
            connections["delay"] * (1000.0 / self._machine_time_step))
        # Scale weights
        connections["weight"] = (
            connections["weight"] *
            weight_scales[synapse_info.synapse_type])
        # Split the connections up based on the delays
        undelayed_connections = connections
        delayed_connections = None
        if max_delay is not None:
            plastic_delay_mask = (connections["delay"] <= max_delay)
            undelayed_connections = connections[
                numpy.where(plastic_delay_mask)]
            delayed_connections = connections[
                numpy.where(~plastic_delay_mask)]
        else:
            delayed_connections = numpy.zeros(
                0, dtype=AbstractConnector.NUMPY_SYNAPSES_DTYPE)
        del connections
        # Get the data for the connections
        row_data = numpy.zeros(0, dtype="uint32")
        max_row_length = 0
        if len(undelayed_connections) > 0:
            # Get which row each connection will go into
            undelayed_row_indices = (
                undelayed_connections["source"] - pre_vertex_slice.lo_atom)
            max_row_length, row_data = self._get_max_row_length_and_row_data(
                undelayed_connections, undelayed_row_indices,
                pre_vertex_slice.n_atoms, post_vertex_slice, n_synapse_types,
                population_table, synapse_info.synapse_dynamics)
            del undelayed_row_indices
        del undelayed_connections
        # Get the data for the delayed connections
        delayed_row_data = numpy.zeros(0, dtype="uint32")
        max_delayed_row_length = 0
        stages = numpy.zeros(0)
        delayed_source_ids = numpy.zeros(0)
        if len(delayed_connections) > 0:
            # Get the delay stages and which row each delayed connection will
            # go into
            stages = numpy.floor((numpy.round(
                delayed_connections["delay"] - 1.0)) / max_delay)
            delayed_row_indices = (
                (delayed_connections["source"] - pre_vertex_slice.lo_atom) +
                ((stages - 1) * pre_vertex_slice.n_atoms))
            # Reduce each delay to the remainder within its stage
            delayed_connections["delay"] -= max_delay * stages
            delayed_source_ids = (
                delayed_connections["source"] - pre_vertex_slice.lo_atom)
            # Get the data
            max_delayed_row_length, delayed_row_data = \
                self._get_max_row_length_and_row_data(
                    delayed_connections, delayed_row_indices,
                    pre_vertex_slice.n_atoms * n_delay_stages,
                    post_vertex_slice, n_synapse_types, population_table,
                    synapse_info.synapse_dynamics)
            del delayed_row_indices
        del delayed_connections
        return (row_data, max_row_length, delayed_row_data,
                max_delayed_row_length, delayed_source_ids, stages)
    @staticmethod
    def _get_static_data(row_data, dynamics):
        """ Extract the fixed-fixed sizes and data from a 2D array of rows;\
            word 1 of each row holds the fixed-fixed size and the data\
            starts after the header words
        """
        n_rows = row_data.shape[0]
        ff_size = row_data[:, 1]
        ff_words = dynamics.get_n_static_words_per_row(ff_size)
        ff_start = _N_HEADER_WORDS
        ff_end = ff_start + ff_words
        return (
            ff_size,
            [row_data[row, ff_start:ff_end[row]] for row in range(n_rows)])
    @staticmethod
    def _get_plastic_data(row_data, dynamics):
        """ Extract the plastic-plastic and fixed-plastic sizes and data\
            from a 2D array of rows; word 0 holds the plastic-plastic size,\
            and the fixed-plastic size follows the plastic-plastic data
        """
        n_rows = row_data.shape[0]
        pp_size = row_data[:, 0]
        pp_words = dynamics.get_n_plastic_plastic_words_per_row(pp_size)
        fp_size = row_data[numpy.arange(n_rows), pp_words + 2]
        fp_words = dynamics.get_n_fixed_plastic_words_per_row(fp_size)
        # NOTE(review): pp_size appears to be used here as a word count -
        # confirm against the synapse dynamics implementations
        fp_start = pp_size + _N_HEADER_WORDS
        fp_end = fp_start + fp_words
        return (
            pp_size,
            [row_data[row, 1:pp_words[row] + 1] for row in range(n_rows)],
            fp_size,
            [row_data[row, fp_start[row]:fp_end[row]] for row in range(n_rows)]
        )
    def read_synapses(
            self, synapse_info, pre_vertex_slice, post_vertex_slice,
            max_row_length, delayed_max_row_length, n_synapse_types,
            weight_scales, data, delayed_data, n_delay_stages):
        """ Read connections back from raw row data retrieved from the\
            machine, reversing the delay and weight scaling applied in\
            get_synapses
        """
        # Translate the data into rows
        row_data = None
        delayed_row_data = None
        row_stage = None
        connection_min_delay = None
        connection_source_extra = None
        if data is not None and len(data) > 0:
            row_data = numpy.frombuffer(data, dtype="<u4").reshape(
                -1, (max_row_length + _N_HEADER_WORDS))
        if delayed_data is not None and len(delayed_data) > 0:
            delayed_row_data = numpy.frombuffer(
                delayed_data, dtype="<u4").reshape(
                    -1, (delayed_max_row_length + _N_HEADER_WORDS))
        dynamics = synapse_info.synapse_dynamics
        connections = list()
        if isinstance(dynamics, AbstractStaticSynapseDynamics):
            # Read static data
            if row_data is not None and len(row_data) > 0:
                ff_size, ff_data = self._get_static_data(row_data, dynamics)
                undelayed_connections = dynamics.read_static_synaptic_data(
                    post_vertex_slice, n_synapse_types, ff_size, ff_data)
                undelayed_connections["source"] += pre_vertex_slice.lo_atom
                connections.append(undelayed_connections)
            if delayed_row_data is not None and len(delayed_row_data) > 0:
                ff_size, ff_data = self._get_static_data(
                    delayed_row_data, dynamics)
                delayed_connections = dynamics.read_static_synaptic_data(
                    post_vertex_slice, n_synapse_types, ff_size, ff_data)
                # Use the row index to work out the actual delay and source
                # NOTE(review): relies on Python 2 integer division of the
                # row index by n_atoms to get the stage - confirm
                n_synapses = dynamics.get_n_synapses_in_rows(ff_size)
                row_stage = numpy.array([
                    (i / pre_vertex_slice.n_atoms)
                    for i in range(len(n_synapses))], dtype="uint32")
                row_min_delay = (row_stage + 1) * 16
                connection_min_delay = numpy.concatenate([
                    numpy.repeat(row_min_delay[i], n_synapses[i])
                    for i in range(len(n_synapses))])
                connection_source_extra = numpy.concatenate([
                    numpy.repeat(
                        row_stage[i] * pre_vertex_slice.n_atoms, n_synapses[i])
                    for i in range(len(n_synapses))])
                delayed_connections["source"] -= connection_source_extra
                delayed_connections["source"] += pre_vertex_slice.lo_atom
                delayed_connections["delay"] += connection_min_delay
                connections.append(delayed_connections)
        else:
            # Read plastic data
            if row_data is not None:
                pp_size, pp_data, fp_size, fp_data = self._get_plastic_data(
                    row_data, dynamics)
                undelayed_connections = dynamics.read_plastic_synaptic_data(
                    post_vertex_slice, n_synapse_types, pp_size, pp_data,
                    fp_size, fp_data)
                undelayed_connections["source"] += pre_vertex_slice.lo_atom
                connections.append(undelayed_connections)
            if delayed_row_data is not None:
                pp_size, pp_data, fp_size, fp_data = self._get_plastic_data(
                    delayed_row_data, dynamics)
                delayed_connections = dynamics.read_plastic_synaptic_data(
                    post_vertex_slice, n_synapse_types, pp_size, pp_data,
                    fp_size, fp_data)
                # Use the row index to work out the actual delay and source
                n_synapses = dynamics.get_n_synapses_in_rows(pp_size, fp_size)
                row_stage = numpy.array([
                    (i / pre_vertex_slice.n_atoms)
                    for i in range(len(n_synapses))], dtype="uint32")
                row_min_delay = (row_stage + 1) * 16
                connection_min_delay = numpy.concatenate([
                    numpy.repeat(row_min_delay[i], n_synapses[i])
                    for i in range(len(n_synapses))])
                connection_source_extra = numpy.concatenate([
                    numpy.repeat(
                        row_stage[i] * pre_vertex_slice.n_atoms, n_synapses[i])
                    for i in range(len(n_synapses))])
                delayed_connections["source"] -= connection_source_extra
                delayed_connections["source"] += pre_vertex_slice.lo_atom
                delayed_connections["delay"] += connection_min_delay
                connections.append(delayed_connections)
        # Join the connections into a single list
        if len(connections) > 0:
            connections = numpy.concatenate(connections)
            # Return the delays values to milliseconds
            connections["delay"] = (
                connections["delay"] / (1000.0 / self._machine_time_step))
            # Undo the weight scaling
            connections["weight"] = (
                connections["weight"] /
                weight_scales[synapse_info.synapse_type])
        else:
            connections = numpy.zeros(
                0, dtype=AbstractSynapseDynamics.NUMPY_CONNECTORS_DTYPE)
        # Return the connections
        return connections
    def get_block_n_bytes(self, max_row_length, n_rows):
        """ Get the number of bytes in a block of n_rows rows, including\
            the header words of each row
        """
        return ((_N_HEADER_WORDS + max_row_length) * 4) * n_rows | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neuron/synapse_io/synapse_io_row_based.py | 0.782205 | 0.393997 | synapse_io_row_based.py | pypi
from spynnaker.pyNN.models.neuron.neuron_models.neuron_model_izh \
import NeuronModelIzh
from spynnaker.pyNN.models.neuron.synapse_types.synapse_type_exponential \
import SynapseTypeExponential
from spynnaker.pyNN.models.neuron.input_types.input_type_current \
import InputTypeCurrent
from spynnaker.pyNN.models.neuron.threshold_types.threshold_type_static \
import ThresholdTypeStatic
from spynnaker.pyNN.models.neuron.abstract_population_vertex \
import AbstractPopulationVertex
_IZK_THRESHOLD = 30.0
class IzkCurrExp(AbstractPopulationVertex):
_model_based_max_atoms_per_core = 255
default_parameters = {
'a': 0.02, 'c': -65.0, 'b': 0.2, 'd': 2.0, 'i_offset': 0,
'u_init': -14.0, 'v_init': -70.0, 'tau_syn_E': 5.0, 'tau_syn_I': 5.0}
# noinspection PyPep8Naming
def __init__(
self, n_neurons, machine_time_step, timescale_factor,
spikes_per_second=None, ring_buffer_sigma=None,
incoming_spike_buffer_size=None, constraints=None, label=None,
a=default_parameters['a'], b=default_parameters['b'],
c=default_parameters['c'], d=default_parameters['d'],
i_offset=default_parameters['i_offset'],
u_init=default_parameters['u_init'],
v_init=default_parameters['v_init'],
tau_syn_E=default_parameters['tau_syn_E'],
tau_syn_I=default_parameters['tau_syn_I']):
neuron_model = NeuronModelIzh(
n_neurons, machine_time_step, a, b, c, d, v_init, u_init, i_offset)
synapse_type = SynapseTypeExponential(
n_neurons, machine_time_step, tau_syn_E, tau_syn_I)
input_type = InputTypeCurrent()
threshold_type = ThresholdTypeStatic(n_neurons, _IZK_THRESHOLD)
AbstractPopulationVertex.__init__(
self, n_neurons=n_neurons, binary="IZK_curr_exp.aplx", label=label,
max_atoms_per_core=IzkCurrExp._model_based_max_atoms_per_core,
machine_time_step=machine_time_step,
timescale_factor=timescale_factor,
spikes_per_second=spikes_per_second,
ring_buffer_sigma=ring_buffer_sigma,
incoming_spike_buffer_size=incoming_spike_buffer_size,
model_name="IZK_curr_exp", neuron_model=neuron_model,
input_type=input_type, synapse_type=synapse_type,
threshold_type=threshold_type, constraints=constraints)
@staticmethod
def set_model_max_atoms_per_core(new_value):
IzkCurrExp._model_based_max_atoms_per_core = new_value | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neuron/builds/izk_curr_exp.py | 0.678859 | 0.424114 | izk_curr_exp.py | pypi |
from spynnaker.pyNN.models.neuron.neuron_models\
.neuron_model_leaky_integrate_and_fire \
import NeuronModelLeakyIntegrateAndFire
from spynnaker.pyNN.models.neuron.input_types.input_type_conductance \
import InputTypeConductance
from spynnaker.pyNN.models.neuron.synapse_types.synapse_type_exponential \
import SynapseTypeExponential
from spynnaker.pyNN.models.neuron.threshold_types.threshold_type_static \
import ThresholdTypeStatic
from spynnaker.pyNN.models.neuron.abstract_population_vertex \
import AbstractPopulationVertex
class IFCondExp(AbstractPopulationVertex):
""" Leaky integrate and fire neuron with an exponentially decaying \
conductance input
"""
_model_based_max_atoms_per_core = 255
default_parameters = {
'tau_m': 20.0, 'cm': 1.0, 'e_rev_E': 0.0, 'e_rev_I': -70.0,
'v_rest': -65.0, 'v_reset': -65.0, 'v_thresh': -50.0,
'tau_syn_E': 5.0, 'tau_syn_I': 5.0, 'tau_refrac': 0.1,
'i_offset': 0}
def __init__(
self, n_neurons, machine_time_step, timescale_factor,
spikes_per_second=None, ring_buffer_sigma=None,
incoming_spike_buffer_size=None, constraints=None, label=None,
tau_m=default_parameters['tau_m'], cm=default_parameters['cm'],
v_rest=default_parameters['v_rest'],
v_reset=default_parameters['v_reset'],
v_thresh=default_parameters['v_thresh'],
tau_syn_E=default_parameters['tau_syn_E'],
tau_syn_I=default_parameters['tau_syn_I'],
tau_refrac=default_parameters['tau_refrac'],
i_offset=default_parameters['i_offset'],
e_rev_E=default_parameters['e_rev_E'],
e_rev_I=default_parameters['e_rev_I'], v_init=None):
neuron_model = NeuronModelLeakyIntegrateAndFire(
n_neurons, machine_time_step, v_init, v_rest, tau_m, cm, i_offset,
v_reset, tau_refrac)
synapse_type = SynapseTypeExponential(
n_neurons, machine_time_step, tau_syn_E, tau_syn_I)
input_type = InputTypeConductance(n_neurons, e_rev_E, e_rev_I)
threshold_type = ThresholdTypeStatic(n_neurons, v_thresh)
AbstractPopulationVertex.__init__(
self, n_neurons=n_neurons, binary="IF_cond_exp.aplx", label=label,
max_atoms_per_core=IFCondExp._model_based_max_atoms_per_core,
machine_time_step=machine_time_step,
timescale_factor=timescale_factor,
spikes_per_second=spikes_per_second,
ring_buffer_sigma=ring_buffer_sigma,
incoming_spike_buffer_size=incoming_spike_buffer_size,
model_name="IF_cond_exp", neuron_model=neuron_model,
input_type=input_type, synapse_type=synapse_type,
threshold_type=threshold_type, constraints=constraints)
@staticmethod
def set_model_max_atoms_per_core(new_value):
IFCondExp._model_based_max_atoms_per_core = new_value | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neuron/builds/if_cond_exp.py | 0.765418 | 0.499634 | if_cond_exp.py | pypi |
from spynnaker.pyNN.models.neuron.neuron_models\
.neuron_model_leaky_integrate_and_fire \
import NeuronModelLeakyIntegrateAndFire
from spynnaker.pyNN.models.neuron.synapse_types.synapse_type_exponential \
import SynapseTypeExponential
from spynnaker.pyNN.models.neuron.input_types.input_type_current \
import InputTypeCurrent
from spynnaker.pyNN.models.neuron.threshold_types.threshold_type_static \
import ThresholdTypeStatic
from spynnaker.pyNN.models.neuron.abstract_population_vertex \
import AbstractPopulationVertex
class IFCurrExp(AbstractPopulationVertex):
""" Leaky integrate and fire neuron with an exponentially decaying \
current input
"""
_model_based_max_atoms_per_core = 255
default_parameters = {
'tau_m': 20.0, 'cm': 1.0, 'v_rest': -65.0, 'v_reset': -65.0,
'v_thresh': -50.0, 'tau_syn_E': 5.0, 'tau_syn_I': 5.0,
'tau_refrac': 0.1, 'i_offset': 0}
def __init__(
self, n_neurons, machine_time_step, timescale_factor,
spikes_per_second=None, ring_buffer_sigma=None,
incoming_spike_buffer_size=None, constraints=None, label=None,
tau_m=default_parameters['tau_m'], cm=default_parameters['cm'],
v_rest=default_parameters['v_rest'],
v_reset=default_parameters['v_reset'],
v_thresh=default_parameters['v_thresh'],
tau_syn_E=default_parameters['tau_syn_E'],
tau_syn_I=default_parameters['tau_syn_I'],
tau_refrac=default_parameters['tau_refrac'],
i_offset=default_parameters['i_offset'], v_init=None):
neuron_model = NeuronModelLeakyIntegrateAndFire(
n_neurons, machine_time_step, v_init, v_rest, tau_m, cm, i_offset,
v_reset, tau_refrac)
synapse_type = SynapseTypeExponential(
n_neurons, machine_time_step, tau_syn_E, tau_syn_I)
input_type = InputTypeCurrent()
threshold_type = ThresholdTypeStatic(n_neurons, v_thresh)
AbstractPopulationVertex.__init__(
self, n_neurons=n_neurons, binary="IF_curr_exp.aplx", label=label,
max_atoms_per_core=IFCurrExp._model_based_max_atoms_per_core,
machine_time_step=machine_time_step,
timescale_factor=timescale_factor,
spikes_per_second=spikes_per_second,
ring_buffer_sigma=ring_buffer_sigma,
incoming_spike_buffer_size=incoming_spike_buffer_size,
model_name="IF_curr_exp", neuron_model=neuron_model,
input_type=input_type, synapse_type=synapse_type,
threshold_type=threshold_type, constraints=constraints)
@staticmethod
def set_model_max_atoms_per_core(new_value):
IFCurrExp._model_based_max_atoms_per_core = new_value | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neuron/builds/if_curr_exp.py | 0.769037 | 0.493103 | if_curr_exp.py | pypi |
from spynnaker.pyNN.models.neuron.input_types.input_type_conductance \
import InputTypeConductance
from spynnaker.pyNN.models.neuron.neuron_models.neuron_model_izh \
import NeuronModelIzh
from spynnaker.pyNN.models.neuron.synapse_types.synapse_type_exponential \
import SynapseTypeExponential
from spynnaker.pyNN.models.neuron.threshold_types.threshold_type_static \
import ThresholdTypeStatic
from spynnaker.pyNN.models.neuron.abstract_population_vertex \
import AbstractPopulationVertex
_IZK_THRESHOLD = 30.0
class IzkCondExp(AbstractPopulationVertex):
_model_based_max_atoms_per_core = 255
default_parameters = {
'a': 0.02, 'c': -65.0, 'b': 0.2, 'd': 2.0, 'i_offset': 0,
'u_init': -14.0, 'v_init': -70.0, 'tau_syn_E': 5.0, 'tau_syn_I': 5.0,
'e_rev_E': 0.0, 'e_rev_I': -70.0}
# noinspection PyPep8Naming
def __init__(
self, n_neurons, machine_time_step, timescale_factor,
spikes_per_second=None, ring_buffer_sigma=None,
incoming_spike_buffer_size=None, constraints=None, label=None,
a=default_parameters['a'], b=default_parameters['b'],
c=default_parameters['c'], d=default_parameters['d'],
i_offset=default_parameters['i_offset'],
u_init=default_parameters['u_init'],
v_init=default_parameters['v_init'],
tau_syn_E=default_parameters['tau_syn_E'],
tau_syn_I=default_parameters['tau_syn_I'],
e_rev_E=default_parameters['e_rev_E'],
e_rev_I=default_parameters['e_rev_I']):
neuron_model = NeuronModelIzh(
n_neurons, machine_time_step, a, b, c, d, v_init, u_init, i_offset)
synapse_type = SynapseTypeExponential(
n_neurons, machine_time_step, tau_syn_E, tau_syn_I)
input_type = InputTypeConductance(n_neurons, e_rev_E, e_rev_I)
threshold_type = ThresholdTypeStatic(n_neurons, _IZK_THRESHOLD)
AbstractPopulationVertex.__init__(
self, n_neurons=n_neurons, binary="IZK_cond_exp.aplx", label=label,
max_atoms_per_core=IzkCondExp._model_based_max_atoms_per_core,
machine_time_step=machine_time_step,
timescale_factor=timescale_factor,
spikes_per_second=spikes_per_second,
ring_buffer_sigma=ring_buffer_sigma,
incoming_spike_buffer_size=incoming_spike_buffer_size,
model_name="IZK_cond_exp", neuron_model=neuron_model,
input_type=input_type, synapse_type=synapse_type,
threshold_type=threshold_type, constraints=constraints)
@staticmethod
def set_model_max_atoms_per_core(new_value):
IzkCondExp._model_based_max_atoms_per_core = new_value | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neuron/builds/izk_cond_exp.py | 0.686265 | 0.422207 | izk_cond_exp.py | pypi |
from spynnaker.pyNN.models.neuron.neuron_models\
.neuron_model_leaky_integrate_and_fire \
import NeuronModelLeakyIntegrateAndFire
from spynnaker.pyNN.models.neuron.synapse_types.synapse_type_dual_exponential \
import SynapseTypeDualExponential
from spynnaker.pyNN.models.neuron.input_types.input_type_current \
import InputTypeCurrent
from spynnaker.pyNN.models.neuron.threshold_types.threshold_type_static \
import ThresholdTypeStatic
from spynnaker.pyNN.models.neuron.abstract_population_vertex \
import AbstractPopulationVertex
class IFCurrDualExp(AbstractPopulationVertex):
""" Leaky integrate and fire neuron with two exponentially decaying \
excitatory current inputs, and one exponentially decaying inhibitory \
current input
"""
_model_based_max_atoms_per_core = 255
default_parameters = {
'tau_m': 20.0, 'cm': 1.0, 'v_rest': -65.0, 'v_reset': -65.0,
'v_thresh': -50.0, 'tau_syn_E': 5.0, 'tau_syn_E2': 5.0,
'tau_syn_I': 5.0, 'tau_refrac': 0.1, 'i_offset': 0}
def __init__(
self, n_neurons, machine_time_step, timescale_factor,
spikes_per_second=None, ring_buffer_sigma=None,
incoming_spike_buffer_size=None, constraints=None, label=None,
tau_m=default_parameters['tau_m'], cm=default_parameters['cm'],
v_rest=default_parameters['v_rest'],
v_reset=default_parameters['v_reset'],
v_thresh=default_parameters['v_thresh'],
tau_syn_E=default_parameters['tau_syn_E'],
tau_syn_E2=default_parameters['tau_syn_E2'],
tau_syn_I=default_parameters['tau_syn_I'],
tau_refrac=default_parameters['tau_refrac'],
i_offset=default_parameters['i_offset'], v_init=None):
neuron_model = NeuronModelLeakyIntegrateAndFire(
n_neurons, machine_time_step, v_init, v_rest, tau_m, cm, i_offset,
v_reset, tau_refrac)
synapse_type = SynapseTypeDualExponential(
n_neurons, machine_time_step, tau_syn_E, tau_syn_E2, tau_syn_I)
input_type = InputTypeCurrent()
threshold_type = ThresholdTypeStatic(n_neurons, v_thresh)
AbstractPopulationVertex.__init__(
self, n_neurons=n_neurons, binary="IF_curr_exp_dual.aplx",
label=label,
max_atoms_per_core=IFCurrDualExp._model_based_max_atoms_per_core,
machine_time_step=machine_time_step,
timescale_factor=timescale_factor,
spikes_per_second=spikes_per_second,
ring_buffer_sigma=ring_buffer_sigma,
incoming_spike_buffer_size=incoming_spike_buffer_size,
model_name="IF_curr_dual_exp", neuron_model=neuron_model,
input_type=input_type, synapse_type=synapse_type,
threshold_type=threshold_type, constraints=constraints)
@staticmethod
def set_model_max_atoms_per_core(new_value):
IFCurrDualExp._model_based_max_atoms_per_core = new_value | /sPyNNaker-2016.001.001.zip/sPyNNaker-2016.001.001/spynnaker/pyNN/models/neuron/builds/if_curr_dual_exp.py | 0.766468 | 0.535949 | if_curr_dual_exp.py | pypi |
from spynnaker.pyNN.models.neural_properties.neural_parameter \
import NeuronParameter
from spynnaker.pyNN.models.neuron.neuron_models.abstract_neuron_model \
import AbstractNeuronModel
from spynnaker.pyNN.utilities import utility_calls
from data_specification.enums.data_type import DataType
class NeuronModelIzh(AbstractNeuronModel):
    """ Izhikevich neuron model, with per-neuron parameters a, b, c and d,\
        initial membrane state v and u, and a constant offset current.
    """

    def __init__(self, n_neurons, machine_time_step, a, b, c, d, v_init,
                 u_init, i_offset):
        AbstractNeuronModel.__init__(self)
        self._n_neurons = n_neurons
        self._machine_time_step = machine_time_step

        # Each parameter is expanded to one value per neuron
        self._a = utility_calls.convert_param_to_numpy(a, n_neurons)
        self._b = utility_calls.convert_param_to_numpy(b, n_neurons)
        self._c = utility_calls.convert_param_to_numpy(c, n_neurons)
        self._d = utility_calls.convert_param_to_numpy(d, n_neurons)
        self._v_init = utility_calls.convert_param_to_numpy(v_init, n_neurons)
        self._u_init = utility_calls.convert_param_to_numpy(u_init, n_neurons)
        self._i_offset = utility_calls.convert_param_to_numpy(
            i_offset, n_neurons)

    @property
    def a(self):
        return self._a

    @a.setter
    def a(self, a):
        self._a = utility_calls.convert_param_to_numpy(a, self._n_neurons)

    @property
    def b(self):
        return self._b

    @b.setter
    def b(self, b):
        self._b = utility_calls.convert_param_to_numpy(b, self._n_neurons)

    @property
    def c(self):
        # Fixed: this previously returned self.c, which recursed infinitely
        # through the property; it must return the backing attribute
        return self._c

    @c.setter
    def c(self, c):
        self._c = utility_calls.convert_param_to_numpy(c, self._n_neurons)

    @property
    def d(self):
        return self._d

    @d.setter
    def d(self, d):
        self._d = utility_calls.convert_param_to_numpy(d, self._n_neurons)

    @property
    def i_offset(self):
        # Added for consistency with the other per-neuron parameters
        return self._i_offset

    @i_offset.setter
    def i_offset(self, i_offset):
        self._i_offset = utility_calls.convert_param_to_numpy(
            i_offset, self._n_neurons)

    @property
    def v_init(self):
        return self._v_init

    @v_init.setter
    def v_init(self, v_init):
        self._v_init = utility_calls.convert_param_to_numpy(
            v_init, self._n_neurons)

    @property
    def u_init(self):
        return self._u_init

    @u_init.setter
    def u_init(self, u_init):
        self._u_init = utility_calls.convert_param_to_numpy(
            u_init, self._n_neurons)

    def initialize_v(self, v_init):
        """ Set the initial membrane voltage of every neuron """
        self._v_init = utility_calls.convert_param_to_numpy(
            v_init, self._n_neurons)

    def initialize_u(self, u_init):
        """ Set the initial recovery variable of every neuron """
        self._u_init = utility_calls.convert_param_to_numpy(
            u_init, self._n_neurons)

    def get_n_neural_parameters(self):
        return 8

    def get_neural_parameters(self):
        """ The per-neuron parameters, in the order the C code expects """
        return [
            # REAL A
            NeuronParameter(self._a, DataType.S1615),
            # REAL B
            NeuronParameter(self._b, DataType.S1615),
            # REAL C
            NeuronParameter(self._c, DataType.S1615),
            # REAL D
            NeuronParameter(self._d, DataType.S1615),
            # REAL V
            NeuronParameter(self._v_init, DataType.S1615),
            # REAL U
            NeuronParameter(self._u_init, DataType.S1615),
            # offset current [nA]
            # REAL I_offset;
            NeuronParameter(self._i_offset, DataType.S1615),
            # current timestep - simple correction for threshold
            # REAL this_h;
            NeuronParameter(self._machine_time_step / 1000.0, DataType.S1615)
        ]

    def get_n_global_parameters(self):
        return 1

    def get_global_parameters(self):
        # The machine timestep in ms, shared by all neurons
        return [
            NeuronParameter(self._machine_time_step / 1000.0, DataType.S1615)
        ]

    def get_n_cpu_cycles_per_neuron(self):
        # A bit of a guess
        return 150
from spynnaker.pyNN.models.neural_properties.neural_parameter \
import NeuronParameter
from spynnaker.pyNN.models.neuron.neuron_models.neuron_model_leaky_integrate \
import NeuronModelLeakyIntegrate
from spynnaker.pyNN.utilities import utility_calls
from data_specification.enums.data_type import DataType
import numpy
class NeuronModelLeakyIntegrateAndFire(NeuronModelLeakyIntegrate):
    """ Leaky integrate-and-fire model: leaky integration plus a post-spike\
        reset voltage and a refractory period.
    """

    def __init__(self, n_neurons, machine_time_step, v_init, v_rest, tau_m, cm,
                 i_offset, v_reset, tau_refrac):
        NeuronModelLeakyIntegrate.__init__(
            self, n_neurons, machine_time_step, v_init, v_rest, tau_m, cm,
            i_offset)
        # Per-neuron arrays of the reset voltage and refractory time
        self._v_reset = utility_calls.convert_param_to_numpy(
            v_reset, n_neurons)
        self._tau_refrac = utility_calls.convert_param_to_numpy(
            tau_refrac, n_neurons)

    @property
    def v_reset(self):
        return self._v_reset

    @v_reset.setter
    def v_reset(self, v_reset):
        self._v_reset = utility_calls.convert_param_to_numpy(
            v_reset, self._n_neurons)

    @property
    def tau_refrac(self):
        return self._tau_refrac

    @tau_refrac.setter
    def tau_refrac(self, tau_refrac):
        self._tau_refrac = utility_calls.convert_param_to_numpy(
            tau_refrac, self._n_neurons)

    def get_n_neural_parameters(self):
        # Three extra parameters on top of the leaky-integrate ones
        return NeuronModelLeakyIntegrate.get_n_neural_parameters(self) + 3

    @property
    def _tau_refrac_timesteps(self):
        # Convert the refractory time from ms into whole machine timesteps
        timestep_ms = self._machine_time_step / 1000.0
        return numpy.ceil(self._tau_refrac / timestep_ms)

    def get_neural_parameters(self):
        """ The leaky-integrate parameters followed by the IF-specific ones """
        parent_params = NeuronModelLeakyIntegrate.get_neural_parameters(self)
        extra_params = [
            # int32_t refract_timer: counts down to the end of the current
            # refractory period (starts at zero, i.e. not refractory)
            NeuronParameter(0, DataType.INT32),
            # REAL V_reset: post-spike reset membrane voltage [mV]
            NeuronParameter(self._v_reset, DataType.S1615),
            # int32_t T_refract: refractory time of neuron [timesteps]
            NeuronParameter(self._tau_refrac_timesteps, DataType.INT32)
        ]
        return parent_params + extra_params

    def get_n_cpu_cycles_per_neuron(self):
        # A guess - 20 for the reset procedure
        return NeuronModelLeakyIntegrate.get_n_cpu_cycles_per_neuron(self) + 20
from spynnaker.pyNN.models.neural_properties.neural_parameter \
import NeuronParameter
from spynnaker.pyNN.models.neuron.neuron_models.abstract_neuron_model \
import AbstractNeuronModel
from spynnaker.pyNN.utilities import utility_calls
from data_specification.enums.data_type import DataType
import numpy
class NeuronModelLeakyIntegrate(AbstractNeuronModel):
    """ Leaky-integrate neuron model: the membrane voltage decays towards\
        v_rest with time constant tau_m (no spiking mechanism here).
    """

    def __init__(self, n_neurons, machine_time_step, v_init, v_rest, tau_m, cm,
                 i_offset):
        AbstractNeuronModel.__init__(self)
        self._n_neurons = n_neurons
        self._machine_time_step = machine_time_step
        self._v_init = utility_calls.convert_param_to_numpy(v_init, n_neurons)
        self._v_rest = utility_calls.convert_param_to_numpy(v_rest, n_neurons)
        self._tau_m = utility_calls.convert_param_to_numpy(tau_m, n_neurons)
        self._cm = utility_calls.convert_param_to_numpy(cm, n_neurons)
        self._i_offset = utility_calls.convert_param_to_numpy(
            i_offset, n_neurons)
        if v_init is None:
            # Default the initial voltage to the resting voltage.  Use the
            # already-converted per-neuron array so _v_init is always a
            # numpy array (previously the raw, unconverted v_rest argument
            # was stored here)
            self._v_init = self._v_rest

    def initialize_v(self, v_init):
        """ Set the initial membrane voltage of every neuron """
        self._v_init = utility_calls.convert_param_to_numpy(
            v_init, self._n_neurons)

    @property
    def v_init(self):
        return self._v_init

    @v_init.setter
    def v_init(self, v_init):
        self._v_init = utility_calls.convert_param_to_numpy(
            v_init, self._n_neurons)

    @property
    def v_rest(self):
        return self._v_rest

    @v_rest.setter
    def v_rest(self, v_rest):
        self._v_rest = utility_calls.convert_param_to_numpy(
            v_rest, self._n_neurons)

    @property
    def tau_m(self):
        return self._tau_m

    @tau_m.setter
    def tau_m(self, tau_m):
        self._tau_m = utility_calls.convert_param_to_numpy(
            tau_m, self._n_neurons)

    @property
    def cm(self):
        return self._cm

    @cm.setter
    def cm(self, cm):
        self._cm = utility_calls.convert_param_to_numpy(cm, self._n_neurons)

    @property
    def i_offset(self):
        return self._i_offset

    @i_offset.setter
    def i_offset(self, i_offset):
        self._i_offset = utility_calls.convert_param_to_numpy(
            i_offset, self._n_neurons)

    @property
    def _r_membrane(self):
        # Membrane resistance R = tau_m / C
        return self._tau_m / self._cm

    @property
    def _exp_tc(self):
        # Per-timestep decay multiplier exp(-dt / tau_m), with dt in ms
        return numpy.exp(float(-self._machine_time_step) /
                         (1000.0 * self._tau_m))

    def get_n_neural_parameters(self):
        return 5

    def get_neural_parameters(self):
        """ The per-neuron parameters, in the order the C code expects """
        return [
            # membrane voltage [mV]
            # REAL V_membrane;
            NeuronParameter(self._v_init, DataType.S1615),
            # membrane resting voltage [mV]
            # REAL V_rest;
            NeuronParameter(self._v_rest, DataType.S1615),
            # membrane resistance [MOhm]
            # REAL R_membrane;
            NeuronParameter(self._r_membrane, DataType.S1615),
            # 'fixed' computation parameter - time constant multiplier for
            # closed-form solution
            # exp( -(machine time step in ms)/(R * C) ) [.]
            # REAL exp_TC;
            NeuronParameter(self._exp_tc, DataType.S1615),
            # offset current [nA]
            # REAL I_offset;
            NeuronParameter(self._i_offset, DataType.S1615)
        ]

    def get_n_global_parameters(self):
        return 0

    def get_global_parameters(self):
        return []

    def get_n_cpu_cycles_per_neuron(self):
        # A bit of a guess
        return 80
from six import add_metaclass
from abc import ABCMeta
from abc import abstractmethod
import numpy
import math
@add_metaclass(ABCMeta)
class AbstractSynapseDynamics(object):
    """ Base class for all synapse dynamics: the abstract interface plus\
        shared helpers for packing row-based synaptic data.
    """

    NUMPY_CONNECTORS_DTYPE = [("source", "uint32"), ("target", "uint32"),
                              ("weight", "float64"), ("delay", "float64")]

    @abstractmethod
    def is_same_as(self, synapse_dynamics):
        """ Determine whether this synapse dynamics matches another
        """

    @abstractmethod
    def are_weights_signed(self):
        """ Determine whether the weights are signed values
        """

    @abstractmethod
    def get_vertex_executable_suffix(self):
        """ The suffix to append to the vertex executable for this dynamics
        """

    @abstractmethod
    def get_parameters_sdram_usage_in_bytes(self, n_neurons, n_synapse_types):
        """ The SDRAM usage of the synapse dynamics parameters in bytes
        """

    @abstractmethod
    def write_parameters(self, spec, region, machine_time_step, weight_scales):
        """ Write the synapse parameters to the data specification
        """

    def get_provenance_data(self, pre_population_label, post_population_label):
        """ The provenance data from this synapse dynamics object (none by\
            default)
        """
        return list()

    def get_delay_maximum(self, connector):
        """ The maximum delay of the synapses, delegated to the connector
        """
        return connector.get_delay_maximum()

    def get_weight_mean(
            self, connector, n_pre_slices, pre_slice_index, n_post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        """ The mean weight of the synapses, delegated to the connector
        """
        return connector.get_weight_mean(
            n_pre_slices, pre_slice_index, n_post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice)

    def get_weight_maximum(
            self, connector, n_pre_slices, pre_slice_index, n_post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        """ The maximum weight of the synapses, delegated to the connector
        """
        return connector.get_weight_maximum(
            n_pre_slices, pre_slice_index, n_post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice)

    def get_weight_variance(
            self, connector, n_pre_slices, pre_slice_index, n_post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        """ The variance in weight of the synapses, delegated to the connector
        """
        return connector.get_weight_variance(
            n_pre_slices, pre_slice_index, n_post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice)

    def convert_per_connection_data_to_rows(
            self, connection_row_indices, n_rows, data):
        """ Regroup per-connection data into one flat array per row, with\
            connection_row_indices saying which row each connection is in
        """
        return [data[connection_row_indices == row].reshape(-1)
                for row in range(n_rows)]

    def get_n_items(self, rows, item_size):
        """ The number of whole items of item_size bytes in each row, as an\
            (n_rows, 1) uint32 array
        """
        counts = [int(math.ceil(float(row.size) / float(item_size)))
                  for row in rows]
        return numpy.array(counts, dtype="uint32").reshape((-1, 1))

    def get_words(self, rows):
        """ Zero-pad each byte row to a whole number of 32-bit words and\
            view it as uint32
        """
        padded_rows = []
        for row in rows:
            n_pad_bytes = (4 - (row.size % 4)) & 0x3
            padded_rows.append(numpy.pad(
                row, (0, n_pad_bytes), mode="constant",
                constant_values=0).view("uint32"))
        return padded_rows
from spynnaker.pyNN.models.neuron.synapse_dynamics.abstract_synapse_dynamics \
import AbstractSynapseDynamics
from six import add_metaclass
from abc import ABCMeta
from abc import abstractmethod
@add_metaclass(ABCMeta)
class AbstractPlasticSynapseDynamics(AbstractSynapseDynamics):
    """ Synapse dynamics whose synapses change over time (plastic synapses)
    """

    @abstractmethod
    def get_n_words_for_plastic_connections(self, n_connections):
        """ The number of 32-bit words needed for n_connections in one row
        """

    @abstractmethod
    def get_plastic_synaptic_data(
            self, connections, connection_row_indices, n_rows,
            post_vertex_slice, n_synapse_types):
        """ Build the fixed-plastic and plastic-plastic data for each row,\
            together with the lengths of each part of each row.

            Data is returned as an array made up of an array of 32-bit words\
            for each row, for each of the fixed-plastic and plastic-plastic\
            regions.  The row into which a connection goes is given by\
            connection_row_indices, and the total number of rows by n_rows.

            Lengths are returned as an array made up of an integer for each\
            row, for each of the fixed-plastic and plastic-plastic regions.
        """

    @abstractmethod
    def get_n_plastic_plastic_words_per_row(self, pp_size):
        """ The number of plastic-plastic words to read from each row
        """

    @abstractmethod
    def get_n_fixed_plastic_words_per_row(self, fp_size):
        """ The number of fixed-plastic words to read from each row
        """

    @abstractmethod
    def get_n_synapses_in_rows(self, pp_size, fp_size):
        """ The number of synapses in each row, given the plastic sizes\
            pp_size and fp_size
        """

    @abstractmethod
    def read_plastic_synaptic_data(
            self, post_vertex_slice, n_synapse_types, pp_size, pp_data,
            fp_size, fp_data):
        """ Decode the connections held in pp_data and fp_data back into\
            connection records
        """
import numpy
import math
from spynnaker.pyNN.models.neuron.synapse_dynamics\
.abstract_static_synapse_dynamics import AbstractStaticSynapseDynamics
class SynapseDynamicsStatic(AbstractStaticSynapseDynamics):
    """ Synapse dynamics for fixed (non-plastic) synapses.
    """

    def __init__(self):
        AbstractStaticSynapseDynamics.__init__(self)

    def is_same_as(self, synapse_dynamics):
        # All static dynamics instances behave identically
        return isinstance(synapse_dynamics, SynapseDynamicsStatic)

    def are_weights_signed(self):
        return False

    def get_vertex_executable_suffix(self):
        # Static synapses use the plain executable
        return ""

    def get_parameters_sdram_usage_in_bytes(self, n_neurons, n_synapse_types):
        # No parameters are written for static synapses
        return 0

    def write_parameters(self, spec, region, machine_time_step, weight_scales):
        # Nothing to write for static synapses
        pass

    def get_n_words_for_static_connections(self, n_connections):
        # One 32-bit word per connection
        return n_connections

    def get_static_synaptic_data(
            self, connections, connection_row_indices, n_rows,
            post_vertex_slice, n_synapse_types):
        """ Pack each connection into a single 32-bit fixed-fixed word:\
            weight (top 16 bits), delay (4 bits), synapse type and the\
            target neuron index within the slice (bottom 8 bits)
        """
        n_synapse_type_bits = int(math.ceil(math.log(n_synapse_types, 2)))
        weight_bits = ((numpy.rint(
            numpy.abs(connections["weight"])).astype("uint32") &
            0xFFFF) << 16)
        delay_bits = ((connections["delay"].astype("uint32") & 0xF) <<
                      (8 + n_synapse_type_bits))
        type_bits = connections["synapse_type"].astype("uint32") << 8
        target_bits = (
            (connections["target"] - post_vertex_slice.lo_atom) & 0xFF)
        fixed_fixed = weight_bits | delay_bits | type_bits | target_bits

        fixed_fixed_rows = self.convert_per_connection_data_to_rows(
            connection_row_indices, n_rows,
            fixed_fixed.view(dtype="uint8").reshape((-1, 4)))
        ff_size = self.get_n_items(fixed_fixed_rows, 4)
        ff_data = [row.view("uint32") for row in fixed_fixed_rows]
        return (ff_data, ff_size)

    def get_n_static_words_per_row(self, ff_size):
        # The sizes are already in words
        return ff_size

    def get_n_synapses_in_rows(self, ff_size):
        # Each word is one synapse, so the word counts are the answer
        return ff_size

    def read_static_synaptic_data(
            self, post_vertex_slice, n_synapse_types, ff_size, ff_data):
        """ Unpack fixed-fixed words back into connection records """
        n_synapse_type_bits = int(math.ceil(math.log(n_synapse_types, 2)))
        data = numpy.concatenate(ff_data)
        connections = numpy.zeros(data.size, dtype=self.NUMPY_CONNECTORS_DTYPE)
        # The source (row) of each synapse is implied by the row lengths
        connections["source"] = numpy.concatenate([
            numpy.repeat(row, ff_size[row]) for row in range(len(ff_size))])
        connections["target"] = (data & 0xFF) + post_vertex_slice.lo_atom
        connections["weight"] = (data >> 16) & 0xFFFF
        connections["delay"] = (data >> (8 + n_synapse_type_bits)) & 0xF
        # A stored delay of 0 in the 4-bit field means a full 16 timesteps
        connections["delay"][connections["delay"] == 0] = 16
        return connections
import math
import numpy
from spynnaker.pyNN.models.neuron.synapse_dynamics\
.abstract_plastic_synapse_dynamics import AbstractPlasticSynapseDynamics
# How large are the time-stamps stored with each event
TIME_STAMP_BYTES = 4

# When not using the MAD scheme, how many pre-synaptic events are buffered
NUM_PRE_SYNAPTIC_EVENTS = 4


class SynapseDynamicsSTDP(AbstractPlasticSynapseDynamics):
    """ Plastic synapse dynamics implementing spike-timing-dependent\
        plasticity (STDP), built from a timing dependence and a weight\
        dependence.
    """

    def __init__(
            self, timing_dependence=None, weight_dependence=None,
            voltage_dependence=None,
            dendritic_delay_fraction=1.0, mad=True):
        AbstractPlasticSynapseDynamics.__init__(self)
        self._timing_dependence = timing_dependence
        self._weight_dependence = weight_dependence
        self._dendritic_delay_fraction = float(dendritic_delay_fraction)
        self._mad = mad

        if (self._dendritic_delay_fraction < 0.5 or
                self._dendritic_delay_fraction > 1.0):
            raise NotImplementedError(
                "dendritic_delay_fraction must be in the interval [0.5, 1.0]")

        if self._timing_dependence is None or self._weight_dependence is None:
            # Fixed: the two string fragments previously concatenated to
            # "must bespecified" - a space was missing
            raise NotImplementedError(
                "Both timing_dependence and weight_dependence must be"
                " specified")

        if voltage_dependence is not None:
            raise NotImplementedError(
                "Voltage dependence has not been implemented")

    @property
    def weight_dependence(self):
        return self._weight_dependence

    @property
    def timing_dependence(self):
        return self._timing_dependence

    @property
    def dendritic_delay_fraction(self):
        return self._dendritic_delay_fraction

    def is_same_as(self, synapse_dynamics):
        """ Same only if the timing and weight dependences, the dendritic\
            delay fraction and the MAD flag all match
        """
        if not isinstance(synapse_dynamics, SynapseDynamicsSTDP):
            return False
        return (
            self._timing_dependence.is_same_as(
                synapse_dynamics._timing_dependence) and
            self._weight_dependence.is_same_as(
                synapse_dynamics._weight_dependence) and
            (self._dendritic_delay_fraction ==
             synapse_dynamics._dendritic_delay_fraction) and
            (self._mad == synapse_dynamics._mad))

    def are_weights_signed(self):
        return False

    def get_vertex_executable_suffix(self):
        """ The executable suffix encodes the scheme plus both dependences """
        name = "_stdp_mad" if self._mad else "_stdp"
        name += "_" + self._timing_dependence.vertex_executable_suffix
        name += "_" + self._weight_dependence.vertex_executable_suffix
        return name

    def get_parameters_sdram_usage_in_bytes(self, n_neurons, n_synapse_types):
        size = 0
        size += self._timing_dependence.get_parameters_sdram_usage_in_bytes()
        size += self._weight_dependence.get_parameters_sdram_usage_in_bytes(
            n_synapse_types, self._timing_dependence.n_weight_terms)
        return size

    def write_parameters(self, spec, region, machine_time_step, weight_scales):
        spec.comment("Writing Plastic Parameters")

        # Switch focus to the region:
        spec.switch_write_focus(region)

        # Write timing dependence parameters to region
        self._timing_dependence.write_parameters(
            spec, machine_time_step, weight_scales)

        # Write weight dependence information to region
        self._weight_dependence.write_parameters(
            spec, machine_time_step, weight_scales,
            self._timing_dependence.n_weight_terms)

    @property
    def _n_header_bytes(self):
        # Number of bytes at the start of each plastic-plastic row
        if self._mad:
            # If we're using MAD, the header contains a single timestamp and
            # pre-trace
            return (
                TIME_STAMP_BYTES + self.timing_dependence.pre_trace_n_bytes)
        else:
            # Otherwise, headers consist of a counter followed by
            # NUM_PRE_SYNAPTIC_EVENTS timestamps and pre-traces
            return (
                4 + (NUM_PRE_SYNAPTIC_EVENTS *
                     (TIME_STAMP_BYTES +
                      self.timing_dependence.pre_trace_n_bytes)))

    def get_n_words_for_plastic_connections(self, n_connections):
        """ Words for both halves of a row: the fixed-plastic half-words\
            (rounded up to a whole word) plus the plastic-plastic header\
            and per-connection synapse structures
        """
        synapse_structure = self._timing_dependence.synaptic_structure
        fp_size_words = \
            n_connections if n_connections % 2 == 0 else n_connections + 1
        pp_size_bytes = (
            self._n_header_bytes +
            (synapse_structure.get_n_bytes_per_connection() * n_connections))
        pp_size_words = int(math.ceil(float(pp_size_bytes) / 4.0))

        return fp_size_words + pp_size_words

    def get_plastic_synaptic_data(
            self, connections, connection_row_indices, n_rows,
            post_vertex_slice, n_synapse_types):
        """ Pack each connection into a 16-bit fixed-plastic half-word\
            (dendritic delay, axonal delay, synapse type, target) and a\
            plastic-plastic region holding a zeroed header plus the\
            per-connection synapse structure data
        """
        n_synapse_type_bits = int(math.ceil(math.log(n_synapse_types, 2)))
        # Split the total delay into its dendritic and axonal parts
        dendritic_delays = (
            connections["delay"] * self._dendritic_delay_fraction)
        axonal_delays = (
            connections["delay"] * (1.0 - self._dendritic_delay_fraction))

        # Get the fixed data
        fixed_plastic = (
            ((dendritic_delays.astype("uint16") & 0xF) <<
             (8 + n_synapse_type_bits)) |
            ((axonal_delays.astype("uint16") & 0xF) <<
             (12 + n_synapse_type_bits)) |
            (connections["synapse_type"].astype("uint16") << 8) |
            ((connections["target"].astype("uint16") -
              post_vertex_slice.lo_atom) & 0xFF))
        fixed_plastic_rows = self.convert_per_connection_data_to_rows(
            connection_row_indices, n_rows,
            fixed_plastic.view(dtype="uint8").reshape((-1, 2)))
        fp_size = self.get_n_items(fixed_plastic_rows, 2)
        fp_data = self.get_words(fixed_plastic_rows)

        # Get the plastic data
        synapse_structure = self._timing_dependence.synaptic_structure
        plastic_plastic = synapse_structure.get_synaptic_data(connections)
        # Headers are written as zeros; the machine fills them in at runtime
        plastic_headers = numpy.zeros(
            (n_rows, self._n_header_bytes), dtype="uint8")
        plastic_plastic_row_data = self.convert_per_connection_data_to_rows(
            connection_row_indices, n_rows, plastic_plastic)
        plastic_plastic_rows = [
            numpy.concatenate((
                plastic_headers[i], plastic_plastic_row_data[i]))
            for i in range(n_rows)]
        pp_size = self.get_n_items(plastic_plastic_rows, 4)
        pp_data = self.get_words(plastic_plastic_rows)

        return (fp_data, pp_data, fp_size, pp_size)

    def get_n_plastic_plastic_words_per_row(self, pp_size):
        # pp_size is in words, so return
        return pp_size

    def get_n_fixed_plastic_words_per_row(self, fp_size):
        # fp_size is in half-words
        return numpy.ceil(fp_size / 2.0).astype(dtype="uint32")

    def get_n_synapses_in_rows(self, pp_size, fp_size):
        # Each fixed-plastic synapse is a half-word and fp_size is in half
        # words so just return it
        return fp_size

    def read_plastic_synaptic_data(
            self, post_vertex_slice, n_synapse_types, pp_size, pp_data,
            fp_size, fp_data):
        """ Decode the fixed-plastic half-words and plastic-plastic weight\
            data back into connection records
        """
        n_rows = len(fp_size)

        n_synapse_type_bits = int(math.ceil(math.log(n_synapse_types, 2)))
        data_fixed = numpy.concatenate([
            fp_data[i].view(dtype="uint16")[0:fp_size[i]]
            for i in range(n_rows)])
        pp_without_headers = [
            row.view(dtype="uint8")[self._n_header_bytes:] for row in pp_data]
        synapse_structure = self._timing_dependence.synaptic_structure

        connections = numpy.zeros(
            data_fixed.size, dtype=self.NUMPY_CONNECTORS_DTYPE)
        connections["source"] = numpy.concatenate(
            [numpy.repeat(i, fp_size[i]) for i in range(len(fp_size))])
        connections["target"] = (data_fixed & 0xFF) + post_vertex_slice.lo_atom
        connections["weight"] = synapse_structure.read_synaptic_data(
            fp_size, pp_without_headers)
        connections["delay"] = (data_fixed >> (8 + n_synapse_type_bits)) & 0xF
        # A stored delay of 0 in the 4-bit field means a full 16 timesteps
        connections["delay"][connections["delay"] == 0] = 16
        return connections

    def get_weight_mean(
            self, connector, n_pre_slices, pre_slice_index, n_post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        # Because the weights could all be changed to the maximum, the mean
        # has to be given as the maximum for scaling
        return self._weight_dependence.weight_maximum

    def get_weight_variance(
            self, connector, n_pre_slices, pre_slice_index, n_post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        # Because the weights could all be changed to the maximum, the variance
        # has to be given as no variance
        return 0.0

    def get_weight_maximum(
            self, connector, n_pre_slices, pre_slice_index, n_post_slices,
            post_slice_index, pre_vertex_slice, post_vertex_slice):
        # The maximum weight is the largest that it could be set to from
        # the weight dependence
        return self._weight_dependence.weight_maximum

    def get_provenance_data(self, pre_population_label, post_population_label):
        """ Collect provenance from both dependences, when present """
        prov_data = list()
        if self._timing_dependence is not None:
            prov_data.extend(self._timing_dependence.get_provenance_data(
                pre_population_label, post_population_label))
        if self._weight_dependence is not None:
            prov_data.extend(self._weight_dependence.get_provenance_data(
                pre_population_label, post_population_label))
        return prov_data
from spynnaker.pyNN.utilities import utility_calls
from spynnaker.pyNN.models.neural_properties.neural_parameter \
import NeuronParameter
from spynnaker.pyNN.models.neuron.synapse_types.abstract_synapse_type \
import AbstractSynapseType
from data_specification.enums.data_type import DataType
import numpy
def get_exponential_decay_and_init(tau, machine_time_step):
    """ Compute, for each synaptic time constant in tau (in ms), the\
        per-timestep exponential decay multiplier and the matching initial\
        value correction, both scaled by 2^32 and returned as uint32 arrays.
    """
    timestep_ms = float(machine_time_step)
    decay = numpy.exp(numpy.divide(-timestep_ms,
                                   numpy.multiply(1000.0, tau)))
    init = numpy.multiply(numpy.multiply(tau, numpy.subtract(1.0, decay)),
                          (1000.0 / timestep_ms))
    # Scale into a 32-bit unsigned fixed-point fraction
    scale = float(pow(2, 32))
    return (numpy.multiply(decay, scale).astype("uint32"),
            numpy.multiply(init, scale).astype("uint32"))
class SynapseTypeExponential(AbstractSynapseType):
    """ Synapse type with one exponentially-decaying excitatory input and\
        one exponentially-decaying inhibitory input.
    """

    def __init__(self, n_neurons, machine_time_step, tau_syn_E, tau_syn_I):
        AbstractSynapseType.__init__(self)
        self._n_neurons = n_neurons
        self._machine_time_step = machine_time_step
        # Time constants are expanded to one value per neuron
        self._tau_syn_E = utility_calls.convert_param_to_numpy(
            tau_syn_E, n_neurons)
        self._tau_syn_I = utility_calls.convert_param_to_numpy(
            tau_syn_I, n_neurons)

    @property
    def tau_syn_E(self):
        return self._tau_syn_E

    @tau_syn_E.setter
    def tau_syn_E(self, tau_syn_E):
        self._tau_syn_E = utility_calls.convert_param_to_numpy(
            tau_syn_E, self._n_neurons)

    @property
    def tau_syn_I(self):
        return self._tau_syn_I

    @tau_syn_I.setter
    def tau_syn_I(self, tau_syn_I):
        self._tau_syn_I = utility_calls.convert_param_to_numpy(
            tau_syn_I, self._n_neurons)

    def get_n_synapse_types(self):
        return 2

    def get_synapse_id_by_target(self, target):
        """ Map a target name to its synapse index (None when unknown) """
        return {"excitatory": 0, "inhibitory": 1}.get(target)

    def get_synapse_targets(self):
        return "excitatory", "inhibitory"

    def get_n_synapse_type_parameters(self):
        return 4

    def get_synapse_type_parameters(self):
        """ Decay/init word pairs for the excitatory then inhibitory input """
        e_decay, e_init = get_exponential_decay_and_init(
            self._tau_syn_E, self._machine_time_step)
        i_decay, i_init = get_exponential_decay_and_init(
            self._tau_syn_I, self._machine_time_step)

        parameters = []
        for value in (e_decay, e_init, i_decay, i_init):
            parameters.append(NeuronParameter(value, DataType.UINT32))
        return parameters

    def get_n_cpu_cycles_per_neuron(self):
        # A guess
        return 100
from six import add_metaclass
from abc import ABCMeta
from abc import abstractmethod
import math
@add_metaclass(ABCMeta)
class AbstractSynapseType(object):
    """ Represents the synapse types supported by a neuron model
    """

    @abstractmethod
    def get_n_synapse_types(self):
        """ The number of synapse types supported

        :return: The number of synapse types supported
        :rtype: int
        """

    @abstractmethod
    def get_synapse_id_by_target(self, target):
        """ The id of the synapse with the given target name

        :param target: The name of the synapse
        :type target: str
        :return: The id of the synapse
        :rtype: int
        """

    @abstractmethod
    def get_synapse_targets(self):
        """ The target names of the synapse type

        :return: an array of strings
        :rtype: array of str
        """

    @abstractmethod
    def get_n_synapse_type_parameters(self):
        """ The number of synapse type parameters

        :return: the number of parameters
        :rtype: int
        """

    @abstractmethod
    def get_synapse_type_parameters(self):
        """ The synapse type parameters

        :return: The parameters
        :rtype: array of\
            :py:class:`spynnaker.pyNN.models.neural_properties.neural_parameter.NeuronParameter`
        """

    @abstractmethod
    def get_n_cpu_cycles_per_neuron(self):
        """ The total number of CPU cycles executed by\
            synapse_types_shape_input, synapse_types_add_neuron_input,\
            synapse_types_get_excitatory_input and \
            synapse_types_get_inhibitory_input

        :return: The number of CPU cycles
        :rtype: int
        """

    def get_n_synapse_type_bits(self):
        """ The number of bits required to represent the synapse types

        :return: the number of bits
        :rtype: int
        """
        n_types = self.get_n_synapse_types()
        return int(math.ceil(math.log(n_types, 2)))

    def get_sdram_usage_per_neuron_in_bytes(self):
        """ The SDRAM usage of the synapse type per neuron in bytes

        :return: the number of bytes
        :rtype: int
        """
        # Each parameter occupies one 32-bit word
        return 4 * self.get_n_synapse_type_parameters()

    def get_dtcm_usage_per_neuron_in_bytes(self):
        """ The DTCM usage of the synapse type per neuron in bytes

        :return: the number of bytes
        :rtype: int
        """
        # Each parameter occupies one 32-bit word
        return 4 * self.get_n_synapse_type_parameters()
from spynnaker.pyNN.models.neuron.synapse_types.synapse_type_exponential \
import get_exponential_decay_and_init
from spynnaker.pyNN.models.neural_properties.neural_parameter \
import NeuronParameter
from spynnaker.pyNN.models.neuron.synapse_types.abstract_synapse_type \
import AbstractSynapseType
from spynnaker.pyNN.utilities import utility_calls
from data_specification.enums.data_type import DataType
class SynapseTypeDualExponential(AbstractSynapseType):
    """ Synapse type with two exponentially-decaying excitatory inputs and\
        one exponentially-decaying inhibitory input.
    """

    def __init__(self, n_neurons, machine_time_step, tau_syn_E, tau_syn_E2,
                 tau_syn_I):
        AbstractSynapseType.__init__(self)
        self._n_neurons = n_neurons
        self._machine_time_step = machine_time_step
        self._tau_syn_E = utility_calls.convert_param_to_numpy(
            tau_syn_E, n_neurons)
        self._tau_syn_E2 = utility_calls.convert_param_to_numpy(
            tau_syn_E2, n_neurons)
        self._tau_syn_I = utility_calls.convert_param_to_numpy(
            tau_syn_I, n_neurons)

    @property
    def tau_syn_E(self):
        return self._tau_syn_E

    @tau_syn_E.setter
    def tau_syn_E(self, tau_syn_E):
        self._tau_syn_E = utility_calls.convert_param_to_numpy(
            tau_syn_E, self._n_neurons)

    @property
    def tau_syn_E2(self):
        return self._tau_syn_E2

    @tau_syn_E2.setter
    def tau_syn_E2(self, tau_syn_E2):
        self._tau_syn_E2 = utility_calls.convert_param_to_numpy(
            tau_syn_E2, self._n_neurons)

    @property
    def tau_syn_I(self):
        return self._tau_syn_I

    @tau_syn_I.setter
    def tau_syn_I(self, tau_syn_I):
        # Fixed: this setter previously assigned to self._tau_syn_E,
        # silently overwriting the excitatory time constant and never
        # updating the inhibitory one
        self._tau_syn_I = utility_calls.convert_param_to_numpy(
            tau_syn_I, self._n_neurons)

    def get_n_synapse_types(self):
        return 3

    def get_synapse_id_by_target(self, target):
        """ Map a target name to its synapse index (None when unknown) """
        if target == "excitatory":
            return 0
        elif target == "excitatory2":
            return 1
        elif target == "inhibitory":
            return 2
        return None

    def get_synapse_targets(self):
        return "excitatory", "excitatory2", "inhibitory"

    def get_n_synapse_type_parameters(self):
        return 6

    def get_synapse_type_parameters(self):
        """ Decay/init word pairs for both excitatory inputs, then the\
            inhibitory one
        """
        e_decay, e_init = get_exponential_decay_and_init(
            self._tau_syn_E, self._machine_time_step)
        e_decay2, e_init2 = get_exponential_decay_and_init(
            self._tau_syn_E2, self._machine_time_step)
        i_decay, i_init = get_exponential_decay_and_init(
            self._tau_syn_I, self._machine_time_step)

        return [
            NeuronParameter(e_decay, DataType.UINT32),
            NeuronParameter(e_init, DataType.UINT32),
            NeuronParameter(e_decay2, DataType.UINT32),
            NeuronParameter(e_init2, DataType.UINT32),
            NeuronParameter(i_decay, DataType.UINT32),
            NeuronParameter(i_init, DataType.UINT32)
        ]

    def get_n_cpu_cycles_per_neuron(self):
        # A guess
        return 100
from spinn_machine.utilities.progress_bar import ProgressBar
from spinnman.messages.eieio.data_messages.eieio_data_header \
import EIEIODataHeader
import numpy
import logging
logger = logging.getLogger(__name__)


class EIEIOSpikeRecorder(object):
    """ Records spikes using EIEIO format
    """

    def __init__(self, machine_time_step):
        self._machine_time_step = machine_time_step
        self._record = False

    @property
    def record(self):
        return self._record

    @record.setter
    def record(self, record):
        self._record = record

    def get_dtcm_usage_in_bytes(self):
        # One word of DTCM is used when recording is enabled
        return 4 if self._record else 0

    def get_n_cpu_cycles(self, n_neurons):
        # An estimate of 4 cycles per neuron when recording is enabled
        return n_neurons * 4 if self._record else 0

    def get_spikes(self, label, buffer_manager, region, state_region,
                   placements, graph_mapper, partitionable_vertex,
                   base_key_function):
        """ Pull the recorded EIEIO packets off the machine and decode them\
            into an array of (neuron_id, time_in_ms) pairs, sorted by\
            neuron id then time
        """
        spike_chunks = list()
        missing_str = ""
        ms_per_tick = self._machine_time_step / 1000.0
        subvertices = \
            graph_mapper.get_subvertices_from_vertex(partitionable_vertex)
        progress_bar = ProgressBar(len(subvertices),
                                   "Getting spikes for {}".format(label))
        for subvertex in subvertices:
            placement = placements.get_placement_of_subvertex(subvertex)
            subvertex_slice = graph_mapper.get_subvertex_slice(subvertex)

            # Read the raw recording buffer for this core, noting any cores
            # whose data was incomplete
            raw_spike_data, data_missing = \
                buffer_manager.get_data_for_vertex(
                    placement, region, state_region)
            if data_missing:
                missing_str += "({}, {}, {}); ".format(
                    placement.x, placement.y, placement.p)
            spike_data = str(raw_spike_data.read_all())
            n_buffer_bytes = len(spike_data)

            # The buffer is a sequence of EIEIO packets: decode each header,
            # then read the spike keys that follow it
            offset = 0
            while offset < n_buffer_bytes:
                eieio_header = EIEIODataHeader.from_bytestring(
                    spike_data, offset)
                offset += eieio_header.size
                timestamp = eieio_header.payload_base * ms_per_tick
                timestamps = numpy.repeat([timestamp], eieio_header.count)
                keys = numpy.frombuffer(
                    spike_data, dtype="<u4", count=eieio_header.count,
                    offset=offset)
                neuron_ids = ((keys - base_key_function(subvertex)) +
                              subvertex_slice.lo_atom)
                offset += eieio_header.count * 4
                spike_chunks.append(numpy.dstack((neuron_ids, timestamps))[0])
            progress_bar.update()
        progress_bar.end()

        if missing_str != "":
            logger.warn(
                "Population {} is missing spike data in region {} from the"
                " following cores: {}".format(label, region, missing_str))
        if len(spike_chunks) == 0:
            return []
        result = numpy.vstack(spike_chunks)
        return result[numpy.lexsort((result[:, 1], result[:, 0]))]
from spinn_front_end_common.utilities import helpful_functions
from spynnaker.pyNN import exceptions
import struct
import logging
import numpy
logger = logging.getLogger(__name__)

_RECORDING_COUNT_SIZE = 4


def get_recording_region_size_in_bytes(
        n_machine_time_steps, bytes_per_timestep):
    """ Size in bytes of a recording region: the per-timestep payload plus\
        one 4-byte word per timestep.
    """
    if n_machine_time_steps is None:
        raise Exception(
            "Cannot record this parameter without a fixed run time")
    return n_machine_time_steps * (bytes_per_timestep + 4)
def get_data(transceiver, placement, region, region_size):
    """ Get the recorded data from a region

    Reads the 4-byte count word at the start of the region, validates it
    against the region size, then reads that many bytes of recorded data.

    :param transceiver: the transceiver used to read machine memory
    :param placement: the placement (x, y, p) of the recording core
    :param region: the id of the region to read
    :param region_size: the allocated size of the region in bytes
    :return: tuple of (data read, number of bytes written)
    :raises MemReadException: if the recorded count exceeds the region size
    """
    base_address = helpful_functions.locate_memory_region_on_core(
        placement.x, placement.y, placement.p, region, transceiver)

    # The first word of the region holds the number of bytes written after it
    count_buf = buffer(transceiver.read_memory(
        placement.x, placement.y, base_address, 4))
    bytes_written = struct.unpack_from("<I", count_buf)[0]

    # Subtract 4 for the word representing the size itself
    expected_size = region_size - _RECORDING_COUNT_SIZE
    if bytes_written > expected_size:
        raise exceptions.MemReadException(
            "Expected {} bytes but read {}".format(
                expected_size, bytes_written))

    recorded = transceiver.read_memory(
        placement.x, placement.y, base_address + 4, bytes_written)
    return (recorded, bytes_written)
def pull_off_cached_lists(no_loads, cache_file):
    """ Extracts numpy based data from a file

    :param no_loads: the number of numpy elements in the file
    :param cache_file: the file to extract from
    :return: the single array when one element was stored, an empty list\
        when none were, otherwise the concatenation of all stored arrays
    """
    cache_file.seek(0)
    if no_loads == 0:
        return []

    # Read back each stored array in the order it was written
    loaded = [numpy.load(cache_file) for _ in range(no_loads)]

    # Seek to the end of the file (for windows compatibility)
    cache_file.seek(0, 2)

    if no_loads == 1:
        return loaded[0]
    return numpy.concatenate(loaded)
def needs_buffering(buffer_max, space_needed, enable_buffered_recording):
    """ Determine whether recording must be streamed off the machine

    Buffering is only needed when there is something to record, buffered
    recording is enabled, and the data will not fit in the available buffer.

    :param buffer_max: the maximum size of the on-chip buffer
    :param space_needed: the total space the recording requires
    :param enable_buffered_recording: whether buffered recording is allowed
    :return: True if the recording must be buffered out, False otherwise
    """
    if space_needed == 0 or not enable_buffered_recording:
        return False
    return buffer_max < space_needed
def get_buffer_sizes(buffer_max, space_needed, enable_buffered_recording):
    """ Work out how big the recording buffer should be

    Nothing to record needs no buffer; when buffered recording is disabled
    the whole space is allocated; otherwise the buffer is capped at
    buffer_max.

    :param buffer_max: the maximum size of the on-chip buffer
    :param space_needed: the total space the recording requires
    :param enable_buffered_recording: whether buffered recording is allowed
    :return: the buffer size to allocate in bytes
    """
    if space_needed == 0:
        return 0
    if not enable_buffered_recording:
        return space_needed
    return min(buffer_max, space_needed)
from spynnaker.pyNN.utilities import constants
from spinn_front_end_common.abstract_models.abstract_changable_after_run \
import AbstractChangableAfterRun
from spynnaker.pyNN.models.common.simple_population_settable \
import SimplePopulationSettable
from spynnaker.pyNN.models.common.eieio_spike_recorder \
import EIEIOSpikeRecorder
from spynnaker.pyNN.models.common.abstract_spike_recordable \
import AbstractSpikeRecordable
from spynnaker.pyNN.utilities.conf import config
from spinn_front_end_common.abstract_models\
.abstract_has_first_machine_time_step \
import AbstractHasFirstMachineTimeStep
# spinn front end common imports
from spinn_front_end_common.abstract_models.\
abstract_provides_outgoing_partition_constraints import \
AbstractProvidesOutgoingPartitionConstraints
from spinn_front_end_common.utility_models.reverse_ip_tag_multi_cast_source \
import ReverseIpTagMultiCastSource
from spinn_front_end_common.utilities import constants as \
front_end_common_constants
from spinn_front_end_common.utilities import exceptions
from spinn_front_end_common.utility_models\
.reverse_ip_tag_multicast_source_partitioned_vertex \
import ReverseIPTagMulticastSourcePartitionedVertex
# general imports
import logging
import sys
logger = logging.getLogger(__name__)
class SpikeSourceArray(
        ReverseIpTagMultiCastSource, AbstractSpikeRecordable,
        SimplePopulationSettable, AbstractChangableAfterRun,
        AbstractHasFirstMachineTimeStep):
    """ Model for play back of spikes

    Spikes are loaded into on-chip send buffers and injected into the
    network at the requested times via a reverse IP-tag multicast source.
    """

    # Effectively unbounded (Python 2 sys.maxint); reduced via
    # set_model_max_atoms_per_core if needed
    _model_based_max_atoms_per_core = sys.maxint

    def __init__(
            self, n_neurons, machine_time_step, timescale_factor,
            spike_times=None, port=None, tag=None, ip_address=None,
            board_address=None, max_on_chip_memory_usage_for_spikes_in_bytes=(
                constants.SPIKE_BUFFER_SIZE_BUFFERING_IN),
            space_before_notification=640,
            constraints=None, label="SpikeSourceArray",
            spike_recorder_buffer_size=(
                constants.EIEIO_SPIKE_BUFFER_SIZE_BUFFERING_OUT),
            buffer_size_before_receive=(
                constants.EIEIO_BUFFER_SIZE_BEFORE_RECEIVE)):
        """
        :param n_neurons: number of neurons (spike sources) in the population
        :param machine_time_step: the machine time step in microseconds
        :param timescale_factor: the simulation time scale factor
        :param spike_times: the times at which each neuron should spike;\
            defaults to no spikes
        :param port: port for buffer notifications; read from config if None
        :param ip_address: host for buffer notifications; read from config\
            if None
        :param max_on_chip_memory_usage_for_spikes_in_bytes: cap on SDRAM\
            used for spike buffering; a machine-wide default is used if None
        :param space_before_notification: free space threshold at which the\
            host is notified to send more data
        """
        # Fall back to the configured buffer host/port when not given
        self._ip_address = ip_address
        if ip_address is None:
            self._ip_address = config.get("Buffers", "receive_buffer_host")
        self._port = port
        if port is None:
            self._port = config.getint("Buffers", "receive_buffer_port")
        if spike_times is None:
            spike_times = []
        self._minimum_sdram_for_buffering = config.getint(
            "Buffers", "minimum_buffer_sdram")
        self._using_auto_pause_and_resume = config.getboolean(
            "Buffers", "use_auto_pause_and_resume")

        # The spike times become the send buffer of the multicast source
        ReverseIpTagMultiCastSource.__init__(
            self, n_keys=n_neurons, machine_time_step=machine_time_step,
            timescale_factor=timescale_factor, label=label,
            constraints=constraints,
            max_atoms_per_core=(SpikeSourceArray.
                                _model_based_max_atoms_per_core),
            board_address=board_address,
            receive_port=None, receive_sdp_port=None, receive_tag=None,
            virtual_key=None, prefix=None, prefix_type=None, check_keys=False,
            send_buffer_times=spike_times,
            send_buffer_max_space=max_on_chip_memory_usage_for_spikes_in_bytes,
            send_buffer_space_before_notify=space_before_notification,
            send_buffer_notification_ip_address=self._ip_address,
            send_buffer_notification_port=self._port,
            send_buffer_notification_tag=tag)
        AbstractSpikeRecordable.__init__(self)
        # NOTE(review): AbstractProvidesOutgoingPartitionConstraints is
        # initialised here but is not one of this class's bases - confirm
        # this is intentional
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        SimplePopulationSettable.__init__(self)
        AbstractChangableAfterRun.__init__(self)
        AbstractHasFirstMachineTimeStep.__init__(self)

        # handle recording
        self._spike_recorder = EIEIOSpikeRecorder(machine_time_step)
        self._spike_recorder_buffer_size = spike_recorder_buffer_size
        self._buffer_size_before_receive = buffer_size_before_receive

        # Keep track of any previously generated buffers
        self._send_buffers = dict()
        self._spike_recording_region_size = None
        self._partitioned_vertices = list()
        self._partitioned_vertices_current_max_buffer_size = dict()

        # used for reset and rerun
        self._requires_mapping = True
        self._last_runtime_position = 0

        self._max_on_chip_memory_usage_for_spikes = \
            max_on_chip_memory_usage_for_spikes_in_bytes
        self._space_before_notification = space_before_notification
        if self._max_on_chip_memory_usage_for_spikes is None:
            self._max_on_chip_memory_usage_for_spikes = \
                front_end_common_constants.MAX_SIZE_OF_BUFFERED_REGION_ON_CHIP

        # check the values do not conflict with chip memory limit
        if self._max_on_chip_memory_usage_for_spikes < 0:
            raise exceptions.ConfigurationException(
                "The memory usage on chip is either beyond what is supportable"
                " on the spinnaker board being supported or you have requested"
                " a negative value for a memory usage. Please correct and"
                " try again")

        # The notification threshold cannot exceed the buffer itself
        if (self._max_on_chip_memory_usage_for_spikes <
                self._space_before_notification):
            self._space_before_notification =\
                self._max_on_chip_memory_usage_for_spikes

    @property
    def requires_mapping(self):
        # True when changes since the last run require a new mapping
        return self._requires_mapping

    def mark_no_changes(self):
        """ Record that the vertex is unchanged since the last mapping """
        self._requires_mapping = False

    @property
    def spike_times(self):
        """ The spike times of the spike source array

        :return: the times stored in the send buffer
        """
        return self.send_buffer_times

    @spike_times.setter
    def spike_times(self, spike_times):
        """ Set the spike source array's spike times. Not an extend, but an\
            actual change

        :param spike_times: the new spike times
        :return: None
        """
        self.send_buffer_times = spike_times

    # @implements AbstractSpikeRecordable.is_recording_spikes
    def is_recording_spikes(self):
        return self._spike_recorder.record

    # @implements AbstractSpikeRecordable.set_recording_spikes
    def set_recording_spikes(self):
        self.enable_recording(
            self._ip_address, self._port, self._board_address,
            self._send_buffer_notification_tag,
            self._spike_recorder_buffer_size,
            self._buffer_size_before_receive,
            self._minimum_sdram_for_buffering,
            self._using_auto_pause_and_resume)
        # Turning recording on for the first time forces a re-map
        self._requires_mapping = not self._spike_recorder.record
        self._spike_recorder.record = True

    def get_spikes(self, placements, graph_mapper, buffer_manager):
        """ Retrieve the recorded spikes from the machine """
        return self._spike_recorder.get_spikes(
            self.label, buffer_manager,
            (ReverseIPTagMulticastSourcePartitionedVertex.
             _REGIONS.RECORDING_BUFFER.value),
            (ReverseIPTagMulticastSourcePartitionedVertex.
             _REGIONS.RECORDING_BUFFER_STATE.value),
            placements, graph_mapper, self,
            lambda subvertex:
            subvertex.virtual_key if subvertex.virtual_key is not None
            else 0)

    @property
    def model_name(self):
        return "SpikeSourceArray"

    @staticmethod
    def set_model_max_atoms_per_core(new_value):
        """ Limit how many atoms of this model may be placed on one core """
        SpikeSourceArray._model_based_max_atoms_per_core = new_value

    def set_first_machine_time_step(self, first_machine_time_step):
        self.first_machine_time_step = first_machine_time_step
from spynnaker.pyNN.models.spike_source.spike_source_array import \
SpikeSourceArray
from spynnaker.pyNN import utility_calls
# general imports
import numpy
class SpikeSourceFromFile(SpikeSourceArray):
    """ SpikeSourceArray that works from a file

    Each line of the file is <time><split_value><neuron_id>, optionally
    filtered by atom id and time range before being loaded.
    """

    def __init__(
            self, n_neurons, spike_time_file, machine_time_step,
            timescale_factor, port=None, tag=None, ip_address=None,
            board_address=None, min_atom=None, max_atom=None, min_time=None,
            max_time=None, max_on_chip_memory_usage_for_spikes_in_bytes=None,
            constraints=None, split_value="\t", label="SpikeSourceArray"):
        """
        :param spike_time_file: path of the file of spikes to read in
        :param min_atom: min neuron id to read in, or None for no limit
        :param max_atom: max neuron id (exclusive) to read in, or None
        :param min_time: min time to read in, or None for no limit
        :param max_time: max time (exclusive) to read in, or None
        :param split_value: the pattern each line is split by
        """
        spike_times = utility_calls.read_spikes_from_file(
            spike_time_file, min_atom, max_atom, min_time, max_time,
            split_value)

        # Pass the parent arguments by keyword: SpikeSourceArray.__init__
        # takes (n_neurons, machine_time_step, timescale_factor,
        # spike_times, ...), so the previous positional call placed
        # spike_times in the machine_time_step slot
        SpikeSourceArray.__init__(
            self, n_neurons, machine_time_step, timescale_factor,
            spike_times=spike_times, port=port,
            tag=tag, ip_address=ip_address, board_address=board_address,
            max_on_chip_memory_usage_for_spikes_in_bytes=(
                max_on_chip_memory_usage_for_spikes_in_bytes),
            constraints=constraints, label=label)

    @staticmethod
    def _subsample_spikes_by_time(spike_array, start, stop, step):
        """ Reduce each neuron's spike train to at most one representative\
            spike per half-step interval within [start, stop)

        :param spike_array: dict of neuron id -> list of spike times
        :param start: first time (inclusive) to keep
        :param stop: last time (exclusive) to keep
        :param step: subsampling step; intervals are step / 2 wide
        :return: dict of neuron id -> subsampled list of spike times
        """
        sub_sampled_array = {}
        for neuron in spike_array:
            times = [t for t in spike_array[neuron] if start <= t < stop]
            interval = step / 2
            t_start = times[0]
            t_last = len(times)
            t_index = 0
            spikes_in_interval = 0
            subsampled_times = []
            while t_index < t_last:
                spikes_in_interval = 0
                while (t_index < t_last and
                        times[t_index] <= t_start + interval):
                    spikes_in_interval += 1
                    if spikes_in_interval >= interval:
                        t_start = times[t_index] + interval
                        subsampled_times.append(times[t_index])
                        try:
                            t_index = next(i for i in range(t_index, t_last)
                                           if times[i] >= t_start)
                        except StopIteration:
                            t_index = t_last
                            break
                    t_index += 1
                else:
                    # NOTE(review): this assigns an *index* to the time
                    # variable t_start - looks like a bug, but the intent
                    # is unclear so the behaviour is kept as-is
                    t_start = t_index
            sub_sampled_array[neuron] = subsampled_times
        return sub_sampled_array

    @staticmethod
    def _convert_spike_list_to_timed_spikes(
            spike_list, min_idx, max_idx, tmin, tmax, tstep):
        """ Give every listed neuron in [min_idx, max_idx) the same regular\
            spike train over [tmin, tmax) with step tstep
        """
        times = numpy.array(range(tmin, tmax, tstep))
        spike_ids = sorted(spike_list)
        # Use a set for O(1) membership tests instead of a linear scan
        possible_neurons = set(range(min_idx, max_idx))
        spike_array = dict([(neuron, times) for neuron in spike_ids
                            if neuron in possible_neurons])
        return spike_array

    @property
    def spike_times(self):
        """ The spike times read from the file """
        # The base class keeps the times as the send buffer; there is no
        # _spike_times attribute, so delegate to the inherited storage
        return self.send_buffer_times
from spynnaker.pyNN.utilities.random_stats.random_stats_scipy_impl \
import RandomStatsScipyImpl
from spynnaker.pyNN.utilities.random_stats.random_stats_uniform_impl \
import RandomStatsUniformImpl
from spynnaker.pyNN.models.neural_properties.randomDistributions \
import RandomDistribution
from spinn_front_end_common.utilities import exceptions
import numpy
import os
import logging
from scipy.stats import binom
logger = logging.getLogger(__name__)
def check_directory_exists_and_create_if_not(filename):
    """ helper method for checking if a directory exists, and if not,\
        create it

    :param filename: path whose parent directory should exist afterwards
    :return: None
    """
    folder = os.path.dirname(filename)
    # A bare filename has no directory component; nothing to create then
    if folder and not os.path.exists(folder):
        os.makedirs(folder)
def convert_param_to_numpy(param, no_atoms):
    """ converts parameters into numpy arrays as needed

    :param param: the param to convert (scalar, sequence or\
        RandomDistribution)
    :param no_atoms: the number of atoms available for conversion of param
    :return: a float numpy array with one value per atom
    """
    # Guard against the PyNN import having failed at module load time
    if RandomDistribution is None:
        raise exceptions.ConfigurationException(
            "Missing PyNN. Please install version 0.7.5 from "
            "http://neuralensemble.org/PyNN/")

    # Draw one value per atom from a random distribution
    if isinstance(param, RandomDistribution):
        drawn = param.next(n=no_atoms)
        if no_atoms > 1:
            return numpy.asarray(drawn, dtype="float")
        return numpy.array([drawn], dtype="float")

    # A plain scalar is broadcast across every atom
    if not hasattr(param, '__iter__'):
        return numpy.array([param] * no_atoms, dtype="float")

    # Otherwise a sequence must supply exactly one value per atom
    if len(param) != no_atoms:
        raise exceptions.ConfigurationException("The number of params does"
                                                " not equal with the number"
                                                " of atoms in the vertex ")
    return numpy.array(param, dtype="float")
def write_parameters_per_neuron(spec, vertex_slice, parameters):
    """ Write the value of every parameter for each atom in the slice

    Scalar parameter values (and length-1 lists) are repeated for every
    atom; longer lists are indexed by the atom id.

    :param spec: the data specification to write to
    :param vertex_slice: the range of atoms (lo_atom..hi_atom inclusive)
    :param parameters: the parameter objects to write
    """
    for atom in range(vertex_slice.lo_atom, vertex_slice.hi_atom + 1):
        for param in parameters:
            value = param.get_value()
            if hasattr(value, "__len__"):
                # A length-1 list behaves like a scalar
                value = value[atom] if len(value) > 1 else value[0]
            spec.write_value(
                data=value, data_type=param.get_dataspec_datatype())
def read_in_data_from_file(
        file_path, min_atom, max_atom, min_time, max_time):
    """method for helping code read in files of data values where the values are
    in a format of <Time><tab><atom_id><tab><data_value>

    :param file_path: absolute filepath to a file where gsyn values have been
                   written
    :param min_atom: min neuron id to which neurons to read in
    :param max_atom: max neuron id (exclusive) to which neurons to read in
    :param min_time: min time slot to read neurons values of.
    :param max_time: max time slot (exclusive) to read neurons values of.
    :return: a numpy destacked array of (atom id, time, value) rows sorted
        by atom id then time.
    """
    times = list()
    atom_ids = list()
    data_items = list()

    with open(file_path, 'r') as fsource:
        read_data = fsource.readlines()

    for line in read_data:
        # Lines starting with '#' are comments
        if line.startswith('#'):
            continue
        values = line.split("\t")

        # NOTE(review): eval() on file contents is unsafe if the file can
        # come from an untrusted source; kept for compatibility with
        # existing data files containing python numeric expressions
        neuron_id = int(eval(values[1]))
        time = float(eval(values[0]))
        data_value = float(eval(values[2]))
        if (min_atom <= neuron_id < max_atom and
                min_time <= time < max_time):
            times.append(time)
            atom_ids.append(neuron_id)
            data_items.append(data_value)
        else:
            # print as a function call so this also works on Python 3
            print("failed to enter {}:{}".format(neuron_id, time))

    result = numpy.dstack((atom_ids, times, data_items))[0]
    result = result[numpy.lexsort((times, atom_ids))]
    return result
def read_spikes_from_file(file_path, min_atom, max_atom, min_time, max_time,
                          split_value="\t"):
    """
    helper method for reading spikes from a file of
    <time><split_value><neuron_id> lines

    :param file_path: absolute filepath to a file where spike values have
                   been written
    :param min_atom: min neuron id to which neurons to read in, or None
    :param max_atom: max neuron id (exclusive) to read in, or None
    :param min_time: min time slot to read neurons values of, or None
    :param max_time: max time slot (exclusive) to read, or None
    :param split_value: the pattern to split by
    :return: a numpy object array with one list of spike times per neuron id
    """
    with open(file_path, 'r') as fsource:
        read_data = fsource.readlines()

    data = dict()
    max_atom_found = 0
    for line in read_data:
        # Lines starting with '#' are comments
        if line.startswith('#'):
            continue
        values = line.split(split_value)

        # NOTE(review): eval() on file contents is unsafe for untrusted
        # files; kept for compatibility with existing data files
        time = float(eval(values[0]))
        neuron_id = int(eval(values[1]))
        if ((min_atom is None or min_atom <= neuron_id) and
                (max_atom is None or neuron_id < max_atom) and
                (min_time is None or min_time <= time) and
                (max_time is None or time < max_time)):
            data.setdefault(neuron_id, list()).append(time)
            if max_atom is None and neuron_id > max_atom_found:
                max_atom_found = neuron_id

    # Previously, when max_atom was None the array was sized with
    # max_atom_found (one too small to hold that id) and the fill loop
    # iterated over range(0, None), raising a TypeError; size correctly
    if max_atom is None:
        n_atoms = max_atom_found + 1 if data else 0
    else:
        n_atoms = max_atom

    result = numpy.ndarray(shape=n_atoms, dtype=object)
    for neuron_id in range(0, n_atoms):
        result[neuron_id] = data.get(neuron_id, list())
    return result
# Converts between a distribution name, and the appropriate scipy stats for\
# that distribution
# (keys match the .name attribute of a pyNN RandomDistribution; used by the
#  get_* statistics helpers below)
_distribution_to_stats = {
    'binomial': RandomStatsScipyImpl("binom"),
    'gamma': RandomStatsScipyImpl("gamma"),
    'exponential': RandomStatsScipyImpl("expon"),
    'lognormal': RandomStatsScipyImpl("lognorm"),
    'normal': RandomStatsScipyImpl("norm"),
    'poisson': RandomStatsScipyImpl("poisson"),
    'uniform': RandomStatsUniformImpl(),
    'randint': RandomStatsScipyImpl("randint"),
    'vonmises': RandomStatsScipyImpl("vonmises"),
}
def get_probable_maximum_selected(
        n_total_selections, n_selected, selection_prob, chance=(1.0 / 100.0)):
    """ Get the likely maximum number of items that will be selected from a\
        set of n_selected from a total set of n_total_selections\
        with a probability of selection of selection_prob
    """
    # Quantile such that being exceeded across all selections has
    # probability "chance"
    quantile = 1.0 - (chance / float(n_total_selections))
    return binom.ppf(quantile, n_selected, selection_prob)
def get_probability_within_range(dist, lower, upper):
    """ Get the probability that a value will fall within the given range for\
        a given RandomDistribution dist
    """
    # The CDF difference gives the probability mass between the two bounds
    stats = _distribution_to_stats[dist.name]
    return stats.cdf(dist, upper) - stats.cdf(dist, lower)
def get_maximum_probable_value(dist, n_items, chance=(1.0 / 100.0)):
    """ Get the likely maximum value of a RandomDistribution given a\
        number of draws
    """
    # Quantile that all n_items draws stay below with probability 1 - chance
    quantile = 1.0 - (chance / float(n_items))
    return _distribution_to_stats[dist.name].ppf(dist, quantile)
def get_minimum_probable_value(dist, n_items, chance=(1.0 / 100.0)):
    """ Get the likely minimum value of a RandomDistribution given a\
        number of draws
    """
    # Mirror of get_maximum_probable_value at the lower tail
    quantile = chance / float(n_items)
    return _distribution_to_stats[dist.name].ppf(dist, quantile)
def get_mean(dist):
    """ Get the mean of a RandomDistribution
    """
    return _distribution_to_stats[dist.name].mean(dist)
def get_standard_deviation(dist):
    """ Get the standard deviation of a RandomDistribution
    """
    return _distribution_to_stats[dist.name].std(dist)
def get_variance(dist):
    """ Get the variance of a RandomDistribution
    """
    return _distribution_to_stats[dist.name].var(dist)
from spynnaker.pyNN.exceptions import SpynnakerException
class MultiCastCommand(object):
    """ A command to be sent to a vertex
    """

    def __init__(self, time, key, mask=0xFFFFFFFF, payload=None, repeat=0,
                 delay_between_repeats=0):
        """
        :param time: The time within the simulation at which to send the\
                    commmand. 0 or a positive value indicates the number of\
                    timesteps after the start of the simulation at which\
                    the command is to be sent. A negative value indicates the\
                    (number of timesteps - 1) before the end of simulation at\
                    which the command is to be sent (thus -1 means the last\
                    timestep of the simulation).
        :type time: int
        :param key: The key of the command
        :type key: int
        :param mask: A mask to indicate the important bits of the command key.\
                    By default, all bits are assumed to be important, but this\
                    can be used to optimize the sending of a group of commands
        :type mask: int
        :param payload: The payload of the command
        :type payload: int
        :param repeat: The number of times that the command should be\
                    repeated after sending it once. This could be used to\
                    ensure that the command is sent despite lost packets.\
                    Must be between 0 and 65535
        :type repeat: int
        :param delay_between_repeats: The amount of time in micro seconds to\
                    wait between sending repeats of the same command.\
                    Must be between 0 and 65535, and must be 0 if repeat is 0
        :type delay_between_repeats: int
        :raise SpynnakerException: If the repeat or delay are out of range
        """
        # Both repeat and delay must fit in a 16-bit field
        if repeat < 0 or repeat > 0xFFFF:
            raise SpynnakerException("repeat must be between 0 and 65535")
        if delay_between_repeats < 0 or delay_between_repeats > 0xFFFF:
            raise SpynnakerException(
                "delay_between_repeats must be between 0 and 65535")
        # A delay is meaningless when the command is never repeated
        # (fixed typo "betweeen" in this error message)
        if delay_between_repeats > 0 and repeat == 0:
            raise SpynnakerException(
                "If repeat is 0, delay_between_repeats must be 0")
        self._time = time
        self._key = key
        self._mask = mask
        self._payload = payload
        self._repeat = repeat
        self._delay_between_repeats = delay_between_repeats

    @property
    def time(self):
        return self._time

    @property
    def key(self):
        return self._key

    @property
    def mask(self):
        return self._mask

    @property
    def repeat(self):
        return self._repeat

    @property
    def delay_between_repeats(self):
        return self._delay_between_repeats

    def get_payload(self, routing_info, partitioned_graph, graph_mapper):
        """ Get the payload of the command.  By default, this just returns the\
            payload in the packet, but this can be overridden to compute the\
            payload from the routing information if so required.  This will be\
            called after mapping, during data specification generation.

        :param routing_info: The routing information generated during mapping\
                    from which edge keys can be obtained
        :type routing_info: \
                    :py:class:`pacman.model.routing_info.routing_info.RoutingInfo`
        :param partitioned_graph: The partitioned graph for which the routing\
                    information was obtained
        :type partitioned_graph: \
                    :py:class:`pacman.model.partitioned_graph.partitioned_graph.PartitionedGraph`
        :param graph_mapper: The mapper between the partitioned and\
                    partitionable graphs
        :type graph_mapper: \
                    :py:class:`pacman.model.graph_mapper.graph_mapper.GraphMapper`
        :return: The payload of the command, or None if there is no payload
        :rtype: int
        """
        return self._payload

    def is_payload(self):
        """ Determine if this command has a payload.  By default, this returns\
            True if the payload passed in to the constructor is not None, but\
            this can be overridden to indicate that a payload will be\
            generated, despite None being passed to the constructor

        :return: True if there is a payload, False otherwise
        :rtype: bool
        """
        return self._payload is not None
import logging
import re
levels = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL,
}
class ConfiguredFilter(object):
    """ Log filter that applies per-module levels read from configuration
    """

    def __init__(self, conf):
        self._levels = ConfiguredFormatter.construct_logging_parents(conf)
        self._default_level = levels[conf.get("Logging", "default")]

    def filter(self, record):
        """Get the level for the deepest parent, and filter appropriately."""
        threshold = ConfiguredFormatter.level_of_deepest_parent(
            self._levels, record.name)
        # Fall back to the configured default when no parent module matches
        if threshold is None:
            threshold = self._default_level
        return record.levelno >= threshold
class ConfiguredFormatter(logging.Formatter):
    """ Log formatter whose verbosity follows the configured default level
    """

    def __init__(self, conf):
        # Include the source path in messages only when debugging
        if conf.get("Logging", "default") == "debug":
            fmt = "%(asctime)-15s %(levelname)s: %(pathname)s: %(message)s"
        else:
            fmt = "%(asctime)-15s %(levelname)s: %(message)s"
        super(ConfiguredFormatter, self).__init__(
            fmt=fmt, datefmt="%Y-%m-%d %H:%M:%S")

    @staticmethod
    def construct_logging_parents(conf):
        """Create a dictionary of module names and logging levels."""
        module_levels = {}
        if not conf.has_section("Logging"):
            return module_levels

        for label, level in levels.items():
            if not conf.has_option("Logging", label):
                continue
            modules = [s.strip()
                       for s in conf.get('Logging', label).split(',')]
            # An empty entry means the option was blank; skip it entirely
            if '' not in modules:
                for module in modules:
                    module_levels[module] = level
        return module_levels

    @staticmethod
    def deepest_parent(parents, child):
        """Greediest match between child and parent."""
        # Repeatedly strip trailing dotted components off the child until
        # it matches an item in parents
        candidate = child
        while '.' in candidate and candidate not in parents:
            candidate = re.sub(r'\.[^.]+$', '', candidate)

        # If still no match then there is no deepest parent
        return candidate if candidate in parents else None

    @staticmethod
    def level_of_deepest_parent(parents, child):
        """ The logging level of the greediest match between child and parent.
        """
        parent = ConfiguredFormatter.deepest_parent(parents.keys(), child)
        if parent is None:
            return None
        return parents[parent]
from spinn_machine.utilities.progress_bar import ProgressBar
from spinn_front_end_common.interface.interface_functions.\
front_end_common_partitionable_graph_data_specification_writer \
import FrontEndCommonPartitionableGraphDataSpecificationWriter
from spinn_front_end_common.utilities.utility_objs.executable_targets \
import ExecutableTargets
from spynnaker.pyNN.models.utility_models.delay_extension_vertex \
import DelayExtensionVertex
class SpynnakerDataSpecificationWriter(
        FrontEndCommonPartitionableGraphDataSpecificationWriter):
    """ Executes data specification generation for sPyNNaker
    """

    def __call__(
            self, placements, graph_mapper, tags, executable_finder,
            partitioned_graph, partitionable_graph, routing_infos, hostname,
            report_default_directory, write_text_specs,
            app_data_runtime_folder):
        """ Generate a data specification for every placed subvertex,
            processing delay extension vertices after everything else
        """

        # Keep the results
        executable_targets = ExecutableTargets()
        dsg_targets = dict()

        placement_list = list(placements.placements)

        # create a progress bar for end users
        progress_bar = ProgressBar(
            len(placement_list), "Generating sPyNNaker data specifications")

        # Delay extensions are deferred so they are generated last
        deferred = list()
        for placement in placement_list:
            vertex = graph_mapper.get_vertex_from_subvertex(
                placement.subvertex)
            if isinstance(vertex, DelayExtensionVertex):
                deferred.append((placement, vertex))
                continue
            self._generate_data_spec_for_subvertices(
                placement, vertex, executable_targets, dsg_targets,
                graph_mapper, tags, executable_finder, partitioned_graph,
                partitionable_graph, routing_infos, hostname,
                report_default_directory, write_text_specs,
                app_data_runtime_folder)
            progress_bar.update()

        for placement, vertex in deferred:
            self._generate_data_spec_for_subvertices(
                placement, vertex, executable_targets, dsg_targets,
                graph_mapper, tags, executable_finder, partitioned_graph,
                partitionable_graph, routing_infos, hostname,
                report_default_directory, write_text_specs,
                app_data_runtime_folder)
            progress_bar.update()

        # finish the progress bar
        progress_bar.end()

        return {'executable_targets': executable_targets,
                'dsg_targets': dsg_targets}
from pacman.model.partitionable_graph.multi_cast_partitionable_edge \
import MultiCastPartitionableEdge
from pacman.model.partitioned_graph.partitioned_graph import PartitionedGraph
from pacman.model.graph_mapper.graph_mapper \
import GraphMapper
from spinn_machine.utilities.progress_bar import ProgressBar
# spynnaker imports
from spynnaker.pyNN import exceptions
from spynnaker.pyNN.models.abstract_models.abstract_filterable_edge \
import AbstractFilterableEdge
import logging
logger = logging.getLogger(__name__)
class GraphEdgeFilter(object):
    """ Removes graph edges that aren't required

    Builds a new partitioned graph (and matching graph mapper) containing
    every subvertex, but only the subedges that are not filterable.
    """

    def __call__(self, subgraph, graph_mapper):
        """
        :param subgraph: the subgraph whose edges are to be filtered
        :param graph_mapper: the graph mapper between partitionable and \
                partitioned graphs.
        :return: a new graph mapper and partitioned graph
        """
        new_sub_graph = PartitionedGraph(label=subgraph.label)
        new_graph_mapper = GraphMapper(graph_mapper.first_graph_label,
                                       subgraph.label)

        # create progress bar
        progress_bar = ProgressBar(
            len(subgraph.subvertices) + len(subgraph.subedges),
            "Filtering edges")

        # add the subverts directly, as they wont be pruned.
        for subvert in subgraph.subvertices:
            new_sub_graph.add_subvertex(subvert)
            associated_vertex = graph_mapper.get_vertex_from_subvertex(subvert)
            vertex_slice = graph_mapper.get_subvertex_slice(subvert)
            new_graph_mapper.add_subvertex(
                subvertex=subvert, vertex_slice=vertex_slice,
                vertex=associated_vertex)
            progress_bar.update()

        # start checking subedges to decide which ones need pruning....
        for subvert in subgraph.subvertices:
            out_going_partitions = \
                subgraph.outgoing_edges_partitions_from_vertex(subvert)
            for partitioner_identifier in out_going_partitions:
                for subedge in \
                        out_going_partitions[partitioner_identifier].edges:
                    # Only unfilterable edges are copied into the new graph,
                    # together with their partitionable counterparts
                    if not self._is_filterable(subedge, graph_mapper):
                        logger.debug("this subedge was not pruned {}"
                                     .format(subedge))
                        new_sub_graph.add_subedge(subedge,
                                                  partitioner_identifier)
                        associated_edge = graph_mapper.\
                            get_partitionable_edge_from_partitioned_edge(
                                subedge)
                        new_graph_mapper.add_partitioned_edge(
                            subedge, associated_edge)
                    else:
                        logger.debug("this subedge was pruned {}"
                                     .format(subedge))
                    progress_bar.update()
        progress_bar.end()

        # returned the pruned partitioned_graph and graph_mapper
        return {'new_sub_graph': new_sub_graph,
                'new_graph_mapper': new_graph_mapper}

    @staticmethod
    def _is_filterable(subedge, graph_mapper):
        """ Determine whether a subedge can be pruned from the graph

        :param subedge: the partitioned edge to check
        :param graph_mapper: mapper used to find the partitionable edge
        :raise FilterableException: if the edge type gives no way to decide
        """
        associated_edge = \
            graph_mapper.get_partitionable_edge_from_partitioned_edge(subedge)
        # A filterable subedge decides for itself; plain multicast edges
        # are always kept
        if isinstance(subedge, AbstractFilterableEdge):
            return subedge.filter_sub_edge(graph_mapper)
        elif isinstance(associated_edge, MultiCastPartitionableEdge):
            return False
        else:
            raise exceptions.FilterableException(
                "cannot figure out if subedge {} is prunable or not"
                .format(subedge))
import inspect as __inspect
import logging as __logging
import os as __os
import numpy as __numpy
import spynnaker7
from pyNN.random import NumpyRNG, RandomDistribution
from pyNN.space import \
distance, Space, Line, Grid2D, Grid3D, Cuboid, Sphere, RandomStructure
from spinn_front_end_common.utilities.exceptions import ConfigurationException
from spinn_front_end_common.utilities import globals_variables
from spynnaker.pyNN.models.neural_projections \
.delay_afferent_application_edge import DelayAfferentApplicationEdge
from spynnaker.pyNN.models.neural_projections.projection_application_edge \
import ProjectionApplicationEdge
from spynnaker.pyNN.models.neuron.builds.if_cond_exp_base \
import IFCondExpBase as IF_cond_exp
from spynnaker.pyNN.models.neuron.builds.if_curr_exp_base \
import IFCurrExpBase as IF_curr_exp
from spynnaker.pyNN.models.neuron.synapse_dynamics.pynn_synapse_dynamics \
import PyNNSynapseDynamics as SynapseDynamics
from spynnaker.pyNN.models.neuron.synapse_dynamics.synapse_dynamics_stdp \
import SynapseDynamicsSTDP as STDPMechanism
from spynnaker.pyNN.models.spike_source.spike_source_array \
import SpikeSourceArray
from spynnaker.pyNN.models.spike_source.spike_source_from_file \
import SpikeSourceFromFile
from spynnaker.pyNN.models.spike_source.spike_source_poisson \
import SpikeSourcePoisson
from spynnaker.pyNN.models.utility_models.delay_extension_vertex \
import DelayExtensionVertex
from spynnaker.pyNN.utilities import utility_calls
from spynnaker7.pyNN.models.connectors.all_to_all_connector \
import AllToAllConnector
from spynnaker7.pyNN.models.connectors. \
distance_dependent_probability_connector import \
DistanceDependentProbabilityConnector
from spynnaker7.pyNN.models.connectors. \
fixed_number_post_connector import FixedNumberPostConnector
from spynnaker7.pyNN.models.connectors. \
fixed_number_pre_connector import FixedNumberPreConnector
from spynnaker7.pyNN.models.connectors. \
fixed_probability_connector import FixedProbabilityConnector
from spynnaker7.pyNN.models.connectors.from_file_connector \
import FromFileConnector
from spynnaker7.pyNN.models.connectors.from_list_connector import \
FromListConnector
from spynnaker7.pyNN.models.connectors.multapse_connector \
import MultapseConnector
from spynnaker7.pyNN.models.connectors.one_to_one_connector \
import OneToOneConnector
from spynnaker7.pyNN.models.plasticity_components.timing_dependence \
.timing_dependence_spike_pair \
import TimingDependenceSpikePair as SpikePairRule
from spynnaker7.pyNN.models.plasticity_components.weight_dependence.\
weight_dependence_additive \
import WeightDependenceAdditive as AdditiveWeightDependence
from spynnaker7.pyNN.models.plasticity_components.weight_dependence \
.weight_dependence_multiplicative \
import WeightDependenceMultiplicative as MultiplicativeWeightDependence
from spynnaker7.pyNN import external_devices
from spynnaker7.pyNN import extra_models
from spynnaker7.pyNN.spinnaker import Spinnaker as __Spinnaker
from spynnaker7._version import __version__ # NOQA
from spynnaker7._version import __version_name__ # NOQA
from spynnaker7._version import __version_month__ # NOQA
from spynnaker7._version import __version_year__ # NOQA
# traditional logger
logger = __logging.getLogger(__name__)
# List of binary search paths
_binary_search_paths = []
# Explicit public API of this module (PyNN 0.7-style front end)
__all__ = [
    # Ugly, but tests expect it
    'utility_calls',
    # Implementations of the neuroscience models
    'IF_cond_exp', 'IF_curr_exp',
    'DelayAfferentApplicationEdge', 'DelayExtensionVertex',
    'ProjectionApplicationEdge', 'SpikeSourcePoisson', 'SpikeSourceArray',
    'SpikeSourceFromFile', 'AllToAllConnector', 'FixedNumberPreConnector',
    'FixedProbabilityConnector', 'FromListConnector', 'FromFileConnector',
    'MultapseConnector', 'OneToOneConnector', 'FixedNumberPostConnector',
    'DistanceDependentProbabilityConnector', 'SynapseDynamics',
    'STDPMechanism', 'AdditiveWeightDependence', 'SpikePairRule',
    'MultiplicativeWeightDependence',
    # Stuff from pyNN.random
    'NumpyRNG', 'RandomDistribution',
    # Stuff from pyNN.space
    'distance', 'Space', 'Line', 'Grid2D', 'Grid3D', 'Cuboid', 'Sphere',
    'RandomStructure',
    # External devices and extra models
    'external_devices', 'extra_models',
    # Stuff that we define
    'end', 'setup', 'run', 'get_spynnaker',
    'num_processes', 'rank', 'reset', 'set_number_of_neurons_per_core',
    'Population', 'Projection',
    'NativeRNG', 'get_current_time', 'create', 'connect', 'get_time_step',
    'get_min_delay', 'get_max_delay', 'set', 'initialize', 'record',
    'record_v', 'record_gsyn', 'get_machine']
def end():
    """ Finish the simulation and do any necessary cleaning up.

    Stops the underlying simulator, which unregisters the controller and
    prints any data recorded using the low-level API.
    """
    simulator = globals_variables.get_simulator()
    simulator.stop()
    # _spinnaker = None
def get_spynnaker():
    """ Helper for other plugins to add things to the graph.

    :return: the current spinnaker API, or None if before setup or\
        after end.
    """
    return globals_variables.get_simulator()
def num_processes():
    """ Return the number of MPI processes.

    Not used for SpiNNaker; always returns 1.
    """
    return 1
def rank():
    """ Return the MPI rank of the current node.

    Not used for SpiNNaker; always returns 0, the minimum rank,
    representing the front node.
    """
    return 0
def reset():
    """ Reset the simulation time to zero, and start the clock.
    """
    simulator = globals_variables.get_not_running_simulator()
    simulator.reset()
def run(run_time=None):
    """ Run the simulation for run_time ms.

    :param run_time: simulation length (in ms)
    """
    simulator = globals_variables.get_simulator()
    simulator.run(run_time)
def setup(timestep=0.1, min_delay=None, max_delay=None, machine=None,
          database_socket_addresses=None, n_chips_required=None,
          **extra_params):
    """ Should be called at the very beginning of a script.

    extra_params contains any keyword arguments that are required by a\
        given simulator but not by others; they are not supported here and\
        are reported with a warning.

    :param timestep: The timestep in milliseconds.\
        Value will be rounded up to whole microseconds.\
        Set to None to use the value from the config file
    :param min_delay: the minimum number of time steps supported for delays
    :param max_delay: the maximum number of time steps supported for delays
    :param machine: the hostname or IP address of the SpiNNaker machine\
        used to run the simulation
    :param database_socket_addresses: the set of sockets needed to be\
        listened to for the database notification protocol
    :param n_chips_required: The number of chips required for the simulation
    :param extra_params: other simulator-specific keyword arguments\
        (ignored, with a warning)
    :return: the MPI rank (always 0 on SpiNNaker)
    :rtype: int
    """
    global _binary_search_paths
    logger.info(
        "sPyNNaker (c) {} APT Group, University of Manchester".format(
            __version_year__))
    parent_dir = __os.path.split(__os.path.split(spynnaker7.__file__)[0])[0]
    logger.info(
        "Release version {}({}) - {} {}. Installed in folder {}".format(
            __version__, __version_name__, __version_month__, __version_year__,
            parent_dir))
    # warn (rather than fail) about unsupported simulator-specific arguments
    if extra_params:
        # logger.warn is deprecated; warning() is the supported spelling
        logger.warning(
            "Extra params {} have been applied to the setup "
            "command which we do not consider".format(extra_params))
    # creating the simulator registers it with globals_variables
    __Spinnaker(
        host_name=machine, timestep=timestep, min_delay=min_delay,
        max_delay=max_delay,
        database_socket_addresses=database_socket_addresses,
        n_chips_required=n_chips_required)
    # the PyNN API expects the MPI rank to be returned
    return rank()
def set_number_of_neurons_per_core(neuron_type, max_permitted):
    """ Set a ceiling on the number of neurons of a given type that can be\
        placed on a single core.

    :param neuron_type: the neuron type to limit; either the model class\
        itself, or its name as a string
    :param max_permitted: the maximum number of atoms per core
    :type max_permitted: int
    :rtype: None
    """
    # resolve a string name into the model class defined in this module
    if not __inspect.isclass(neuron_type):
        if neuron_type not in globals():
            raise Exception("Unknown Vertex Type {}".format(neuron_type))
        neuron_type = globals()[neuron_type]
    simulator = globals_variables.get_not_running_simulator()
    simulator.set_number_of_neurons_per_core(neuron_type, max_permitted)
# noinspection PyPep8Naming
def Population(size, cellclass, cellparams, structure=None, label=None):
    """ Build a new population.

    :param size: the number of neurons
    :param cellclass: the neuron model class to be created
    :param cellparams: the parameters to put into the neuron model
    :param structure: optional spatial structure for the population
    :param label: a human-readable label
    :return: a new population object
    """
    globals_variables.get_simulator().verify_not_running()
    simulator = globals_variables.get_not_running_simulator()
    return simulator.create_population(
        size, cellclass, cellparams, structure, label)
# noinspection PyPep8Naming
def Projection(presynaptic_population, postsynaptic_population,
               connector, source=None, target='excitatory',
               synapse_dynamics=None, label=None, rng=None):
    """ Build a new projection object.

    :param presynaptic_population: the source population
    :param postsynaptic_population: the destination population
    :param connector: the connector describing connectivity
    :param source: passed through to the simulator's projection creation
    :param target: the type of synapse, e.g. excitatory or inhibitory
    :param synapse_dynamics: plasticity configuration
    :param label: a human-readable label
    :param rng: random number generator, if needed
    :return: a new Projection object
    """
    globals_variables.get_simulator().verify_not_running()
    simulator = globals_variables.get_not_running_simulator()
    return simulator.create_projection(
        presynaptic_population, postsynaptic_population, connector, source,
        target, synapse_dynamics, label, rng)
def NativeRNG(seed_value):
    """ Fix the seed of numpy's global random number generator.

    :param seed_value: the seed to apply
    """
    __numpy.random.seed(seed_value)
def get_current_time():
    """ Get the current simulation time.

    :return: the runtime currently, as reported by the simulator
    """
    simulator = globals_variables.get_simulator()
    return simulator.get_current_time()
# =============================================================================
# Low-level API for creating, connecting and recording from individual neurons
# =============================================================================
def create(cellclass, cellparams=None, n=1):
    """ Create n cells, all of the same type.

    If n > 1, return a list of cell ids/references.
    If n == 1, return just the single id.
    """
    # avoid a mutable default argument by substituting an empty dict here
    return Population(
        n, cellclass, {} if cellparams is None else cellparams)
def connect(source, target, weight=0.0, delay=None, synapse_type="excitatory",
            p=1, rng=None):
    """ Connect a source of spikes to a synaptic target.

    source and target can both be individual cells or lists of cells, in
    which case all possible connections are made with probability p, using
    either the random number generator supplied, or the default rng
    otherwise. Weights should be in nA or uS.
    """
    probability_connector = FixedProbabilityConnector(
        p_connect=p, weights=weight, delays=delay)
    return Projection(
        source, target, probability_connector, target=synapse_type, rng=rng)
def get_time_step():
    """ The timestep requested.

    :return: the simulator's machine time step
    """
    simulator = globals_variables.get_simulator()
    return simulator.machine_time_step
def get_min_delay():
    """ The minimum allowed synaptic delay.

    :return: the simulator's minimum delay
    """
    simulator = globals_variables.get_simulator()
    return simulator.min_delay
def get_max_delay():
    """ The maximum allowed synaptic delay.

    :return: the simulator's maximum delay
    """
    simulator = globals_variables.get_simulator()
    return simulator.max_delay
def set(cells, param, val=None):  # @ReservedAssignment
    """ Set one or more parameters of an individual cell or list of cells.

    param can be a dict, in which case val should not be supplied, or a
    string giving the parameter name, in which case val is the parameter
    value.

    :param cells: the population whose parameters are to be set
    :param param: a parameter name, or a dict of name -> value
    :param val: the value, when param is a name string
    """
    # NOTE(review): the previous "assert isinstance(cells, Population)" was
    # broken - Population in this module is a factory *function*, not a
    # class, so isinstance() raised TypeError on every call (and asserts
    # are stripped under -O anyway).  Delegate directly to the population.
    cells.set(param, val)
def initialize(cells, variable, value):
    """ Set the initial value of a state variable for a population.

    :param cells: the population to initialise
    :param variable: the name of the state variable to initialise
    :param value: the initial value to assign
    """
    cells.initialize(variable, value)
def record(source, filename):
    """ Record spikes to a file. source should be a Population.

    :param source: the Population to record from
    :param filename: the file to write the recorded spikes to
    """
    source.record(to_file=filename)
def record_v(source, filename):
    """ Record membrane potential (v) to a file. source should be a\
        Population.

    :param source: the Population to record from
    :param filename: the file to write the recorded data to
    """
    source.record_v(to_file=filename)
def record_gsyn(source, filename):
    """ Record synaptic conductance (gsyn) to a file. source should be a\
        Population.

    :param source: the Population to record from
    :param filename: the file to write the recorded data to
    """
    source.record_gsyn(to_file=filename)
def get_machine():
    """ Get the SpiNNaker machine in use.

    :return: the machine object held by the simulator
    :raises ConfigurationException: if called before setup()
    """
    if not globals_variables.has_simulator():
        # grammar fix in the user-facing message ("have not ran" -> "run")
        raise ConfigurationException(
            "You currently have not run setup, please do so before calling "
            "get_machine")
    return globals_variables.get_simulator().machine
from spynnaker.pyNN.models.neuron.synapse_dynamics.synapse_dynamics_static \
import SynapseDynamicsStatic
from spynnaker.pyNN.models.neuron.abstract_population_vertex \
import AbstractPopulationVertex
from spynnaker.pyNN.models.pynn_projection_common import PyNNProjectionCommon
from spinn_front_end_common.utilities.exceptions import ConfigurationException
import logging
logger = logging.getLogger(__name__)
# noinspection PyProtectedMember
class Projection(PyNNProjectionCommon):
    """ A container for all the connections of a given type (same synapse\
        type and plasticity mechanisms) between two populations, together\
        with methods to set parameters of those connections, including of\
        plasticity mechanisms.
    """

    # noinspection PyUnusedLocal
    def __init__(
            self, presynaptic_population, postsynaptic_population, label,
            connector, spinnaker_control, machine_time_step, user_max_delay,
            timescale_factor, source=None, target='excitatory',
            synapse_dynamics=None, rng=None):
        # use static (non-plastic) synapses when no dynamics are supplied
        if synapse_dynamics is None:
            synapse_dynamics_stdp = SynapseDynamicsStatic()
        else:
            synapse_dynamics_stdp = synapse_dynamics.slow

        PyNNProjectionCommon.__init__(
            self, spinnaker_control=spinnaker_control, connector=connector,
            synapse_dynamics_stdp=synapse_dynamics_stdp,
            target=target, pre_synaptic_population=presynaptic_population,
            post_synaptic_population=postsynaptic_population,
            rng=rng, machine_time_step=machine_time_step,
            user_max_delay=user_max_delay, label=label,
            time_scale_factor=spinnaker_control.time_scale_factor)

        # only vertices derived from AbstractPopulationVertex can accept
        # synaptic input
        if not isinstance(postsynaptic_population._get_vertex,
                          AbstractPopulationVertex):
            raise ConfigurationException(
                "postsynaptic population is not designed to receive"
                " synaptic projections")

    def describe(self, template='projection_default.txt', engine='default'):
        """ Return a human-readable description of the projection.

        The output may be customised by specifying a different template
        together with an associated template engine
        (see ``pyNN.descriptions``).

        If template is None, then a dictionary containing the template
        context will be returned.
        """
        # TODO
        raise NotImplementedError

    def __getitem__(self, i):
        """Return the `i`th connection within the Projection."""
        # TODO: Need to work out what is being returned
        raise NotImplementedError

    # noinspection PyPep8Naming
    def getSynapseDynamics(
            self, parameter_name, format='list',  # @ReservedAssignment
            gather=True):  # @UnusedVariable
        """ Get parameters of the dynamic synapses for all connections in\
            this Projection.

        :param parameter_name: the name of the dynamics parameter to read
        :param format: "list" for a list of values, anything else for an\
            array form
        :param gather: ignored; SpiNNaker always gathers from every core
        """
        if not gather:
            # logger.warn is deprecated; warning() is the supported spelling
            logger.warning("Spynnaker always gathers from every core.")
        fixed_value = self._synapse_information.synapse_dynamics.get_value(
            parameter_name)
        return self._get_synaptic_data(
            format == "list", None, [(parameter_name, fixed_value)])

    # noinspection PyPep8Naming
    def getWeights(
            self, format='list',  # @ReservedAssignment
            gather=True):  # @UnusedVariable
        """
        Get synaptic weights for all connections in this Projection.

        Possible formats are: a list of length equal to the number of
        connections in the projection, a 2D weight array (with NaN for
        non-existent connections). Note that for the array format, if there
        is more than one connection between two cells, the summed weight
        will be given.

        :param format: the type of format to be returned (only support\
            "list")
        :param gather: gather the weights from stuff. currently has no\
            meaning in spinnaker when set to false. Therefore is always true
        """
        logger.info("Getting weights from Projection {}".format(self._label))
        return self._get_synaptic_data(format == "list", ["weight"])

    # noinspection PyPep8Naming
    def getDelays(self, format='list', gather=True):  # @ReservedAssignment
        """
        Get synaptic delays for all connections in this Projection.

        Possible formats are: a list of length equal to the number of
        connections in the projection, a 2D delay array (with NaN for
        non-existent connections).
        """
        # BUG FIX: this previously requested "weight" data, so getDelays()
        # returned the weights instead of the delays
        return self._get_synaptic_data(format == "list", ["delay"])

    def __len__(self):
        """ Return the total number of local connections.
        """
        # TODO: Need to work out what this means
        raise NotImplementedError

    # noinspection PyPep8Naming
    def printDelays(self, file_name, list_format='list', gather=True):
        """ Print synaptic delays to file. In the array format, zeros are\
            printed for non-existent connections.
        """
        # TODO:
        raise NotImplementedError

    # noinspection PyPep8Naming
    def printWeights(self, file_name, list_format='list', gather=True):
        """ Print synaptic weights to file. In the array format, zeros are\
            printed for non-existent connections.
        """
        # TODO:
        raise NotImplementedError

    # noinspection PyPep8Naming
    def randomizeWeights(self, rand_distr):
        """ Set weights to random values taken from rand_distr.
        """
        # TODO: Requires that the synapse list is not created proactively
        raise NotImplementedError

    # noinspection PyPep8Naming
    def randomizeDelays(self, rand_distr):
        """ Set delays to random values taken from rand_distr.
        """
        # TODO: Requires that the synapse list is not created proactively
        raise NotImplementedError

    # noinspection PyPep8Naming
    def randomizeSynapseDynamics(self, param, rand_distr):
        """ Set parameters of the synapse dynamics to values taken from\
            rand_distr
        """
        # TODO: Look at what this is randomising
        raise NotImplementedError

    # noinspection PyPep8Naming
    def saveConnections(self, file_name, gather=True, compatible_output=True):
        """ Save connections to file in a format suitable for reading in\
            with a FromFileConnector.
        """
        # TODO
        raise NotImplementedError

    # noinspection PyPep8Naming
    def setDelays(self, d):
        """ Set the delays

        d can be a single number, in which case all delays are set to this\
        value, or a list/1D array of length equal to the number of\
        connections in the projection, or a 2D array with the same\
        dimensions as the connectivity matrix (as returned by\
        `getDelays(format='array')`).
        """
        # TODO: Requires that the synapse list is not created proactively
        raise NotImplementedError

    # noinspection PyPep8Naming
    def setSynapseDynamics(self, param, value):
        """ Set parameters of the dynamic synapses for all connections in\
            this projection.
        """
        # TODO: Need to set this in the edge
        raise NotImplementedError

    # noinspection PyPep8Naming
    def setWeights(self, w):
        """ Set the weights

        w can be a single number, in which case all weights are set to this\
        value, or a list/1D array of length equal to the number of\
        connections in the projection, or a 2D array with the same\
        dimensions as the connectivity matrix (as returned by\
        `getWeights(format='array')`).\
        Weights should be in nA for current-based and uS for\
        conductance-based synapses.
        """
        # TODO: Requires that the synapse list is not created proactively
        raise NotImplementedError

    # noinspection PyPep8Naming
    def weightHistogram(self, min_weight=None, max_weight=None, nbins=10):
        """ Return a histogram of synaptic weights.

        If min and max are not given, the minimum and maximum weights are\
        calculated automatically.
        """
        # TODO
        raise NotImplementedError
from spynnaker.pyNN.models.neural_projections.connectors. \
distance_dependent_probability_connector import \
DistanceDependentProbabilityConnector as \
CommonDistanceDependentProbabilityConnector
from pyNN.space import Space
class DistanceDependentProbabilityConnector(
        CommonDistanceDependentProbabilityConnector):
    """ Make connections using a distribution which varies with distance.
    """

    def __init__(
            self, d_expression, weights=0.0, delays=1,
            allow_self_connections=True, space=None,
            safe=True, verbose=False, n_connections=None):
        """
        :param `string` d_expression:
            the right-hand side of a valid python expression for
            probability, involving 'd', e.g. "exp(-abs(d))", or "d<3",
            that can be parsed by eval(), that computes the distance
            dependent distribution
        :param `bool` allow_self_connections:
            if the connector is used to connect a
            Population to itself, this flag determines whether a neuron is
            allowed to connect to itself, or only to other neurons in the
            Population.
        :param `pyNN.Space` space:
            a Space object, needed if you wish to specify distance-
            dependent weights or delays; a default Space is created when
            None is given
        :param `int` n_connections:
            The number of efferent synaptic connections per neuron.
        :param safe: if True, check that weights and delays have valid
            values. If False, this check is skipped.
        :param `float` weights:
            may either be a float, a !RandomDistribution object, a list/
            1D array with at least as many items as connections to be
            created, or a distance dependence as per a d_expression.
            Units nA.
        :param `float` delays: -- as `weights`. If `None`, all synaptic
            delays will be set to the global minimum delay.
        """
        CommonDistanceDependentProbabilityConnector.__init__(
            self, d_expression=d_expression,
            allow_self_connections=allow_self_connections, safe=safe,
            verbose=verbose, n_connections=n_connections)
        self.set_weights_and_delays(weights, delays)
        # BUG FIX: the default used to be "space=Space()" in the signature,
        # which is evaluated once at class-definition time, so every
        # connector constructed without an explicit space shared the same
        # Space instance.  Create a fresh default per instance instead.
        self.set_space(space if space is not None else Space())
import logging
# fec imports
from spinn_front_end_common.abstract_models \
import AbstractSendMeMulticastCommandsVertex
from spinn_front_end_common.utilities import globals_variables
from spinn_front_end_common.utilities.notification_protocol \
import SocketAddress
from spinn_front_end_common.utility_models import LivePacketGather
from spinn_front_end_common.utilities.notification_protocol \
import SocketAddress as __SockAddr
# spinnman imports
from spinnman.messages.eieio.eieio_type import EIEIOType
# main
from spynnaker.pyNN.connections \
import EthernetCommandConnection
from spynnaker.pyNN.connections \
import EthernetControlConnection
from spynnaker.pyNN.connections \
import SpynnakerLiveSpikesConnection
# connections
from spynnaker.pyNN.external_devices_models \
import AbstractEthernetController
from spynnaker.pyNN.external_devices_models \
import AbstractEthernetSensor
from spynnaker.pyNN.external_devices_models \
import ArbitraryFPGADevice
from spynnaker.pyNN.external_devices_models \
import ExternalCochleaDevice
from spynnaker.pyNN.external_devices_models \
import ExternalFPGARetinaDevice
from spynnaker.pyNN.external_devices_models \
import MunichMotorDevice
from spynnaker.pyNN.external_devices_models \
import MunichRetinaDevice
# PushBot Ethernet control
from spynnaker.pyNN.external_devices_models.push_bot.\
push_bot_control_modules import PushBotLifEthernet
# PushBotSpiNNakerLink control
from spynnaker.pyNN.external_devices_models.push_bot.\
push_bot_control_modules import PushBotLifSpinnakerLink
from spynnaker.pyNN.external_devices_models.push_bot \
.push_bot_ethernet import PushBotEthernetLaserDevice
from spynnaker.pyNN.external_devices_models.push_bot \
.push_bot_ethernet import PushBotEthernetLEDDevice
from spynnaker.pyNN.external_devices_models.push_bot \
.push_bot_ethernet import PushBotEthernetMotorDevice
from spynnaker.pyNN.external_devices_models.push_bot \
.push_bot_ethernet import PushBotEthernetRetinaDevice
from spynnaker.pyNN.external_devices_models.push_bot \
.push_bot_ethernet import PushBotEthernetSpeakerDevice
from spynnaker.pyNN.external_devices_models.push_bot \
.push_bot_spinnaker_link import PushBotSpiNNakerLinkLaserDevice
from spynnaker.pyNN.external_devices_models.push_bot \
.push_bot_spinnaker_link import PushBotSpiNNakerLinkLEDDevice
from spynnaker.pyNN.external_devices_models.push_bot \
.push_bot_spinnaker_link import PushBotSpiNNakerLinkMotorDevice
from spynnaker.pyNN.external_devices_models.push_bot \
.push_bot_spinnaker_link import PushBotSpiNNakerLinkRetinaDevice
from spynnaker.pyNN.external_devices_models.push_bot \
.push_bot_spinnaker_link import PushBotSpiNNakerLinkSpeakerDevice
# PushBot Parameters
from spynnaker.pyNN.external_devices_models.push_bot. \
push_bot_parameters import PushBotLED
from spynnaker.pyNN.external_devices_models.push_bot. \
push_bot_parameters import PushBotMotor
from spynnaker.pyNN.external_devices_models.push_bot. \
push_bot_parameters import PushBotRetinaResolution
from spynnaker.pyNN.external_devices_models.push_bot. \
push_bot_parameters import PushBotLaser
from spynnaker.pyNN.external_devices_models.push_bot. \
push_bot_parameters import PushBotSpeaker
# push bot retina viewer
from spynnaker.pyNN.external_devices_models.push_bot. \
push_bot_parameters import PushBotRetinaViewer
# other plugins
from spynnaker.pyNN.protocols \
import MunichIoSpiNNakerLinkProtocol
from spynnaker.pyNN.spynnaker_external_device_plugin_manager \
import SpynnakerExternalDevicePluginManager
from spynnaker.pyNN.external_devices_models import ExternalDeviceLifControl
# injector
from spynnaker.pyNN.models.utility_models \
import SpikeInjector as ExternalDeviceSpikeInjector
# useful functions
# convenience aliases onto the external device plugin manager's methods
add_database_socket_address = \
    SpynnakerExternalDevicePluginManager.add_database_socket_address
activate_live_output_to = \
    SpynnakerExternalDevicePluginManager.activate_live_output_to
activate_live_output_for = \
    SpynnakerExternalDevicePluginManager.activate_live_output_for
logger = logging.getLogger(__name__)
# module-level singleton manager used by the functions defined below
spynnaker_external_devices = SpynnakerExternalDevicePluginManager()
__all__ = [
    "EIEIOType",
    # General Devices
    "ExternalCochleaDevice", "ExternalFPGARetinaDevice",
    "MunichRetinaDevice", "MunichMotorDevice",
    "ArbitraryFPGADevice", "PushBotRetinaViewer",
    "ExternalDeviceLifControl",
    # Pushbot Parameters
    "MunichIoSpiNNakerLinkProtocol",
    "PushBotLaser", "PushBotLED", "PushBotMotor", "PushBotSpeaker",
    "PushBotRetinaResolution",
    # Pushbot Ethernet Parts
    "PushBotLifEthernet", "PushBotEthernetLaserDevice",
    "PushBotEthernetLEDDevice", "PushBotEthernetMotorDevice",
    "PushBotEthernetSpeakerDevice", "PushBotEthernetRetinaDevice",
    # Pushbot SpiNNaker Link Parts
    "PushBotLifSpinnakerLink", "PushBotSpiNNakerLinkLaserDevice",
    "PushBotSpiNNakerLinkLEDDevice", "PushBotSpiNNakerLinkMotorDevice",
    "PushBotSpiNNakerLinkSpeakerDevice", "PushBotSpiNNakerLinkRetinaDevice",
    # Connections
    "SpynnakerLiveSpikesConnection",
    # Provided functions
    "activate_live_output_for",
    "activate_live_output_to",
    "SpikeInjector",
    "register_database_notification_request"
]
def register_database_notification_request(hostname, notify_port, ack_port):
    """ Add a socket address to be registered with the notification\
        protocol.

    :param hostname: IP address of the host to notify
    :param notify_port: port to notify when the database is set up
    :param ack_port: the port on which the acknowledgement is sent back
    :rtype: None
    """
    socket_address = __SockAddr(hostname, notify_port, ack_port)
    spynnaker_external_devices.add_socket_address(socket_address)
def EthernetControl(
        n_neurons, params, label=None, local_host=None, local_port=None,
        database_notify_port_num=None, database_ack_port_num=None):
    """ Create a PyNN population that can be included in a network to\
        control an external device which is connected to the host

    :param n_neurons: The number of neurons in the control population.\
        NOTE(review): currently unused in this body - confirm intended
    :param params: The parameters of the model; must include a 'model'\
        key whose value is a class implementing\
        AbstractEthernetController
    :param label: An optional label for the population.\
        NOTE(review): currently unused in this body - confirm intended
    :param local_host:\
        The optional local host IP address to listen on for commands
    :param local_port: The optional local port to listen on for commands
    :param database_ack_port_num:\
        The optional port to which responses to the database notification\
        protocol are to be sent
    :param database_notify_port_num:\
        The optional port to which notifications from the database\
        notification protocol are to be sent
    :return:\
        A pyNN Population which can be used as the target of a Projection.\
        Note that the Population can also be used as the source of a\
        Projection, but it might not send spikes.
    """
    if not issubclass(params['model'], AbstractEthernetController):
        raise Exception(
            "Model must be a subclass of AbstractEthernetController")
    # NOTE(review): this uses the model *class* itself without
    # instantiating it, and calls its accessors directly - presumably
    # those are class-level on AbstractEthernetController; confirm
    vertex = params['model']
    translator = vertex.get_message_translator()
    ethernet_control_connection = EthernetControlConnection(
        translator, local_host, local_port)
    # devices requiring multicast commands at start/stop are serviced over
    # a separate command connection registered with the database protocol
    devices_with_commands = [
        device for device in vertex.get_external_devices()
        if isinstance(device, AbstractSendMeMulticastCommandsVertex)
    ]
    if len(devices_with_commands) > 0:
        ethernet_command_connection = EthernetCommandConnection(
            translator, devices_with_commands, local_host,
            database_notify_port_num)
        add_database_socket_address(
            ethernet_command_connection.local_ip_address,
            ethernet_command_connection.local_port, database_ack_port_num)
    # route the control vertex's output packets back to the host through a
    # live packet gatherer listening on the control connection
    live_packet_gather = LivePacketGather(
        ethernet_control_connection.local_ip_address,
        ethernet_control_connection.local_port,
        message_type=EIEIOType.KEY_PAYLOAD_32_BIT,
        payload_as_time_stamps=False, use_payload_prefix=False)
    spynnaker_external_devices.add_application_vertex(live_packet_gather)
    for partition_id in vertex.get_outgoing_partition_ids():
        spynnaker_external_devices.add_edge(
            vertex, live_packet_gather, partition_id)
    return vertex
def EthernetSensorPopulation(
        model, params, local_host=None,
        database_notify_port_num=None, database_ack_port_num=None):
    """ Create a pyNN population which can be included in a network to\
        receive spikes from a device connected to the host

    :param model: Class of a model that implements AbstractEthernetSensor
    :param params: The parameters of the model
    :param local_host:\
        The optional local host IP address to listen on for database\
        notification
    :param database_ack_port_num:\
        The optional port to which responses to the database notification\
        protocol are to be sent
    :param database_notify_port_num:\
        The optional port to which notifications from the database\
        notification protocol are to be sent
    :return:\
        A pyNN Population which can be used as the source of a Projection.\
        Note that the Population cannot be used as the target of a\
        Projection.
    """
    if not issubclass(model, AbstractEthernetSensor):
        raise Exception("Model must be a subclass of AbstractEthernetSensor")
    device = model(**params)

    # build the spike injector arguments from the device: its own injector
    # parameters, plus the neuron count and label; notification is handled
    # separately below, so disable it here
    spike_injector_params = dict(device.get_injector_parameters())
    spike_injector_params['notify'] = False
    spike_injector_params['n_neurons'] = device.get_n_neurons()
    spike_injector_params['label'] = device.get_injector_label()

    # BUG FIX: the injector was previously created from the parameter dict
    # *without* n_neurons and label, silently dropping the values computed
    # for them above
    vertex = SpikeInjector(**spike_injector_params)

    if isinstance(device, AbstractSendMeMulticastCommandsVertex):
        ethernet_command_connection = EthernetCommandConnection(
            device.get_translator(), [device], local_host,
            database_notify_port_num)
        add_database_socket_address(
            ethernet_command_connection.local_ip_address,
            ethernet_command_connection.local_port, database_ack_port_num)
    database_connection = device.get_database_connection()
    if database_connection is not None:
        add_database_socket_address(
            database_connection.local_ip_address,
            database_connection.local_port, database_ack_port_num)
    return vertex
def SpikeInjector(
        n_neurons, label, port=None, notify=True,
        virtual_key=None, database_notify_host=None,
        database_notify_port_num=None, database_ack_port_num=None):
    """ Add a spike injector to the application graph.

    :param n_neurons: the number of neurons the spike injector will emulate
    :type n_neurons: int
    :param label: the label given to the population
    :type label: str
    :param port: the port number used to listen for injections of spikes
    :type port: int
    :param notify: whether to register with the database notification\
        system
    :type notify: bool
    :param virtual_key: the virtual key used in the routing system
    :type virtual_key: int
    :param database_notify_host: the hostname for the device which is\
        listening to the database notification.
    :type database_notify_host: str
    :param database_notify_port_num: The port number to which a external\
        device will receive the database is ready command
    :type database_notify_port_num: int
    :param database_ack_port_num: the port number to which a external\
        device will acknowledge that they have finished reading the\
        database and are ready for it to start execution
    :type database_ack_port_num: int
    """
    # register with the notification protocol unless explicitly disabled
    if notify:
        _process_database_socket(
            database_notify_port_num, database_notify_host,
            database_ack_port_num)
    injector = ExternalDeviceSpikeInjector(
        n_neurons=n_neurons, label=label, port=port, virtual_key=virtual_key)
    return injector
def _process_database_socket(
        database_notify_port_num, database_notify_host, database_ack_port_num):
    """ Build and register a database socket address, filling in any\
        missing values from the [Database] section of the configuration.

    :param database_notify_port_num: the port to send the\
        database-is-written packet to, or None to use the config value
    :param database_notify_host: the IP address to send the\
        database-is-written packet to, or None to use the config value
    :param database_ack_port_num: the port to listen on for the\
        acknowledgement, or None to use the config value (where the\
        string "None" means no listen port)
    :rtype: None
    """
    config = globals_variables.get_simulator().config

    # fall back to the configuration file for any value not supplied
    notify_port = database_notify_port_num
    if notify_port is None:
        notify_port = config.getint("Database", "notify_port")
    notify_host = database_notify_host
    if notify_host is None:
        notify_host = config.get("Database", "notify_hostname")
    ack_port = database_ack_port_num
    if ack_port is None:
        ack_port = config.get("Database", "listen_port")
    # the string "None" encodes "no listen port"
    if ack_port == "None":
        ack_port = None

    # build the socket address used by the notification interface and
    # register it with the plugin manager
    spynnaker_external_devices.add_socket_address(SocketAddress(
        listen_port=ack_port,
        notify_host_name=notify_host,
        notify_port_no=notify_port))
from pyNN.common import control as pynn_control
from pyNN.random import RandomDistribution, NumpyRNG
from pyNN import __version__ as pynn_version
from spinn_front_end_common.utilities import globals_variables
from spynnaker.pyNN.abstract_spinnaker_common import AbstractSpiNNakerCommon
from spynnaker8 import _version
from spynnaker8.spynnaker8_simulator_interface \
    import Spynnaker8SimulatorInterface
from spynnaker8.utilities.spynnaker8_failed_state import Spynnaker8FailedState
from spynnaker8.utilities.random_stats import RandomStatsExponentialImpl
from spynnaker8.utilities.random_stats import RandomStatsGammaImpl
from spynnaker8.utilities.random_stats import RandomStatsLogNormalImpl
from spynnaker8.utilities.random_stats import RandomStatsNormalClippedImpl
from spynnaker8.utilities.random_stats import RandomStatsNormalImpl
from spynnaker8.utilities.random_stats import RandomStatsPoissonImpl
from spynnaker8.utilities.random_stats import RandomStatsRandIntImpl
from spynnaker8.utilities.random_stats import RandomStatsUniformImpl
from spynnaker8.utilities.random_stats import RandomStatsVonmisesImpl
from spynnaker8.utilities.random_stats import RandomStatsBinomialImpl
# Bug fix: this was "from _version import ...", an implicit relative import
# that only works on Python 2; use the absolute package path instead.
from spynnaker8._version import __version__ as version
import logging
import math
from quantities import __version__ as quantities_version
from neo import __version__ as neo_version
from lazyarray import __version__ as lazyarray_version

logger = logging.getLogger(__name__)

# At import time change the default FailedState
globals_variables.set_failed_state(Spynnaker8FailedState())

NAME = "SpiNNaker_under_version({}-{})".format(
    _version.__version__, _version.__version_name__)
class SpiNNaker(AbstractSpiNNakerCommon, pynn_control.BaseState,
                Spynnaker8SimulatorInterface):
    """ The main simulator interface of sPyNNaker for PyNN 0.8.

    Combines the SpiNNaker common front end with the PyNN ``BaseState``
    so that a single object serves as both the simulator and the PyNN
    simulator state.
    """

    def __init__(
            self, database_socket_addresses,
            extra_algorithm_xml_paths, extra_mapping_inputs,
            extra_mapping_algorithms, extra_pre_run_algorithms,
            extra_post_run_algorithms, extra_load_algorithms,
            time_scale_factor, min_delay, max_delay, graph_label,
            n_chips_required, timestep=0.1, hostname=None):

        # "auto" means the smallest delay the simulator supports,
        # which is a single timestep
        if min_delay == "auto":
            min_delay = timestep

        # population and projection holders
        self._populations = list()
        self._projections = list()

        # PyNN-demanded objects
        # NOTE(review): 42 is the historical starting value used by this
        # front end; kept for compatibility (clear() resets to it too)
        self._id_counter = 42
        self._segment_counter = 0
        self._recorders = set()

        # main PyNN interface inheritance
        pynn_control.BaseState.__init__(self)

        # handle the extra load algorithms on top of the built-in ones
        built_in_extra_load_algorithms = list()
        if extra_load_algorithms is not None:
            built_in_extra_load_algorithms.extend(extra_load_algorithms)

        # handle extra algorithm XML paths on top of the built-in ones
        built_in_extra_xml_paths = list()
        if extra_algorithm_xml_paths is not None:
            built_in_extra_xml_paths.extend(extra_algorithm_xml_paths)

        # handle the extra mapping inputs on top of the built-in ones.
        # Bug fix: this previously did
        # built_in_extra_mapping_inputs.update(built_in_extra_mapping_inputs)
        # (updating the empty dict with itself), silently discarding any
        # caller-supplied extra mapping inputs.
        built_in_extra_mapping_inputs = dict()
        if extra_mapping_inputs is not None:
            built_in_extra_mapping_inputs.update(extra_mapping_inputs)

        # version information reported to the tool chain
        front_end_versions = [
            ("sPyNNaker8_version", version),
            ("pyNN_version", pynn_version),
            ("quantities_version", quantities_version),
            ("neo_version", neo_version),
            ("lazyarray_version", lazyarray_version)]

        # SpiNNaker setup
        AbstractSpiNNakerCommon.__init__(
            self, database_socket_addresses=database_socket_addresses,
            user_extra_algorithm_xml_path=built_in_extra_xml_paths,
            user_extra_mapping_inputs=built_in_extra_mapping_inputs,
            extra_mapping_algorithms=extra_mapping_algorithms,
            user_extra_algorithms_pre_run=extra_pre_run_algorithms,
            extra_post_run_algorithms=extra_post_run_algorithms,
            extra_load_algorithms=built_in_extra_load_algorithms,
            graph_label=graph_label, n_chips_required=n_chips_required,
            hostname=hostname, min_delay=min_delay,
            max_delay=max_delay, timestep=timestep,
            time_scale_factor=time_scale_factor,
            front_end_versions=front_end_versions)

    def run(self, simtime):
        """ PyNN run simulation (enforced method and parameter name)

        :param simtime: the run duration in milliseconds
        :rtype: None
        """
        self._run(simtime)

    def run_until(self, tstop):
        """ Run the simulation until the given absolute simulation time\
            (method demanded by the PyNN API).

        :param tstop: the absolute simulation time (milliseconds) to run\
            until
        :rtype: None
        """
        # Run for the remaining time between now and tstop
        self._run(tstop - self.t)

    def clear(self):
        """ Clear all recorded data and counters, reset the network to\
            t = 0 and stop any running SpiNNaker application.

        Unlike :py:meth:`reset`, this also discards the recorders and
        restarts the id and segment counters.

        :rtype: None
        """
        self.recorders = set()
        self._id_counter = 42
        # reset() below increments this back to 0
        self._segment_counter = -1
        self.reset()

        # Stop any currently running SpiNNaker application
        self.stop()

    def reset(self):
        """ Reset the state of the current network to time t = 0.

        Caches the data recorded so far in each population and starts a
        new segment.

        :rtype: None
        """
        for population in self._populations:
            population.cache_data()

        self._segment_counter += 1

        AbstractSpiNNakerCommon.reset(self)

    def _run(self, duration_ms):
        """ Main interface for starting a simulation run.

        :param duration_ms: the run duration in milliseconds
        :rtype: None
        """

        # Convert dt into microseconds and divide by
        # realtime proportion to get hardware timestep
        hardware_timestep_us = int(round((1000.0 * float(self.dt)) /
                                         float(self.timescale_factor)))

        # Determine how long simulation is in timesteps
        duration_timesteps = \
            int(math.ceil(float(duration_ms) / float(self.dt)))

        logger.info("Simulating for %u %fms timesteps "
                    "using a hardware timestep of %uus",
                    duration_timesteps, self.dt, hardware_timestep_us)

        AbstractSpiNNakerCommon.run(self, duration_ms)

    @property
    def state(self):
        """ Used to bypass the dual-level object expected by PyNN; this\
            simulator is its own state object.

        :return: this simulator object
        """
        return self

    @property
    def mpi_rank(self):
        """ Method demanded by PyNN due to MPI assumptions; sPyNNaker\
            does not use MPI, so the rank is always 0.

        :return: 0
        """
        return 0

    @mpi_rank.setter
    def mpi_rank(self, new_value):
        """ Setter demanded by PyNN; has no meaning here so the value is\
            ignored.

        :param new_value: ignored
        """
        pass

    @property
    def num_processes(self):
        """ Method demanded by PyNN due to MPI assumptions; sPyNNaker\
            runs as a single process.

        :return: 1
        """
        return 1

    @num_processes.setter
    def num_processes(self, new_value):
        """ Setter demanded by PyNN; has no meaning here so the value is\
            ignored.

        :param new_value: ignored
        """
        pass

    @property
    def dt(self):
        """ Method demanded by the PyNN API.

        :return: the machine time step
        """
        # NOTE(review): _run() multiplies this by 1000 to get microseconds,
        # implying milliseconds here, but t below divides it by 1000 -
        # confirm units against AbstractSpiNNakerCommon
        return self._machine_time_step

    @dt.setter
    def dt(self, new_value):
        """ Setter for the machine time step (forced by PyNN).

        :param new_value: new value for the machine time step
        :rtype: None
        """
        self._machine_time_step = new_value

    @property
    def t(self):
        """ Method demanded by the PyNN API.

        :return: the simulation time already executed, in milliseconds
        """
        return (
            float(self._current_run_timesteps) *
            (float(self._machine_time_step) / 1000.0))

    @property
    def segment_counter(self):
        """ Method demanded by the PyNN API.

        :return: the number of the current recording segment
        """
        return self._segment_counter

    @segment_counter.setter
    def segment_counter(self, new_value):
        """ Setter demanded by the PyNN API.

        :param new_value: new value for the segment counter
        :rtype: None
        """
        self._segment_counter = new_value

    @property
    def id_counter(self):
        """ Counter used by the populations when allocating cell ids.

        :return: the current id counter value
        """
        return self._id_counter

    @id_counter.setter
    def id_counter(self, new_value):
        """ Setter for id_counter, currently used by the populations.

        :param new_value: new value for the id counter
        :rtype: None
        """
        self._id_counter = new_value

    @property
    def running(self):
        """ Property required by the PyNN base state object; ties into\
            the has_ran flag used for auto pause and resume.

        :return: the has_ran flag from the SpiNNaker main interface
        """
        return self._has_ran

    @running.setter
    def running(self, new_value):
        """ Setter for the has_ran flag, only used by the PyNN interface.

        :param new_value: the new value for the flag
        :rtype: None
        """
        self._has_ran = new_value

    @property
    def name(self):
        """ Interface function needed to ensure PyNN recording Neo blocks\
            are correctly labelled.

        :return: the name of the simulator
        """
        return NAME

    @property
    def populations(self):
        """ The list of populations; needed by the population class.

        :return: the list of populations
        """
        return self._populations

    @property
    def projections(self):
        """ The list of projections; needed by the projection class.

        :return: the list of projections
        """
        return self._projections

    @property
    def recorders(self):
        """ The recorders, used by the PyNN state object.

        :return: the internal recorders set
        """
        return self._recorders

    @recorders.setter
    def recorders(self, new_value):
        """ Setter for the internal recorders object.

        :param new_value: the new set of recorders
        :rtype: None
        """
        self._recorders = new_value

    def get_distribution_to_stats(self):
        """ Map each supported random distribution name to its\
            statistics implementation.

        :return: dict of distribution name to statistics object
        """
        return {
            'binomial': RandomStatsBinomialImpl(),
            'gamma': RandomStatsGammaImpl(),
            'exponential': RandomStatsExponentialImpl(),
            'lognormal': RandomStatsLogNormalImpl(),
            'normal': RandomStatsNormalImpl(),
            'normal_clipped': RandomStatsNormalClippedImpl(),
            'poisson': RandomStatsPoissonImpl(),
            'uniform': RandomStatsUniformImpl(),
            'randint': RandomStatsRandIntImpl(),
            'vonmises': RandomStatsVonmisesImpl()}

    def get_random_distribution(self):
        """ :return: the PyNN RandomDistribution class """
        return RandomDistribution

    def is_a_pynn_random(self, thing):
        """ Check whether the given object is a PyNN random distribution.

        :param thing: the object to check
        :return: True if it is a PyNN RandomDistribution
        """
        return isinstance(thing, RandomDistribution)

    def get_pynn_NumpyRNG(self):
        """ :return: a new PyNN NumpyRNG instance """
        return NumpyRNG()
from spynnaker.pyNN.models.neuron import AbstractPopulationVertex
from spynnaker8.utilities import DataHolder
from spynnaker.pyNN.external_devices_models import ExternalDeviceLifControl
class ExternalDeviceLifControlDataHolder(DataHolder):
    """ Data holder for the leaky integrate-and-fire control model with\
        an exponentially decaying current input, driving external devices.
    """

    def __init__(
            self, devices, create_edges, translator=None,

            # default params from abstract pop vertex
            spikes_per_second=AbstractPopulationVertex.
            none_pynn_default_parameters['spikes_per_second'],
            label=AbstractPopulationVertex.none_pynn_default_parameters[
                'label'],
            ring_buffer_sigma=AbstractPopulationVertex.
            none_pynn_default_parameters['ring_buffer_sigma'],
            incoming_spike_buffer_size=AbstractPopulationVertex.
            none_pynn_default_parameters['incoming_spike_buffer_size'],
            constraints=AbstractPopulationVertex.
            none_pynn_default_parameters['constraints'],

            # default params for the neuron model type
            tau_m=ExternalDeviceLifControl.default_parameters['tau_m'],
            cm=ExternalDeviceLifControl.default_parameters['cm'],
            v_rest=ExternalDeviceLifControl.default_parameters['v_rest'],
            v_reset=ExternalDeviceLifControl.default_parameters['v_reset'],
            tau_syn_E=ExternalDeviceLifControl.default_parameters['tau_syn_E'],
            tau_syn_I=ExternalDeviceLifControl.default_parameters['tau_syn_I'],
            tau_refrac=ExternalDeviceLifControl.default_parameters[
                'tau_refrac'],
            i_offset=ExternalDeviceLifControl.default_parameters['i_offset'],
            v_init=ExternalDeviceLifControl.none_pynn_default_parameters[
                'v_init'],
            isyn_inh=ExternalDeviceLifControl.default_parameters['isyn_inh'],
            isyn_exc=ExternalDeviceLifControl.default_parameters['isyn_exc']):
        """ Pack every argument into the single parameter dictionary that\
            DataHolder expects.
        """
        parameters = {
            'devices': devices,
            'create_edges': create_edges,
            'translator': translator,
            'spikes_per_second': spikes_per_second,
            'ring_buffer_sigma': ring_buffer_sigma,
            'label': label,
            'incoming_spike_buffer_size': incoming_spike_buffer_size,
            'constraints': constraints,
            'tau_m': tau_m,
            'cm': cm,
            'v_rest': v_rest,
            'v_reset': v_reset,
            'tau_syn_E': tau_syn_E,
            'tau_syn_I': tau_syn_I,
            'tau_refrac': tau_refrac,
            'i_offset': i_offset,
            'v_init': v_init,
            'isyn_inh': isyn_inh,
            'isyn_exc': isyn_exc,
        }
        DataHolder.__init__(self, parameters)

    @staticmethod
    def build_model():
        """ :return: the model class that this data holder builds """
        return ExternalDeviceLifControl
from spynnaker.pyNN.models.neuron import AbstractPopulationVertex
from spynnaker8.utilities import DataHolder
from spynnaker.pyNN.external_devices_models.push_bot.push_bot_control_modules \
import PushBotLifEthernet
from spynnaker.pyNN.external_devices_models import ExternalDeviceLifControl
class PushBotLifEthernetDataHolder(DataHolder):
    """ Data holder for the push bot LIF control module connected over\
        Ethernet (leaky integrate-and-fire with exponentially decaying\
        current input).
    """

    def __init__(
            self, protocol, devices, pushbot_ip_address, pushbot_port=56000,

            # default params from abstract pop vertex
            spikes_per_second=AbstractPopulationVertex.
            none_pynn_default_parameters['spikes_per_second'],
            label=AbstractPopulationVertex.none_pynn_default_parameters[
                'label'],
            ring_buffer_sigma=AbstractPopulationVertex.
            none_pynn_default_parameters['ring_buffer_sigma'],
            incoming_spike_buffer_size=AbstractPopulationVertex.
            none_pynn_default_parameters['incoming_spike_buffer_size'],
            constraints=AbstractPopulationVertex.
            none_pynn_default_parameters['constraints'],

            # default params for the neuron model type
            tau_m=ExternalDeviceLifControl.default_parameters['tau_m'],
            cm=ExternalDeviceLifControl.default_parameters['cm'],
            v_rest=ExternalDeviceLifControl.default_parameters['v_rest'],
            v_reset=ExternalDeviceLifControl.default_parameters['v_reset'],
            tau_syn_E=ExternalDeviceLifControl.default_parameters['tau_syn_E'],
            tau_syn_I=ExternalDeviceLifControl.default_parameters['tau_syn_I'],
            tau_refrac=ExternalDeviceLifControl.default_parameters[
                'tau_refrac'],
            i_offset=ExternalDeviceLifControl.default_parameters['i_offset'],
            v_init=PushBotLifEthernet.none_pynn_default_parameters['v_init']):
        """ Pack every argument into the single parameter dictionary that\
            DataHolder expects.
        """
        parameters = {
            'protocol': protocol,
            'devices': devices,
            'spikes_per_second': spikes_per_second,
            'ring_buffer_sigma': ring_buffer_sigma,
            'label': label,
            'incoming_spike_buffer_size': incoming_spike_buffer_size,
            'constraints': constraints,
            'tau_m': tau_m,
            'cm': cm,
            'v_rest': v_rest,
            'v_reset': v_reset,
            'tau_syn_E': tau_syn_E,
            'tau_syn_I': tau_syn_I,
            'tau_refrac': tau_refrac,
            'i_offset': i_offset,
            'v_init': v_init,
            'pushbot_ip_address': pushbot_ip_address,
            'pushbot_port': pushbot_port,
        }
        DataHolder.__init__(self, parameters)

    @staticmethod
    def build_model():
        """ :return: the model class that this data holder builds """
        return PushBotLifEthernet
from spynnaker.pyNN.models.neuron import AbstractPopulationVertex
from spynnaker8.utilities import DataHolder
from spynnaker.pyNN.external_devices_models.push_bot.push_bot_control_modules \
import PushBotLifSpinnakerLink
from spynnaker.pyNN.external_devices_models import ExternalDeviceLifControl
import logging
logger = logging.getLogger(__name__)
class PushBotLifSpinnakerLinkDataHolder(DataHolder):
    """ Data holder for the push bot control module connected via a\
        SpiNNaker Link.
    """

    def __init__(
            self, protocol, devices,

            # default params from abstract pop vertex
            spikes_per_second=AbstractPopulationVertex.
            none_pynn_default_parameters['spikes_per_second'],
            label=AbstractPopulationVertex.none_pynn_default_parameters[
                'label'],
            ring_buffer_sigma=AbstractPopulationVertex.
            none_pynn_default_parameters['ring_buffer_sigma'],
            incoming_spike_buffer_size=AbstractPopulationVertex.
            none_pynn_default_parameters['incoming_spike_buffer_size'],
            constraints=AbstractPopulationVertex.
            none_pynn_default_parameters['constraints'],

            # default params for the neuron model type
            tau_m=ExternalDeviceLifControl.default_parameters['tau_m'],
            cm=ExternalDeviceLifControl.default_parameters['cm'],
            v_rest=ExternalDeviceLifControl.default_parameters['v_rest'],
            v_reset=ExternalDeviceLifControl.default_parameters['v_reset'],
            tau_syn_E=ExternalDeviceLifControl.default_parameters['tau_syn_E'],
            tau_syn_I=ExternalDeviceLifControl.default_parameters['tau_syn_I'],
            tau_refrac=ExternalDeviceLifControl.default_parameters[
                'tau_refrac'],
            i_offset=ExternalDeviceLifControl.default_parameters['i_offset'],
            v_init=ExternalDeviceLifControl.none_pynn_default_parameters[
                'v_init']):
        """ Pack every argument into the single parameter dictionary that\
            DataHolder expects.
        """
        parameters = {
            'protocol': protocol,
            'devices': devices,
            'spikes_per_second': spikes_per_second,
            'ring_buffer_sigma': ring_buffer_sigma,
            'label': label,
            'incoming_spike_buffer_size': incoming_spike_buffer_size,
            'constraints': constraints,
            'tau_m': tau_m,
            'cm': cm,
            'v_rest': v_rest,
            'v_reset': v_reset,
            'tau_syn_E': tau_syn_E,
            'tau_syn_I': tau_syn_I,
            'tau_refrac': tau_refrac,
            'i_offset': i_offset,
            'v_init': v_init,
        }
        DataHolder.__init__(self, parameters)

    @staticmethod
    def build_model():
        """ :return: the model class that this data holder builds """
        return PushBotLifSpinnakerLink
from datetime import datetime
from spynnaker8.models.variable_cache import VariableCache
class DataCache(object):
    """ Storage object to hold all the data needed to (re)create a Neo\
        Segment.

    Required because deepcopy does not work on Neo objects.

    Holds the metadata shared by all variable types at the top level, and
    a per-instance cache of the variable-specific data.
    """

    __slots__ = ("_cache",
                 "_description",
                 "_first_id",
                 "_label",
                 "_rec_datetime",
                 "_recording_start_time",
                 "_sampling_interval",
                 "_segment_number",
                 "_t")

    def __init__(self, label, description, segment_number,
                 recording_start_time, t, sampling_interval, first_id):
        """ Constructor.

        :param label: cache label
        :param description: cache description
        :param segment_number: cache segment number
        :param recording_start_time: when this cache was started in\
            recording space
        :param t: time
        :param sampling_interval: sampling interval, same as t in the\
            sPyNNaker world to date
        :param first_id: first atom id
        """
        # Bug fix: the cache used to be a class-level "_cache = dict()",
        # which (a) was shared between every DataCache instance and
        # (b) clashed with the "_cache" entry in __slots__, raising a
        # ValueError at class-creation time.  It is now per-instance.
        self._cache = dict()
        self._label = label
        self._description = description
        self._segment_number = segment_number
        self._recording_start_time = recording_start_time
        self._t = t
        self._sampling_interval = sampling_interval
        self._first_id = first_id
        # Set to the save time on the first save_data() call; None until
        # then (previously reading rec_datetime before any save raised
        # AttributeError)
        self._rec_datetime = None

    @property
    def variables(self):
        """ Provides the names of the variables for which data has been\
            cached.

        :rtype: iterable of str
        """
        return self._cache.keys()

    @property
    def label(self):
        """ The cache label """
        return self._label

    @property
    def description(self):
        """ The cache description """
        return self._description

    @property
    def segment_number(self):
        """ The cache segment number """
        return self._segment_number

    @property
    def recording_start_time(self):
        """ When this cache was started in recording space """
        return self._recording_start_time

    @property
    def t(self):
        """ The time """
        return self._t

    @property
    def sampling_interval(self):
        """ The sampling interval """
        return self._sampling_interval

    @property
    def first_id(self):
        """ The first atom id """
        return self._first_id

    @property
    def rec_datetime(self):
        """ The time of the most recent save_data call, or None if no\
            data has been saved yet.
        """
        return self._rec_datetime

    def has_data(self, variable):
        """ Checks if data for a variable has been cached.

        :param variable: name of the variable
        :type variable: str
        :return: True if there is cached data
        :rtype: bool
        """
        return variable in self._cache

    def get_data(self, variable):
        """ Get the variable cache for the named variable.

        :param variable: name of the variable to get the cache for
        :type variable: str
        :return: the cached data, ids, indexes and units
        :rtype: VariableCache
        """
        return self._cache[variable]

    def save_data(self, variable, data, ids, indexes, units):
        """ Saves the data for one variable in this segment.

        :param variable: name of the variable the data applies to
        :type variable: str
        :param data: raw data in sPyNNaker format
        :type data: nparray
        :param ids: ids for which data should be returned
        :type ids: nparray
        :param indexes: indexes for which data should be retrieved
        :type indexes: nparray
        :param units: the units in which the data is
        :type units: str
        :rtype: None
        """
        # Record when the data was captured so the Neo segment can carry it
        self._rec_datetime = datetime.now()
        variable_cache = VariableCache(data, ids, indexes, units)
        self._cache[variable] = variable_cache
from spynnaker.pyNN.models.neural_projections.connectors \
import FixedProbabilityConnector as CommonFixedProbabilityConnector
from pyNN.connectors import FixedProbabilityConnector as \
PyNNFixedProbabilityConnector
class FixedProbabilityConnector(
        CommonFixedProbabilityConnector, PyNNFixedProbabilityConnector):
    """ Connector where each pre-post neuron pair is connected with a\
        fixed probability, bridging the sPyNNaker implementation and the\
        PyNN 0.8 connector API.
    """

    def __init__(
            self, p_connect, allow_self_connections=True, safe=True,
            verbose=False, rng=None, callback=None):
        """ For each pair of pre-post cells, the connection probability is
            constant.

        :param p_connect: a float between zero and one. Each potential
            connection is created with this probability.
        :param allow_self_connections: if the connector is used to connect a
            Population to itself, this flag determines whether a neuron is
            allowed to connect to itself, or only to other neurons in the
            Population.
        :param safe: if True, check that weights and delays have valid values.
            If False, this check is skipped.
        :param verbose: whether to print connection information
        :param rng: random number generator used for the connections
        :param callback: a function called during connection building
            (PyNN API; presumably unused on SpiNNaker - confirm)
        """
        # Both base-class initialisers must run: the sPyNNaker base sets up
        # the on-machine connection state, the PyNN base the API-level state
        CommonFixedProbabilityConnector.__init__(
            self, p_connect=p_connect,
            allow_self_connections=allow_self_connections, safe=safe,
            verbose=verbose)
        PyNNFixedProbabilityConnector.__init__(
            self, p_connect=p_connect, callback=callback,
            allow_self_connections=allow_self_connections, rng=rng, safe=safe)

    def set_weights_and_delays(self, weights, delays):
        """ Set the weights and delays of this connector; lists are not\
            supported (the parameter check raises for them).

        :param weights: the weights to use
        :param delays: the delays to use
        """
        self._weights = weights
        self._delays = delays
        self._check_parameters(weights, delays, allow_lists=False)

    @property
    def p_connect(self):
        """ The probability with which each potential connection is made """
        return self._p_connect

    @p_connect.setter
    def p_connect(self, new_value):
        # Setter required by the PyNN connector API
        self._p_connect = new_value
from spynnaker.pyNN.models.neuron import AbstractPopulationVertex
from spynnaker8.utilities import DataHolder
from spynnaker.pyNN.models.neuron.builds import IFCurrDualExpBase
class IFCurrDualExpDataHolder(DataHolder):
    """ Data holder for the leaky integrate-and-fire neuron model with\
        two exponentially decaying excitatory current inputs and one\
        inhibitory.
    """

    def __init__(
            self,
            spikes_per_second=AbstractPopulationVertex.
            none_pynn_default_parameters['spikes_per_second'],
            ring_buffer_sigma=AbstractPopulationVertex.
            none_pynn_default_parameters['ring_buffer_sigma'],
            incoming_spike_buffer_size=AbstractPopulationVertex.
            none_pynn_default_parameters['incoming_spike_buffer_size'],
            constraints=AbstractPopulationVertex.
            none_pynn_default_parameters['constraints'],
            label=AbstractPopulationVertex.none_pynn_default_parameters[
                'label'],
            tau_m=IFCurrDualExpBase.default_parameters['tau_m'],
            cm=IFCurrDualExpBase.default_parameters['cm'],
            v_rest=IFCurrDualExpBase.default_parameters['v_rest'],
            v_reset=IFCurrDualExpBase.default_parameters['v_reset'],
            v_thresh=IFCurrDualExpBase.default_parameters['v_thresh'],
            tau_syn_E=IFCurrDualExpBase.default_parameters['tau_syn_E'],
            tau_syn_E2=IFCurrDualExpBase.default_parameters['tau_syn_E2'],
            tau_syn_I=IFCurrDualExpBase.default_parameters['tau_syn_I'],
            tau_refrac=IFCurrDualExpBase.default_parameters['tau_refrac'],
            i_offset=IFCurrDualExpBase.default_parameters['i_offset'],
            v_init=IFCurrDualExpBase.none_pynn_default_parameters['v_init'],
            isyn_exc=IFCurrDualExpBase.default_parameters['isyn_exc'],
            isyn_inh=IFCurrDualExpBase.default_parameters['isyn_inh'],
            isyn_exc2=IFCurrDualExpBase.default_parameters['isyn_exc2']):
        """ Pack every argument into the single parameter dictionary that\
            DataHolder expects.
        """
        parameters = {
            'spikes_per_second': spikes_per_second,
            'ring_buffer_sigma': ring_buffer_sigma,
            'label': label,
            'incoming_spike_buffer_size': incoming_spike_buffer_size,
            'constraints': constraints,
            'tau_refrac': tau_refrac,
            'tau_m': tau_m,
            'tau_syn_E': tau_syn_E,
            'cm': cm,
            'v_rest': v_rest,
            'v_reset': v_reset,
            'v_thresh': v_thresh,
            'tau_syn_E2': tau_syn_E2,
            'tau_syn_I': tau_syn_I,
            'i_offset': i_offset,
            'v_init': v_init,
            'isyn_exc': isyn_exc,
            'isyn_inh': isyn_inh,
            'isyn_exc2': isyn_exc2,
        }
        DataHolder.__init__(self, parameters)

    @staticmethod
    def build_model():
        """ :return: the model class that this data holder builds """
        return IFCurrDualExpBase
from spynnaker.pyNN.models.neuron import AbstractPopulationVertex
from spynnaker8.utilities import DataHolder
from spynnaker.pyNN.models.neuron.builds import IzkCondExpBase
class IzkCondExpDataHolder(DataHolder):
    """ Data holder for the Izhikevich neuron model with conductance-based\
        exponentially decaying synaptic input.
    """

    def __init__(
            self,
            spikes_per_second=AbstractPopulationVertex.
            none_pynn_default_parameters['spikes_per_second'],
            ring_buffer_sigma=AbstractPopulationVertex.
            none_pynn_default_parameters['ring_buffer_sigma'],
            incoming_spike_buffer_size=AbstractPopulationVertex.
            none_pynn_default_parameters['incoming_spike_buffer_size'],
            constraints=AbstractPopulationVertex.none_pynn_default_parameters[
                'constraints'],
            label=AbstractPopulationVertex.none_pynn_default_parameters[
                'label'],
            a=IzkCondExpBase.default_parameters['a'],
            b=IzkCondExpBase.default_parameters['b'],
            c=IzkCondExpBase.default_parameters['c'],
            d=IzkCondExpBase.default_parameters['d'],
            i_offset=IzkCondExpBase.default_parameters['i_offset'],
            u_init=IzkCondExpBase.default_parameters['u_init'],
            v_init=IzkCondExpBase.default_parameters['v_init'],
            tau_syn_E=IzkCondExpBase.default_parameters['tau_syn_E'],
            tau_syn_I=IzkCondExpBase.default_parameters['tau_syn_I'],
            e_rev_E=IzkCondExpBase.default_parameters['e_rev_E'],
            e_rev_I=IzkCondExpBase.default_parameters['e_rev_I'],
            isyn_exc=IzkCondExpBase.default_parameters['isyn_exc'],
            isyn_inh=IzkCondExpBase.default_parameters['isyn_inh']):
        """ Pack every argument into the single parameter dictionary that\
            DataHolder expects.
        """
        parameters = {
            'spikes_per_second': spikes_per_second,
            'ring_buffer_sigma': ring_buffer_sigma,
            'incoming_spike_buffer_size': incoming_spike_buffer_size,
            'constraints': constraints,
            'label': label,
            'a': a,
            'b': b,
            'c': c,
            'd': d,
            'i_offset': i_offset,
            'u_init': u_init,
            'v_init': v_init,
            'tau_syn_E': tau_syn_E,
            'tau_syn_I': tau_syn_I,
            'isyn_exc': isyn_exc,
            'isyn_inh': isyn_inh,
            'e_rev_E': e_rev_E,
            'e_rev_I': e_rev_I,
        }
        DataHolder.__init__(self, parameters)

    @staticmethod
    def build_model():
        """ :return: the model class that this data holder builds """
        return IzkCondExpBase
from spynnaker.pyNN.models.neuron import AbstractPopulationVertex
from spynnaker8.utilities import DataHolder
from spynnaker.pyNN.models.neuron.builds import IFCurrDelta
class IfCurrDeltaDataHolder(DataHolder):
    """ Data holder for the leaky integrate-and-fire neuron model with\
        delta (instantaneous) current input.
    """

    def __init__(
            self, spikes_per_second=AbstractPopulationVertex.
            none_pynn_default_parameters['spikes_per_second'],
            ring_buffer_sigma=AbstractPopulationVertex.
            none_pynn_default_parameters['ring_buffer_sigma'],
            incoming_spike_buffer_size=AbstractPopulationVertex.
            none_pynn_default_parameters['incoming_spike_buffer_size'],
            constraints=AbstractPopulationVertex.none_pynn_default_parameters[
                'constraints'],
            label=AbstractPopulationVertex.none_pynn_default_parameters[
                'label'],
            tau_m=IFCurrDelta.default_parameters['tau_m'],
            cm=IFCurrDelta.default_parameters['cm'],
            v_rest=IFCurrDelta.default_parameters['v_rest'],
            v_reset=IFCurrDelta.default_parameters['v_reset'],
            v_thresh=IFCurrDelta.default_parameters['v_thresh'],
            tau_refrac=IFCurrDelta.default_parameters['tau_refrac'],
            i_offset=IFCurrDelta.default_parameters['i_offset'],
            v_init=IFCurrDelta.none_pynn_default_parameters['v_init'],
            isyn_exc=IFCurrDelta.default_parameters['isyn_exc'],
            isyn_inh=IFCurrDelta.default_parameters['isyn_inh']):
        """ Pack every argument into the single parameter dictionary that\
            DataHolder expects.
        """
        parameters = {
            'spikes_per_second': spikes_per_second,
            'ring_buffer_sigma': ring_buffer_sigma,
            'incoming_spike_buffer_size': incoming_spike_buffer_size,
            'constraints': constraints,
            'label': label,
            'tau_m': tau_m,
            'cm': cm,
            'v_rest': v_rest,
            'v_reset': v_reset,
            'v_thresh': v_thresh,
            'tau_refrac': tau_refrac,
            'i_offset': i_offset,
            'v_init': v_init,
            'isyn_exc': isyn_exc,
            'isyn_inh': isyn_inh,
        }
        DataHolder.__init__(self, parameters)

    @staticmethod
    def build_model():
        """ :return: the model class that this data holder builds """
        return IFCurrDelta
from spynnaker.pyNN.models.neuron import AbstractPopulationVertex
from spynnaker8.utilities import DataHolder
from spynnaker.pyNN.models.neuron.builds import IFCurrExpBase
class IFCurrExpDataHolder(DataHolder):
    """ Data holder for the leaky integrate-and-fire neuron model with\
        exponentially decaying current input.
    """

    def __init__(
            self,
            spikes_per_second=AbstractPopulationVertex.
            none_pynn_default_parameters['spikes_per_second'],
            ring_buffer_sigma=AbstractPopulationVertex.
            none_pynn_default_parameters['ring_buffer_sigma'],
            incoming_spike_buffer_size=AbstractPopulationVertex.
            none_pynn_default_parameters['incoming_spike_buffer_size'],
            constraints=AbstractPopulationVertex.none_pynn_default_parameters[
                'constraints'],
            label=AbstractPopulationVertex.none_pynn_default_parameters[
                'label'],
            v_init=IFCurrExpBase.none_pynn_default_parameters['v_init'],
            tau_m=IFCurrExpBase.default_parameters['tau_m'],
            cm=IFCurrExpBase.default_parameters['cm'],
            v_rest=IFCurrExpBase.default_parameters['v_rest'],
            v_reset=IFCurrExpBase.default_parameters['v_reset'],
            v_thresh=IFCurrExpBase.default_parameters['v_thresh'],
            tau_syn_E=IFCurrExpBase.default_parameters['tau_syn_E'],
            tau_syn_I=IFCurrExpBase.default_parameters['tau_syn_I'],
            tau_refrac=IFCurrExpBase.default_parameters['tau_refrac'],
            i_offset=IFCurrExpBase.default_parameters['i_offset']):
        """ Pack every argument into the single parameter dictionary that\
            DataHolder expects.
        """
        parameters = {
            'spikes_per_second': spikes_per_second,
            'ring_buffer_sigma': ring_buffer_sigma,
            'incoming_spike_buffer_size': incoming_spike_buffer_size,
            'constraints': constraints,
            'label': label,
            'tau_m': tau_m,
            'cm': cm,
            'v_rest': v_rest,
            'v_reset': v_reset,
            'v_thresh': v_thresh,
            'tau_syn_E': tau_syn_E,
            'tau_syn_I': tau_syn_I,
            'tau_refrac': tau_refrac,
            'i_offset': i_offset,
            'v_init': v_init,
        }
        DataHolder.__init__(self, parameters)

    @staticmethod
    def build_model():
        """ :return: the model class that this data holder builds """
        return IFCurrExpBase
from spynnaker.pyNN.models.neuron import AbstractPopulationVertex
from spynnaker8.utilities import DataHolder
from spynnaker.pyNN.models.neuron.builds import IFCondExpBase
class IFCondExpDataHolder(DataHolder):
    """ Data holder for the leaky integrate-and-fire neuron model with\
        conductance-based exponentially decaying synaptic input.
    """

    def __init__(
            self,
            spikes_per_second=AbstractPopulationVertex.
            none_pynn_default_parameters['spikes_per_second'],
            ring_buffer_sigma=AbstractPopulationVertex.
            none_pynn_default_parameters['ring_buffer_sigma'],
            incoming_spike_buffer_size=AbstractPopulationVertex.
            none_pynn_default_parameters['incoming_spike_buffer_size'],
            constraints=AbstractPopulationVertex.none_pynn_default_parameters[
                'constraints'],
            label=AbstractPopulationVertex.none_pynn_default_parameters[
                'label'],
            v_init=IFCondExpBase.none_pynn_default_parameters['v_init'],
            tau_m=IFCondExpBase.default_parameters['tau_m'],
            cm=IFCondExpBase.default_parameters['cm'],
            v_rest=IFCondExpBase.default_parameters['v_rest'],
            v_reset=IFCondExpBase.default_parameters['v_reset'],
            v_thresh=IFCondExpBase.default_parameters['v_thresh'],
            tau_syn_E=IFCondExpBase.default_parameters['tau_syn_E'],
            tau_syn_I=IFCondExpBase.default_parameters['tau_syn_I'],
            tau_refrac=IFCondExpBase.default_parameters['tau_refrac'],
            i_offset=IFCondExpBase.default_parameters['i_offset'],
            e_rev_E=IFCondExpBase.default_parameters['e_rev_E'],
            e_rev_I=IFCondExpBase.default_parameters['e_rev_I'],
            isyn_exc=IFCondExpBase.default_parameters['isyn_exc'],
            isyn_inh=IFCondExpBase.default_parameters['isyn_inh']):
        """ Pack every argument into the single parameter dictionary that\
            DataHolder expects.
        """
        parameters = {
            'spikes_per_second': spikes_per_second,
            'ring_buffer_sigma': ring_buffer_sigma,
            'incoming_spike_buffer_size': incoming_spike_buffer_size,
            'constraints': constraints,
            'label': label,
            'v_init': v_init,
            'tau_m': tau_m,
            'cm': cm,
            'v_rest': v_rest,
            'v_reset': v_reset,
            'v_thresh': v_thresh,
            'tau_syn_E': tau_syn_E,
            'tau_syn_I': tau_syn_I,
            'tau_refrac': tau_refrac,
            'i_offset': i_offset,
            'e_rev_E': e_rev_E,
            'e_rev_I': e_rev_I,
            'isyn_exc': isyn_exc,
            'isyn_inh': isyn_inh,
        }
        DataHolder.__init__(self, parameters)

    @staticmethod
    def build_model():
        """ :return: the model class that this data holder builds """
        return IFCondExpBase
from spynnaker.pyNN.models.neuron import AbstractPopulationVertex
from spynnaker8.utilities import DataHolder
from spynnaker.pyNN.models.neuron.builds import IFCurrExpCa2Adaptive
class IfCurrExpCa2AdaptiveDataHolder(DataHolder):
    """ Data holder for the leaky integrate-and-fire neuron model with\
        exponentially decaying current input and Ca2+ adaptation.
    """

    def __init__(
            self, spikes_per_second=AbstractPopulationVertex.
            none_pynn_default_parameters['spikes_per_second'],
            ring_buffer_sigma=AbstractPopulationVertex.
            none_pynn_default_parameters['ring_buffer_sigma'],
            incoming_spike_buffer_size=AbstractPopulationVertex.
            none_pynn_default_parameters['incoming_spike_buffer_size'],
            constraints=AbstractPopulationVertex.none_pynn_default_parameters[
                'constraints'],
            label=AbstractPopulationVertex.none_pynn_default_parameters[
                'label'],
            tau_m=IFCurrExpCa2Adaptive.default_parameters['tau_m'],
            cm=IFCurrExpCa2Adaptive.default_parameters['cm'],
            v_rest=IFCurrExpCa2Adaptive.default_parameters['v_rest'],
            v_reset=IFCurrExpCa2Adaptive.default_parameters['v_reset'],
            v_thresh=IFCurrExpCa2Adaptive.default_parameters['v_thresh'],
            tau_syn_E=IFCurrExpCa2Adaptive.default_parameters['tau_syn_E'],
            tau_syn_I=IFCurrExpCa2Adaptive.default_parameters['tau_syn_I'],
            tau_refrac=IFCurrExpCa2Adaptive.default_parameters['tau_refrac'],
            i_offset=IFCurrExpCa2Adaptive.default_parameters['i_offset'],
            tau_ca2=IFCurrExpCa2Adaptive.default_parameters["tau_ca2"],
            i_ca2=IFCurrExpCa2Adaptive.default_parameters["i_ca2"],
            i_alpha=IFCurrExpCa2Adaptive.default_parameters["i_alpha"],
            v_init=IFCurrExpCa2Adaptive.none_pynn_default_parameters['v_init'],
            isyn_exc=IFCurrExpCa2Adaptive.default_parameters['isyn_exc'],
            isyn_inh=IFCurrExpCa2Adaptive.default_parameters['isyn_inh']):
        """ Pack every argument into the single parameter dictionary that\
            DataHolder expects.
        """
        parameters = {
            'spikes_per_second': spikes_per_second,
            'ring_buffer_sigma': ring_buffer_sigma,
            'incoming_spike_buffer_size': incoming_spike_buffer_size,
            'constraints': constraints,
            'label': label,
            'tau_m': tau_m,
            'cm': cm,
            'v_rest': v_rest,
            'v_reset': v_reset,
            'v_thresh': v_thresh,
            'tau_syn_E': tau_syn_E,
            'tau_syn_I': tau_syn_I,
            'tau_refrac': tau_refrac,
            'i_offset': i_offset,
            'v_init': v_init,
            'isyn_exc': isyn_exc,
            'isyn_inh': isyn_inh,
            'tau_ca2': tau_ca2,
            'i_ca2': i_ca2,
            'i_alpha': i_alpha,
        }
        DataHolder.__init__(self, parameters)

    @staticmethod
    def build_model():
        """ :return: the model class that this data holder builds """
        return IFCurrExpCa2Adaptive
from spynnaker.pyNN.models.neuron import AbstractPopulationVertex
from spynnaker8.utilities import DataHolder
from spynnaker.pyNN.models.neuron.builds import IzkCurrExpBase
class IzkCurrExpDataHolder(DataHolder):
    """ Data holder for the Izhikevich neuron model with current-based
        exponential synapses (IzkCurrExpBase).
    """

    def __init__(
            self,
            spikes_per_second=AbstractPopulationVertex.
            none_pynn_default_parameters['spikes_per_second'],
            ring_buffer_sigma=AbstractPopulationVertex.
            none_pynn_default_parameters['ring_buffer_sigma'],
            incoming_spike_buffer_size=AbstractPopulationVertex.
            none_pynn_default_parameters['incoming_spike_buffer_size'],
            constraints=AbstractPopulationVertex.none_pynn_default_parameters[
                'constraints'],
            label=AbstractPopulationVertex.none_pynn_default_parameters[
                'label'],
            a=IzkCurrExpBase.default_parameters['a'],
            b=IzkCurrExpBase.default_parameters['b'],
            c=IzkCurrExpBase.default_parameters['c'],
            d=IzkCurrExpBase.default_parameters['d'],
            i_offset=IzkCurrExpBase.default_parameters['i_offset'],
            u_init=IzkCurrExpBase.default_parameters['u_init'],
            v_init=IzkCurrExpBase.default_parameters['v_init'],
            tau_syn_E=IzkCurrExpBase.default_parameters['tau_syn_E'],
            tau_syn_I=IzkCurrExpBase.default_parameters['tau_syn_I'],
            isyn_exc=IzkCurrExpBase.default_parameters['isyn_exc'],
            isyn_inh=IzkCurrExpBase.default_parameters['isyn_inh']):
        # Gather every argument into the single dictionary that DataHolder
        # expects
        parameters = {
            'spikes_per_second': spikes_per_second,
            'ring_buffer_sigma': ring_buffer_sigma,
            'incoming_spike_buffer_size': incoming_spike_buffer_size,
            'constraints': constraints,
            'label': label,
            'a': a,
            'b': b,
            'c': c,
            'd': d,
            'i_offset': i_offset,
            'u_init': u_init,
            'v_init': v_init,
            'tau_syn_E': tau_syn_E,
            'tau_syn_I': tau_syn_I,
            'isyn_exc': isyn_exc,
            'isyn_inh': isyn_inh,
        }
        DataHolder.__init__(self, parameters)

    @staticmethod
    def build_model():
        """ Return the model class that this data holder configures. """
        return IzkCurrExpBase
from spynnaker8.utilities import DataHolder
from spynnaker.pyNN.models.spike_source import SpikeSourceArray
class SpikeSourceArrayDataHolder(DataHolder):
    """ Data holder for a spike source that emits spikes at the
        pre-configured times (SpikeSourceArray).
    """

    def __init__(
            self,
            spike_times=SpikeSourceArray.default_parameters['spike_times'],
            port=SpikeSourceArray.none_pynn_default_parameters['port'],
            tag=SpikeSourceArray.none_pynn_default_parameters['tag'],
            ip_address=SpikeSourceArray.none_pynn_default_parameters[
                'ip_address'],
            board_address=SpikeSourceArray.none_pynn_default_parameters[
                'board_address'],
            max_on_chip_memory_usage_for_spikes_in_bytes=SpikeSourceArray.
            none_pynn_default_parameters[
                'max_on_chip_memory_usage_for_spikes_in_bytes'],
            space_before_notification=SpikeSourceArray.
            none_pynn_default_parameters['space_before_notification'],
            constraints=SpikeSourceArray.none_pynn_default_parameters[
                'constraints'],
            label=SpikeSourceArray.none_pynn_default_parameters[
                'label'],
            spike_recorder_buffer_size=SpikeSourceArray.
            none_pynn_default_parameters['spike_recorder_buffer_size'],
            buffer_size_before_receive=SpikeSourceArray.
            none_pynn_default_parameters['buffer_size_before_receive']):
        # Gather every argument into the single dictionary that DataHolder
        # expects
        parameters = {
            'spike_times': spike_times,
            'port': port,
            'tag': tag,
            'ip_address': ip_address,
            'board_address': board_address,
            'max_on_chip_memory_usage_for_spikes_in_bytes':
                max_on_chip_memory_usage_for_spikes_in_bytes,
            'space_before_notification': space_before_notification,
            'constraints': constraints,
            'label': label,
            'spike_recorder_buffer_size': spike_recorder_buffer_size,
            'buffer_size_before_receive': buffer_size_before_receive,
        }
        DataHolder.__init__(self, parameters)

    @staticmethod
    def build_model():
        """ Return the model class that this data holder configures. """
        return SpikeSourceArray
from spynnaker.pyNN.models.neuron import AbstractPopulationVertex
from spynnaker8.utilities import DataHolder
from spynnaker.pyNN.models.neuron.builds import IFCondExpStoc
class IfCondExpStocDataHolder(DataHolder):
    """ Data holder for the leaky integrate-and-fire neuron model with
        conductance-based exponential synapses and a stochastic threshold
        (IFCondExpStoc).
    """

    def __init__(
            self, spikes_per_second=AbstractPopulationVertex.
            none_pynn_default_parameters['spikes_per_second'],
            ring_buffer_sigma=AbstractPopulationVertex.
            none_pynn_default_parameters['ring_buffer_sigma'],
            incoming_spike_buffer_size=AbstractPopulationVertex.
            none_pynn_default_parameters['incoming_spike_buffer_size'],
            constraints=AbstractPopulationVertex.none_pynn_default_parameters[
                'constraints'],
            label=AbstractPopulationVertex.none_pynn_default_parameters[
                'label'],
            tau_m=IFCondExpStoc.default_parameters['tau_m'],
            cm=IFCondExpStoc.default_parameters['cm'],
            v_rest=IFCondExpStoc.default_parameters['v_rest'],
            v_reset=IFCondExpStoc.default_parameters['v_reset'],
            v_thresh=IFCondExpStoc.default_parameters['v_thresh'],
            tau_syn_E=IFCondExpStoc.default_parameters['tau_syn_E'],
            tau_syn_I=IFCondExpStoc.default_parameters['tau_syn_I'],
            tau_refrac=IFCondExpStoc.default_parameters['tau_refrac'],
            i_offset=IFCondExpStoc.default_parameters['i_offset'],
            e_rev_E=IFCondExpStoc.default_parameters['e_rev_E'],
            e_rev_I=IFCondExpStoc.default_parameters['e_rev_I'],
            du_th=IFCondExpStoc.default_parameters['du_th'],
            tau_th=IFCondExpStoc.default_parameters['tau_th'],
            v_init=IFCondExpStoc.none_pynn_default_parameters['v_init'],
            isyn_exc=IFCondExpStoc.default_parameters['isyn_exc'],
            isyn_inh=IFCondExpStoc.default_parameters['isyn_inh']):
        # Gather every argument into the single dictionary that DataHolder
        # expects
        parameters = {
            'spikes_per_second': spikes_per_second,
            'ring_buffer_sigma': ring_buffer_sigma,
            'incoming_spike_buffer_size': incoming_spike_buffer_size,
            'constraints': constraints,
            'label': label,
            'tau_m': tau_m,
            'cm': cm,
            'v_rest': v_rest,
            'v_reset': v_reset,
            'v_thresh': v_thresh,
            'tau_syn_E': tau_syn_E,
            'tau_syn_I': tau_syn_I,
            'tau_refrac': tau_refrac,
            'i_offset': i_offset,
            'e_rev_E': e_rev_E,
            'e_rev_I': e_rev_I,
            'du_th': du_th,
            'tau_th': tau_th,
            'v_init': v_init,
            'isyn_exc': isyn_exc,
            'isyn_inh': isyn_inh,
        }
        DataHolder.__init__(self, parameters)

    @staticmethod
    def build_model():
        """ Return the model class that this data holder configures. """
        return IFCondExpStoc
def compare_spiketrain(spiketrain1, spiketrain2):
    """
    Checks two SpikeTrains have the exact same data

    :param spiketrain1: first spiketrain
    :type spiketrain1: SpikeTrain
    :param spiketrain2: second spiketrain
    :type spiketrain2: SpikeTrain
    :raises AssertionError: if the ids, lengths or spike times differ
    """
    id1 = spiketrain1.annotations['source_index']
    id2 = spiketrain2.annotations['source_index']
    if id1 != id2:
        msg = "Different annotations['source_index'] found {} and {}" \
              "".format(id1, id2)
        raise AssertionError(msg)
    if len(spiketrain1) != len(spiketrain2):
        msg = "spiketrains1 has {} spikes while spiketrains2 as {} for " \
              "id {}".format(len(spiketrain1), len(spiketrain2), id1)
        raise AssertionError(msg)
    for spike1, spike2 in zip(spiketrain1, spiketrain2):
        if spike1 != spike2:
            # Bug fix: removed leftover Python-2-only debug print that
            # dumped both trains to stdout; the exception message already
            # carries the mismatching values
            msg = "spike1 is {} while spike2 is {} for " \
                  "id {}".format(spike1, spike2, id1)
            raise AssertionError(msg)
def compare_spiketrains(spiketrains1, spiketrains2, same_data=True):
    """
    Check two lists of SpikeTrains have the exact same data

    :param spiketrains1: First list of SpikeTrains to compare
    :type spiketrains1: List[SpikeTrain]
    :param spiketrains2: Second list of SpikeTrains to compare
    :type spiketrains2: List[SpikeTrain]
    :param same_data: Flag to indicate if the same type of data is held.
        Ie: Same spikes, v, gsyn_exc and gsyn_ihn
        If False allows one or both lists to be Empty
        Even if False none empty lists must be the same length
    :type same_data: bool
    :raises AssertionError
    """
    # When the two sources may hold different data types an empty list on
    # either side is acceptable
    if not same_data and (len(spiketrains1) == 0 or len(spiketrains2) == 0):
        return
    count1 = len(spiketrains1)
    count2 = len(spiketrains2)
    if count1 != count2:
        raise AssertionError(
            "spiketrains1 has {} spiketrains while spiketrains2 as {} "
            "analogsignalarrays".format(count1, count2))
    for train1, train2 in zip(spiketrains1, spiketrains2):
        compare_spiketrain(train1, train2)
def compare_analogsignalarray(asa1, asa2):
    """
    Compares two analogsignalarray objects to see if they are the same

    :param asa1: first analogsignalarray
        holding list of individual analogsignal objects
    :type asa1: Analogsignalarray
    :param asa2: second analogsignalarray
        holding list of individual analogsignal objects
    :type asa2: Analogsignalarray
    :raises AssertionError: if names, channels, times or values differ
    """
    if asa1.name != asa2.name:
        # Bug fix: message previously named "analogsignalarray1" twice
        msg = "analogsignalarray1 has name {} while analogsignalarray2 has " \
              "{} ".format(asa1.name, asa2.name)
        raise AssertionError(msg)
    if len(asa1.channel_index) != len(asa2.channel_index):
        msg = "channel_index 1 has len {} while channel_index 2 has {} " \
              "for {}".format(len(asa1.channel_index),
                              len(asa2.channel_index), asa1.name)
        raise AssertionError(msg)
    for channel1, channel2 in zip(asa1.channel_index, asa2.channel_index):
        if channel1 != channel2:
            # Bug fix: the first placeholder was missing from the message
            msg = "channel 1 is {} while channel 2 is {} " \
                  "for {}".format(channel1, channel2, asa1.name)
            raise AssertionError(msg)
    if len(asa1.times) != len(asa2.times):
        msg = "times 1 has len {} while times 2 has {} " \
              "for {}".format(len(asa1.times),
                              len(asa2.times), asa1.name)
        raise AssertionError(msg)
    for time1, time2 in zip(asa1.times, asa2.times):
        if time1 != time2:
            # Bug fix: the first placeholder was missing from the message
            msg = "time 1 is {} while time 2 is {} " \
                  "for {}".format(time1, time2, asa1.name)
            raise AssertionError(msg)
    if len(asa1) != len(asa2):
        msg = "analogsignalarray 1 has len {} while analogsignalarray 2 has " \
              "{} for {}".format(len(asa1), len(asa2), asa1.name)
        raise AssertionError(msg)
    for signal1, signal2 in zip(asa1, asa2):
        if len(signal1) != len(signal2):
            msg = "signal 1 has len {} while signal 2 has " \
                  "{} for {}".format(len(signal1), len(signal2), asa1.name)
            raise AssertionError(msg)
        for value1, value2 in zip(signal1, signal2):
            if value1 != value2:
                # Bug fix: the first placeholder was missing from the message
                msg = "value 1 is {} while value2 is {} " \
                      "for {}".format(value1, value2, asa1.name)
                raise AssertionError(msg)
def compare_segments(seg1, seg2, same_data=True):
    """
    Compares two neo Segments to see if they hold the same data

    :param seg1: First Segment to check
    :type seg1: Segment
    :param seg2: Second Segment to check
    :type seg2: Segment
    :param same_data: Flag to indicate if the same type of data is held.
        Ie: Same spikes, v, gsyn_exc and gsyn_ihn
        If False only data in both blocks is compared
    :type same_data: bool
    :raises AssertionError
    """
    compare_spiketrains(seg1.spiketrains, seg2.spiketrains, same_data)
    if same_data and \
            len(seg1.analogsignalarrays) != len(seg2.analogsignalarrays):
        # Bug fix: the message previously reported seg1's count twice
        msg = "Segment1 has {} analogsignalarrays while Segment2 as {} " \
              "analogsignalarrays".format(len(seg1.analogsignalarrays),
                                          len(seg2.analogsignalarrays))
        raise AssertionError(msg)
    for analogsignalarray1 in seg1.analogsignalarrays:
        name = analogsignalarray1.name
        filtered = seg2.filter(name=name)
        if len(filtered) == 0:
            if same_data:
                msg = "Segment1 has {} data while Segment2 does not" \
                      "".format(name)
                raise AssertionError(msg)
        else:
            # Reuse the filter result already obtained above instead of
            # filtering the segment a second time
            compare_analogsignalarray(analogsignalarray1, filtered[0])
def compare_blocks(neo1, neo2, same_runs=True, same_data=True):
    """
    Compares two neo Blocks to see if they hold the same data.

    :param neo1: First block to check
    :type neo1: Block
    :param neo2: Second block to check
    :type neo2: Block
    :param same_runs: Flag to signal if blocks are the same length
        If False extra segments in the larger block are ignored
    :type same_runs: bool
    :param same_data: Flag to indicate if the same type of data is held.
        Ie: Same spikes, v, gsyn_exc and gsyn_ihn
        If False only data in both blocks is compared
    :type same_data: bool
    :raises AssertionError
    """
    segment_count_1 = len(neo1.segments)
    segment_count_2 = len(neo2.segments)
    if same_runs and segment_count_1 != segment_count_2:
        raise AssertionError(
            "Block1 has {} segments while block2 as {} segments"
            "".format(segment_count_1, segment_count_2))
    for segment1, segment2 in zip(neo1.segments, neo2.segments):
        compare_segments(segment1, segment2, same_data)
from quantities import ms
import numpy as np
def convert_analog_signalarray(signal_array, time_unit=ms):
    """
    Converts part of a NEO object into the old sPyNNaker 7 format

    :param signal_array: Extended Quantities object
    :param time_unit: Data time unit for time index
    :rtype: ndarray in the format id, time, value
    """
    ids = signal_array.channel_index
    if time_unit == ms:
        times = signal_array.times.magnitude
    else:
        times = signal_array.times.rescale(time_unit).magnitude
    all_times = np.tile(times, len(ids))
    neurons = np.repeat(ids, len(times))
    # Bug fix: use a list comprehension rather than a lazy map() so that
    # np.concatenate receives a real sequence on Python 3 as well
    # NOTE(review): behaviour when ids is not a contiguous list of integers
    # is unverified (as the original comment also noted)
    values = np.concatenate(
        [signal_array[:, channel].magnitude for channel in ids])
    return np.column_stack((neurons, all_times, values))
def convert_data(data, name, run=0):
    """
    Converts the data into a numpy array in the format id, time, value

    :param data: Data as returned by a getData() call
    :type data: SpynnakerNeoBlock
    :param name: Name of the data to be extracted.
        Same values as used in getData()
    :type name: str
    :param run: Zero based index of the run to extract data for
    :type run: int
    :return: nparray
    """
    segment_count = len(data.segments)
    if segment_count <= run:
        raise ValueError("Data only contains {} so unable to run {}. "
                         "Note run is the zero based index."
                         "".format(segment_count, run))
    if name == "all":
        raise ValueError("Unable to convert all data in one go "
                         "as result would be comparing apples and oranges.")
    if name == "spikes":
        return convert_spikes(data, run)
    signal_array = data.segments[run].filter(name=name)[0]
    return convert_analog_signalarray(signal_array)
def convert_data_list(data, name, runs=None):
    """
    Converts the data into a list of numpy arrays in the format id, time,
    value

    :param data: Data as returned by a getData() call
    :type data: SpynnakerNeoBlock
    :param name: Name of the data to be extracted.
        Same values as used in getData()
    :type name: str
    :param runs: List of Zero based index of the run to extract data for.
        Or None to extract all runs
    :return: [nparray]
    """
    if runs is None:
        runs = range(len(data.segments))
    # One converted array per requested run
    return [convert_data(data, name, run=run) for run in runs]
def convert_v_list(data):
    """
    Converts the voltage into a list of numpy arrays, one per segment
    (all runs), each in the format id, time, value

    :param data: neo block; every segment must contain "v" data
    :type data: SpynnakerNeoBlock
    :return: [nparray]
    """
    return convert_data_list(data, "v", runs=None)
def convert_gsyn_exc_list(data):
    """
    Converts the gsyn_exc into a list of numpy arrays, one per segment
    (all runs), each in the format id, time, value

    :param data: neo block; every segment must contain "gsyn_exc" data
    :type data: SpynnakerNeoBlock
    :return: [nparray]
    """
    return convert_data_list(data, "gsyn_exc", runs=None)
def convert_gsyn_inh_list(data):
    """
    Converts the gsyn_inh into a list of numpy arrays, one per segment
    (all runs), each in the format id, time, value

    :param data: neo block; every segment must contain "gsyn_inh" data
    :type data: SpynnakerNeoBlock
    :return: [nparray]
    """
    return convert_data_list(data, "gsyn_inh", runs=None)
def convert_gsyn(gsyn_exc, gsyn_inh):
    """
    Converts two neo objects into the old sPyNNaker 7 format

    Note: It is acceptable for both neo parameters to be the same object

    :param gsyn_exc: neo with gsyn_exc data
    :param gsyn_inh: neo with gsyn_inh data
    :rtype: nparray in the format id, time, gsyn_exc value, gsyn_inh value
    """
    exc = gsyn_exc.segments[0].filter(name='gsyn_exc')[0]
    inh = gsyn_inh.segments[0].filter(name='gsyn_inh')[0]
    ids = exc.channel_index
    ids2 = inh.channel_index
    if len(ids) != len(ids2):
        error = "Found {} neuron ids in gsyn_exc but {} in gsyn_inh" \
                "".format(len(ids), len(ids2))
        raise ValueError(error)
    if not np.allclose(ids, ids2):
        raise ValueError("ids in gsyn_exc and gsyn_inh do not match")
    times = exc.times.rescale(ms)
    times2 = inh.times.rescale(ms)
    if len(times) != len(times2):
        # Bug fix: the message previously reported len(times) twice
        error = "Found {} times in gsyn_exc but {} in gsyn_inh" \
                "".format(len(times), len(times2))
        raise ValueError(error)
    if not np.allclose(times, times2):
        raise ValueError("times in gsyn_exc and gsyn_inh do not match")
    all_times = np.tile(times, len(ids))
    neurons = np.repeat(ids, len(times))
    # Bug fix: list comprehensions (not lazy map()) so np.concatenate gets
    # a real sequence on Python 3 as well
    exc_np = np.concatenate([exc[:, i] for i in range(len(ids))])
    inh_np = np.concatenate([inh[:, i] for i in range(len(ids))])
    return np.column_stack((neurons, all_times, exc_np, inh_np))
def convert_spiketrains(spiketrains):
    """
    Converts a list of spiketrains into the old sPyNNaker 7 format

    :param spiketrains: List of SpikeTrains
    :rtype: nparray in the format neuron id, spike time
    """
    # Robustness fix: np.concatenate raises on an empty sequence, so an
    # empty input now yields an empty (0, 2) result instead of an error
    if len(spiketrains) == 0:
        return np.empty(shape=(0, 2))
    # Bug fix: list comprehensions (not lazy map()) so np.concatenate gets
    # a real sequence on Python 3 as well
    neurons = np.concatenate([
        np.repeat(train.annotations['source_index'], len(train))
        for train in spiketrains])
    spikes = np.concatenate([train.magnitude for train in spiketrains])
    return np.column_stack((neurons, spikes))
def convert_spikes(neo, run=0):
    """
    Extracts the spikes for a single run from a neo object

    :param neo: neo object including spike data
    :param run: Zero based index of the run to extract data for
    :type run: int
    :rtype: nparray
    """
    segment_count = len(neo.segments)
    if segment_count <= run:
        raise ValueError("Data only contains {} so unable to run {}. "
                         "Note run is the zero based index."
                         "".format(segment_count, run))
    return convert_spiketrains(neo.segments[run].spiketrains)
def count_spiketrains(spiketrains):
    """
    Helper function to count the total number of spikes in a list of
    spiketrains

    :param spiketrains: List of SpikeTrains
    :return: Total number of spikes in all the spiketrains
    """
    total = 0
    for train in spiketrains:
        total += len(train)
    return total
def count_spikes(neo):
    """
    Help function to count the number of spikes in a neo object

    Only counts run 0 (the first segment)

    :param neo: Neo Object which has spikes in it
    :return: total number of spikes in the first segment
    """
    return count_spiketrains(neo.segments[0].spiketrains)
from neo import Segment, SpikeTrain, AnalogSignalArray
import numpy
import quantities as pq
class SpynnakerNeoSegment(Segment):
    """ sPyNNaker version of the neo Segment, holding the spike trains and
        analog signal arrays recorded for one run
    """

    def __init__(
            self, name=None, description=None, file_origin=None,
            file_datetime=None, rec_datetime=None, index=None,
            **annotations):
        Segment.__init__(self, name, description, file_origin, file_datetime,
                         rec_datetime, index, **annotations)
        # Backing stores for the spiketrains / analogsignalarrays properties
        self._spike_trains = list()
        self._analog_signal_arrays = list()

    @property
    def spiketrains(self):
        """ The SpikeTrain objects held by this segment """
        return self._spike_trains

    @spiketrains.setter
    def spiketrains(self, new_value):
        self._spike_trains = new_value

    def read_in_spikes(self, spikes, t, ids, indexes, first_id,
                       recording_start_time, label):
        """
        Converts the data into SpikeTrains and saves them to the segment

        :param spikes: Spike data in raw sPyNNaker format (id, time pairs)
        :type spikes: nparray
        :param t: last simulation time
        :type t: int
        :param ids: list of the ids to save spikes for
        :type ids: nparray
        :param indexes: list of the channel indexes
        :type indexes: nparray
        :param first_id: id of first neuron
        :type first_id: int
        :param recording_start_time: time recording started
        :type recording_start_time: int
        :param label: recording element's label
        :type label: str
        :rtype: None
        """
        # NOTE(review): t_stop carries ms units while t_start is passed as a
        # plain number - confirm SpikeTrain applies `units` to both
        t_stop = t * pq.ms
        # "neuron_id" rather than "id" to avoid shadowing the builtin
        for (neuron_id, index) in zip(ids, indexes):
            # get times per atom: select the rows whose first column holds
            # this neuron's zero-based offset
            self.spiketrains.append(
                SpikeTrain(
                    times=spikes[spikes[:, 0] ==
                                 neuron_id - first_id][:, 1],
                    t_start=recording_start_time,
                    t_stop=t_stop,
                    units='ms',
                    source_population=label,
                    source_id=neuron_id,
                    source_index=index))

    @staticmethod
    def _convert_extracted_data_into_neo_expected_format(
            signal_array, channel_indices):
        """
        Converts data between sPyNNaker format and neo format

        :param signal_array: Raw data in sPyNNaker format (id, time, value)
        :type signal_array: nparray
        :param channel_indices: indexes to each neuron
        :type channel_indices: nparray
        :rtype: nparray with one column of values per channel
        """
        processed_data = [
            signal_array[:, 2][signal_array[:, 0] == index]
            for index in channel_indices]
        processed_data = numpy.vstack(processed_data).T
        return processed_data

    def read_in_signal(self, signal_array, ids, indexes, variable,
                       recording_start_time, sampling_interval, units, label):
        """ reads in a data item that's not spikes (likely v, gsyn e,
            gsyn i) and saves this data to the segment

        :param signal_array: the raw signal data
        :param ids: the recorded ids
        :param indexes: the channel indexes of the recorded ids
        :param variable: the variable name
        :param recording_start_time: the time recording started
        :param sampling_interval: the sampling interval in ms
        :param units: the units of the recorded values
        :param label: the label of the recorded population
        :return: None
        """
        t_start = recording_start_time * pq.ms
        sampling_period = sampling_interval * pq.ms
        if signal_array.size > 0:
            processed_data = \
                self._convert_extracted_data_into_neo_expected_format(
                    signal_array, indexes)
            source_ids = numpy.fromiter(ids, dtype=int)
            data_array = AnalogSignalArray(
                processed_data,
                units=units,
                t_start=t_start,
                sampling_period=sampling_period,
                name=variable,
                source_population=label,
                channel_index=indexes,
                source_ids=source_ids)
            # NOTE(review): this reassignment looks like a no-op; presumably
            # it forces the array into a 2-d shape - confirm before removing
            data_array.shape = (
                data_array.shape[0], data_array.shape[1])
            self.analogsignalarrays.append(data_array)

    @property
    def analogsignalarrays(self):
        """ The AnalogSignalArray objects held by this segment """
        return self._analog_signal_arrays

    @analogsignalarrays.setter
    def analogsignalarrays(self, new_value):
        self._analog_signal_arrays = new_value
import logging
import re
# Mapping from the level names used in the configuration file's "Logging"
# section to the standard logging module level constants
levels = {
    'debug': logging.DEBUG,
    'info': logging.INFO,
    'warning': logging.WARNING,
    'error': logging.ERROR,
    'critical': logging.CRITICAL,
}
class ConfiguredFilter(object):
    """ Log filter that lets records through based on per-module levels
        read from the configuration's "Logging" section, falling back to
        the configured default level.
    """

    def __init__(self, conf):
        # Per-module level overrides, keyed by module name
        self._levels = ConfiguredFormatter.construct_logging_parents(conf)
        # Level applied when no parent module has an override
        self._default_level = levels[conf.get("Logging", "default")]

    def filter(self, record):
        """Get the level for the deepest parent, and filter appropriately."""
        level = ConfiguredFormatter.level_of_deepest_parent(self._levels,
                                                            record.name)
        if level is None:
            return record.levelno >= self._default_level
        return record.levelno >= level
class ConfiguredFormatter(logging.Formatter):
    """ Log formatter whose output format depends on the configured default
        level: when it is "debug" the source path of each record is included.
    """

    def __init__(self, conf):
        level = conf.get("Logging", "default")
        if level == "debug":
            super(ConfiguredFormatter, self).__init__(
                fmt="%(asctime)-15s %(levelname)s: %(pathname)s: %(message)s",
                datefmt="%Y-%m-%d %H:%M:%S")
        else:
            super(ConfiguredFormatter, self).__init__(
                fmt="%(asctime)-15s %(levelname)s: %(message)s",
                datefmt="%Y-%m-%d %H:%M:%S")

    @staticmethod
    def construct_logging_parents(conf):
        """ Create a dictionary of module names and logging levels.

        :param conf: configuration with an optional "Logging" section whose
            options are level names mapped to comma-separated module lists
        :return: dict of module name to logging level
        """
        # Construct the dictionary
        _levels = {}
        if not conf.has_section("Logging"):
            return _levels
        for label, level in levels.items():
            if conf.has_option("Logging", label):
                # Bug fix: a list comprehension rather than a lazy map();
                # the previous map() object was exhausted by the membership
                # test below on Python 3, so the update never saw any module
                modules = [
                    s.strip()
                    for s in conf.get('Logging', label).split(',')]
                if '' not in modules:
                    _levels.update((module, level) for module in modules)
        return _levels

    @staticmethod
    def deepest_parent(parents, child):
        """ Greediest match between child and parent.

        :param parents: iterable of dotted module names
        :param child: dotted module name to match
        :return: the longest parent that prefixes child, or None
        """
        # Repeatedly strip elements off the child until we match an item in
        # parents
        match = child
        while '.' in match and match not in parents:
            match = re.sub(r'\.[^.]+$', '', match)

        # If no match then return None, there is no deepest parent
        if match not in parents:
            match = None
        return match

    @staticmethod
    def level_of_deepest_parent(parents, child):
        """ The logging level of the greediest match between child and\
            parent, or None when no parent matches.
        """
        parent = ConfiguredFormatter.deepest_parent(parents.keys(), child)
        if parent is None:
            return None
        return parents[parent]
from pacman.model.partitionable_graph.multi_cast_partitionable_edge\
import MultiCastPartitionableEdge
from spinnman.messages.eieio.eieio_type import EIEIOType
from spynnaker.pyNN import get_spynnaker
from spinn_front_end_common.utility_models.live_packet_gather \
import LivePacketGather
# Name of the routing partition used for spike traffic
PARTITION_ID = "SPIKE"
class SpynnakerExternalDevicePluginManager(object):
    """ Manager that wires live-output recorders and external device
        vertices into the sPyNNaker partitionable graph.
    """

    def __init__(self):
        # Map of (port, hostname) to the LivePacketGather vertex reused for
        # every population streaming to that destination
        self._live_spike_recorders = dict()

    def add_socket_address(self, socket_address):
        """ Add a socket address to the list to be checked by the\
            notification protocol

        :param socket_address: the socket address
        :type socket_address:
        :return:
        """
        _spinnaker = get_spynnaker()
        # NOTE(review): relies on a private method of the spinnaker object
        _spinnaker._add_socket_address(socket_address)

    def add_edge_to_recorder_vertex(
            self, vertex_to_record_from, port, hostname, tag=None,
            board_address=None,
            strip_sdp=True, use_prefix=False, key_prefix=None,
            prefix_type=None, message_type=EIEIOType.KEY_32_BIT,
            right_shift=0, payload_as_time_stamps=True,
            use_payload_prefix=True, payload_prefix=None,
            payload_right_shift=0, number_of_packets_sent_per_time_step=0):
        """ Connect a vertex to a LivePacketGather so its spikes are
            streamed to the given host and port.

        A single LivePacketGather per (port, hostname) destination is
        created on first use and reused by later calls; the remaining
        arguments configure the EIEIO packet format of a newly created
        gatherer.
        """
        _spinnaker = get_spynnaker()

        # locate the live spike recorder
        if (port, hostname) in self._live_spike_recorders:
            live_spike_recorder = self._live_spike_recorders[(port, hostname)]
        else:
            live_spike_recorder = LivePacketGather(
                _spinnaker.machine_time_step, _spinnaker.timescale_factor,
                hostname, port, board_address, tag, strip_sdp, use_prefix,
                key_prefix, prefix_type, message_type, right_shift,
                payload_as_time_stamps, use_payload_prefix, payload_prefix,
                payload_right_shift, number_of_packets_sent_per_time_step,
                label="LiveSpikeReceiver")
            self._live_spike_recorders[(port, hostname)] = live_spike_recorder
            _spinnaker.add_partitionable_vertex(live_spike_recorder)

        # create the edge and add
        edge = MultiCastPartitionableEdge(
            vertex_to_record_from, live_spike_recorder, label="recorder_edge")
        _spinnaker.add_partitionable_edge(edge, PARTITION_ID)

    def add_edge(self, vertex, device_vertex):
        """ Add a multicast edge from vertex to device_vertex in the
            partitionable graph.
        """
        _spinnaker = get_spynnaker()
        edge = MultiCastPartitionableEdge(vertex, device_vertex)
        # NOTE(review): unlike add_edge_to_recorder_vertex, no partition id
        # (PARTITION_ID) is passed here - confirm this is intentional
        _spinnaker.add_partitionable_edge(edge)
import os
from spinnman.messages.eieio.eieio_type import EIEIOType
from spynnaker_external_devices_plugin.pyNN.external_devices_models.\
external_cochlea_device import ExternalCochleaDevice
from spynnaker_external_devices_plugin.pyNN.external_devices_models.\
external_fpga_retina_device import ExternalFPGARetinaDevice
from spynnaker_external_devices_plugin.pyNN.external_devices_models.\
munich_retina_device import MunichRetinaDevice
from spynnaker_external_devices_plugin.pyNN.external_devices_models.\
pushbot_retina_device import PushBotRetinaDevice
from spynnaker_external_devices_plugin.pyNN.external_devices_models.\
pushbot_retina_device import PushBotRetinaResolution
from spynnaker_external_devices_plugin.pyNN.external_devices_models.\
pushbot_retina_device import PushBotRetinaPolarity
from spynnaker_external_devices_plugin.pyNN.external_devices_models.\
munich_motor_device import MunichMotorDevice
from spynnaker_external_devices_plugin.pyNN import model_binaries
from spynnaker_external_devices_plugin.pyNN.\
spynnaker_external_device_plugin_manager import \
SpynnakerExternalDevicePluginManager
from spynnaker_external_devices_plugin.pyNN.utility_models.spike_injector \
import SpikeInjector as SpynnakerExternalDeviceSpikeInjector
from spynnaker_external_devices_plugin.pyNN.connections\
.spynnaker_live_spikes_connection import SpynnakerLiveSpikesConnection
from spynnaker.pyNN.utilities import conf
from spynnaker.pyNN import IF_curr_exp
from spynnaker.pyNN.spinnaker import executable_finder
from spinn_front_end_common.utilities.notification_protocol.socket_address \
import SocketAddress
# Register this plugin's compiled binaries with the executable finder
executable_finder.add_path(os.path.dirname(model_binaries.__file__))

# Singleton manager used by the helper functions in this module
spynnaker_external_devices = SpynnakerExternalDevicePluginManager()
def activate_live_output_for(
        population, database_notify_host=None, database_notify_port_num=None,
        database_ack_port_num=None, board_address=None, port=None,
        host=None, tag=None, strip_sdp=True, use_prefix=False, key_prefix=None,
        prefix_type=None, message_type=EIEIOType.KEY_32_BIT,
        right_shift=0, payload_as_time_stamps=True,
        use_payload_prefix=True, payload_prefix=None,
        payload_right_shift=0, number_of_packets_sent_per_time_step=0):
    """ Output the spikes from a given population from SpiNNaker as they
        occur in the simulation

    :param population: The population to activate the live output for
    :type population: Population
    :param database_notify_host: the hostname for the device which is\
            listening to the database notification.
    :type database_notify_host: str
    :param database_ack_port_num: the port number to which an external\
            device will ack that they have finished reading the database and\
            are ready for it to start execution
    :type database_ack_port_num: int
    :param database_notify_port_num: The port number to which an external\
            device will receive the database is ready command
    :type database_notify_port_num: int
    :param board_address: A fixed board address required for the tag, or\
            None if any address is OK
    :type board_address: str
    :param key_prefix: the prefix to be applied to the key
    :type key_prefix: int or None
    :param prefix_type: if the prefix type is 32 bit or 16 bit
    :param message_type: if the message is a eieio_command message, or\
            eieio data message with 16 bit or 32 bit keys.
    :param payload_as_time_stamps: whether the payload is interpreted as a\
            timestamp
    :param right_shift: shift applied to keys before transmission
    :param use_payload_prefix: whether a payload prefix is applied
    :param payload_prefix: the prefix applied to payloads, if any
    :param payload_right_shift: shift applied to payloads before transmission
    :param number_of_packets_sent_per_time_step: cap on packets per timer\
            tick
    :param port: The UDP port to which the live spikes will be sent. If not\
            specified, the port will be taken from the "live_spike_port"\
            parameter in the "Recording" section of the spynnaker cfg file.
    :type port: int
    :param host: The host name or IP address to which the live spikes will be\
            sent. If not specified, the host will be taken from the\
            "live_spike_host" parameter in the "Recording" section of the\
            spynnaker cfg file.
    :type host: str
    :param tag: The IP tag to be used for the spikes. If not specified, one\
            will be automatically assigned
    :type tag: int
    :param strip_sdp: Determines if the SDP headers will be stripped from the\
            transmitted packet.
    :type strip_sdp: bool
    :param use_prefix: Determines if the spike packet will contain a common\
            prefix for the spikes
    :type use_prefix: bool
    """

    # get default params if none set
    if port is None:
        port = conf.config.getint("Recording", "live_spike_port")
    if host is None:
        host = conf.config.get("Recording", "live_spike_host")

    # get default params for the database socket if required
    if database_notify_port_num is None:
        database_notify_port_num = conf.config.getint("Database",
                                                      "notify_port")
    if database_notify_host is None:
        database_notify_host = conf.config.get("Database", "notify_hostname")
    if database_ack_port_num is None:
        database_ack_port_num = conf.config.get("Database", "listen_port")
        # the cfg file stores the literal string "None" for "no ack port"
        if database_ack_port_num == "None":
            database_ack_port_num = None

    # add new edge and vertex if required to spinnaker graph
    # NOTE(review): this uses population._vertex while activate_live_output_to
    # uses population._get_vertex - confirm both attribute names are valid
    spynnaker_external_devices.add_edge_to_recorder_vertex(
        population._vertex, port, host, tag, board_address, strip_sdp,
        use_prefix, key_prefix, prefix_type, message_type, right_shift,
        payload_as_time_stamps, use_payload_prefix, payload_prefix,
        payload_right_shift, number_of_packets_sent_per_time_step)

    # build the database socket address used by the notification interface
    database_socket = SocketAddress(
        listen_port=database_ack_port_num,
        notify_host_name=database_notify_host,
        notify_port_no=database_notify_port_num)

    # update socket interface with new demands.
    spynnaker_external_devices.add_socket_address(database_socket)
def activate_live_output_to(population, device):
    """ Activate the output of spikes from a population to an external device.\
        Note that all spikes will be sent to the device.

    :param population: The pyNN population object from which spikes will be\
            sent.
    :param device: The pyNN population external device to which the spikes\
            will be sent.
    """
    # NOTE(review): this uses population._get_vertex while
    # activate_live_output_for uses population._vertex - confirm both
    # attribute names are valid
    spynnaker_external_devices.add_edge(
        population._get_vertex, device._get_vertex)
def SpikeInjector(
        n_neurons, machine_time_step, timescale_factor, label, port,
        virtual_key=None, database_notify_host=None,
        database_notify_port_num=None, database_ack_port_num=None):
    """ Supports adding a spike injector to the application graph.

    :param n_neurons: the number of neurons the spike injector will emulate
    :type n_neurons: int
    :param machine_time_step: the time period in ms for each timer callback
    :type machine_time_step: int
    :param timescale_factor: the amount of scaling needed of the machine\
        time step (basically a slow down function)
    :type timescale_factor: int
    :param label: the label given to the population
    :type label: str
    :param port: the port number used to listen for injections of spikes
    :type port: int
    :param virtual_key: the virtual key used in the routing system
    :type virtual_key: int
    :param database_notify_host: the hostname for the device which is\
        listening to the database notification.
    :type database_notify_host: str
    :param database_ack_port_num: the port number to which a external device\
        will ack that they have finished reading the database and are ready\
        for it to start execution
    :type database_ack_port_num: int
    :param database_notify_port_num: The port number to which a external\
        device will receive the database is ready command
    :type database_notify_port_num: int
    :return: the spike injector vertex
    """
    # Any database parameter not given explicitly falls back on the
    # configuration file; a configured listen_port of "None" means "unset"
    if database_notify_port_num is None:
        database_notify_port_num = conf.config.getint("Database",
                                                      "notify_port")
    if database_notify_host is None:
        database_notify_host = conf.config.get("Database", "notify_hostname")
    if database_ack_port_num is None:
        database_ack_port_num = conf.config.get("Database", "listen_port")
        if database_ack_port_num == "None":
            database_ack_port_num = None

    # Register the socket address over which the notification interface
    # will announce that the database is ready
    spynnaker_external_devices.add_socket_address(SocketAddress(
        listen_port=database_ack_port_num,
        notify_host_name=database_notify_host,
        notify_port_no=database_notify_port_num))

    return SpynnakerExternalDeviceSpikeInjector(
        n_neurons=n_neurons, machine_time_step=machine_time_step,
        timescale_factor=timescale_factor, label=label, port=port,
        virtual_key=virtual_key)
from spynnaker.pyNN.models.abstract_models\
.abstract_vertex_with_dependent_vertices import \
AbstractVertexWithEdgeToDependentVertices
from spynnaker.pyNN import exceptions
# pacman imports
from pacman.model.constraints.key_allocator_constraints\
.key_allocator_fixed_mask_constraint \
import KeyAllocatorFixedMaskConstraint
from spinn_front_end_common.utilities import constants
from pacman.model.abstract_classes.abstract_virtual_vertex \
import AbstractVirtualVertex
from pacman.model.partitionable_graph.abstract_partitionable_vertex \
import AbstractPartitionableVertex
# front end common imports
from spinn_front_end_common.abstract_models.abstract_data_specable_vertex\
import AbstractDataSpecableVertex
from spinn_front_end_common.abstract_models\
.abstract_provides_outgoing_partition_constraints\
import AbstractProvidesOutgoingPartitionConstraints
from data_specification.data_specification_generator import \
DataSpecificationGenerator
# general imports
import logging
logger = logging.getLogger(__name__)
class _MunichMotorDevice(AbstractVirtualVertex):
    """ The virtual vertex standing in for the motor hardware on the other\
        side of a SpiNNaker link
    """

    def __init__(self, spinnaker_link_id):
        # The device is modelled as exactly 6 atoms on a single core
        AbstractVirtualVertex.__init__(
            self, 6, spinnaker_link_id, "External Munich Motor",
            max_atoms_per_core=6)

    @property
    def model_name(self):
        return "external motor device"

    def is_virtual_vertex(self):
        return True
class MunichMotorDevice(AbstractDataSpecableVertex,
                        AbstractPartitionableVertex,
                        AbstractVertexWithEdgeToDependentVertices,
                        AbstractProvidesOutgoingPartitionConstraints):
    """ An Omnibot motor control device - has a real vertex and an external\
        device vertex
    """

    # data specification region identifiers
    SYSTEM_REGION = 0
    PARAMS_REGION = 1

    # 4 words of basic setup information
    SYSTEM_SIZE = 4 * 4

    # 7 words: edge key, speed, sample_time, update_time, delay_time,
    # delta_threshold and the continue_if_not_different flag
    PARAMS_SIZE = 7 * 4

    def __init__(
            self, n_neurons, machine_time_step, timescale_factor,
            spinnaker_link_id, speed=30, sample_time=4096, update_time=512,
            delay_time=5, delta_threshold=23, continue_if_not_different=True,
            label="RobotMotorControl"):
        """
        :param n_neurons: requested number of neurons; ignored - the device\
            always has exactly 6
        :param machine_time_step: the machine time step of the simulation
        :param timescale_factor: the time scale factor of the simulation
        :param spinnaker_link_id: the SpiNNaker link the motor is attached to
        :param speed: speed value written to the parameter region
        :param sample_time: sample time value written to the parameter region
        :param update_time: update time value written to the parameter region
        :param delay_time: delay value written to the parameter region
        :param delta_threshold: threshold value written to the parameter\
            region
        :param continue_if_not_different: flag written to the parameter\
            region (as 1 or 0)
        :param label: the label of the vertex
        """
        # The device always uses exactly 6 atoms, whatever was requested
        if n_neurons != 6:
            logger.warn("The specified number of neurons for the munich motor"
                        " device has been ignored; 6 will be used instead")
        AbstractDataSpecableVertex.__init__(self, machine_time_step,
                                            timescale_factor)
        AbstractPartitionableVertex.__init__(self, 6, label, 6, None)
        # This real vertex drives a virtual vertex representing the motor
        # hardware on the SpiNNaker link
        AbstractVertexWithEdgeToDependentVertices.__init__(
            self, [_MunichMotorDevice(spinnaker_link_id)], None)
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        self._speed = speed
        self._sample_time = sample_time
        self._update_time = update_time
        self._delay_time = delay_time
        self._delta_threshold = delta_threshold
        self._continue_if_not_different = continue_if_not_different

    def get_outgoing_partition_constraints(self, partition, graph_mapper):
        # Any key to the device will work, as long as it doesn't set the
        # management bit. We also need enough for the configuration bits
        # and the management bit anyway
        return list([KeyAllocatorFixedMaskConstraint(0xFFFFF800)])

    def generate_data_spec(
            self, subvertex, placement, partitioned_graph, graph,
            routing_info, hostname, graph_subgraph_mapper,
            report_folder, ip_tags, reverse_ip_tags,
            write_text_specs, application_run_time_folder):
        """
        Model-specific construction of the data blocks necessary to build a
        single motor control device.
        """
        # Create new DataSpec for this processor:
        data_writer, report_writer = \
            self.get_data_spec_file_writers(
                placement.x, placement.y, placement.p, hostname, report_folder,
                write_text_specs, application_run_time_folder)
        spec = DataSpecificationGenerator(data_writer, report_writer)
        # reserve regions
        self.reserve_memory_regions(spec)
        # Write the setup region
        spec.comment("\n*** Spec for robot motor control ***\n\n")
        self._write_basic_setup_info(spec, self.SYSTEM_REGION)
        # locate correct subedge for key
        edge_key = None
        if len(graph.outgoing_edges_from_vertex(self)) != 1:
            raise exceptions.SpynnakerException(
                "This motor should only have one outgoing edge to the robot")
        # NOTE(review): if the subvertex had several outgoing partitions,
        # edge_key would keep the key of whichever partition iterates last --
        # presumably exactly one partition exists here; verify
        partitions = partitioned_graph.\
            outgoing_edges_partitions_from_vertex(subvertex)
        for partition in partitions.values():
            edge_keys_and_masks = \
                routing_info.get_keys_and_masks_from_partition(partition)
            edge_key = edge_keys_and_masks[0].key
        # write params to memory, in the order the PARAMS_SIZE comment lists
        spec.switch_write_focus(region=self.PARAMS_REGION)
        spec.write_value(data=edge_key)
        spec.write_value(data=self._speed)
        spec.write_value(data=self._sample_time)
        spec.write_value(data=self._update_time)
        spec.write_value(data=self._delay_time)
        spec.write_value(data=self._delta_threshold)
        # the boolean flag is written as an explicit 1/0 word
        if self._continue_if_not_different:
            spec.write_value(data=1)
        else:
            spec.write_value(data=0)
        # End-of-Spec:
        spec.end_specification()
        data_writer.close()
        return data_writer.filename

    # inherited from data specable vertex
    def get_binary_file_name(self):
        return "robot_motor_control.aplx"

    def reserve_memory_regions(self, spec):
        """
        Reserve SDRAM space for the system (setup) region and the
        parameter region
        """
        spec.comment("\nReserving memory space for data regions:\n\n")
        # Reserve memory:
        spec.reserve_memory_region(
            region=self.SYSTEM_REGION,
            size=constants.DATA_SPECABLE_BASIC_SETUP_INFO_N_WORDS * 4,
            label='setup')
        spec.reserve_memory_region(region=self.PARAMS_REGION,
                                   size=self.PARAMS_SIZE,
                                   label='params')

    @property
    def model_name(self):
        return "Munich Motor Control"

    def get_sdram_usage_for_atoms(self, vertex_slice, graph):
        # Fixed cost: both regions, independent of the slice
        return self.SYSTEM_SIZE + self.PARAMS_SIZE

    def get_dtcm_usage_for_atoms(self, vertex_slice, graph):
        return 0

    def get_cpu_usage_for_atoms(self, vertex_slice, graph):
        return 0

    def has_dependent_vertices(self):
        return True

    def is_data_specable(self):
        return True

    def partition_identifier_for_dependent_edge(self, dependent_edge):
        # No special partition identifier is required for the motor edge
        return None
import logging
from spinn_front_end_common.abstract_models.\
abstract_provides_outgoing_partition_constraints import \
AbstractProvidesOutgoingPartitionConstraints
from spynnaker.pyNN.models.abstract_models\
.abstract_send_me_multicast_commands_vertex \
import AbstractSendMeMulticastCommandsVertex
from spynnaker.pyNN import exceptions
from spynnaker.pyNN.utilities.multi_cast_command import MultiCastCommand
from pacman.model.abstract_classes.abstract_virtual_vertex \
import AbstractVirtualVertex
from pacman.model.constraints.key_allocator_constraints\
.key_allocator_fixed_key_and_mask_constraint \
import KeyAllocatorFixedKeyAndMaskConstraint
from pacman.model.routing_info.base_key_and_mask import BaseKeyAndMask
logger = logging.getLogger(__name__)
def get_y_from_fpga_retina(key, mode):
    """ Extract the y coordinate from an FPGA retina key.

    :param key: the raw multicast key
    :param mode: the retina resolution (128, 64, 32 or 16)
    :return: the y coordinate, or None if the mode is not recognised
    """
    # The y coordinate occupies the lowest bits; width depends on resolution
    masks = {128: 0x7f, 64: 0x3f, 32: 0x1f, 16: 0xf}
    if mode not in masks:
        return None
    return key & masks[mode]
def get_x_from_fpga_retina(key, mode):
    """ Extract the x coordinate from an FPGA retina key.

    :param key: the raw multicast key
    :param mode: the retina resolution (128, 64, 32 or 16)
    :return: the x coordinate, or None if the mode is not recognised
    """
    # The x coordinate sits above the y bits; shift and width depend on
    # the resolution
    layout = {128: (7, 0x7f), 64: (6, 0x3f), 32: (5, 0x1f), 16: (4, 0xf)}
    if mode not in layout:
        return None
    shift, mask = layout[mode]
    return (key >> shift) & mask
def get_spike_value_from_fpga_retina(key, mode):
    """ Extract the polarity ("spike value") bit from an FPGA retina key.

    The polarity bit sits at bit 14 of the key for every supported
    resolution, so the original four identical branches collapse to a
    single membership test.

    :param key: the raw multicast key
    :param mode: the retina resolution (128, 64, 32 or 16)
    :return: the polarity bit (0 or 1), or None if the mode is unknown
    """
    if mode in (128, 64, 32, 16):
        return (key >> 14) & 0x1
    return None
class ExternalFPGARetinaDevice(
        AbstractVirtualVertex, AbstractSendMeMulticastCommandsVertex,
        AbstractProvidesOutgoingPartitionConstraints):
    """ A silicon retina attached to SpiNNaker via an FPGA board
    """

    MODE_128 = "128"
    MODE_64 = "64"
    MODE_32 = "32"
    MODE_16 = "16"
    UP_POLARITY = "UP"
    DOWN_POLARITY = "DOWN"
    MERGED_POLARITY = "MERGED"

    # Per-mode edge length in pixels, and the tighter routing mask used
    # when only a single polarity is received
    _MODES = {
        MODE_128: (128, 0xFFFFC000),
        MODE_64: (64, 0xFFFFF000),
        MODE_32: (32, 0xFFFFFC00),
        MODE_16: (16, 0xFFFFFF00)}

    def __init__(
            self, mode, retina_key, spinnaker_link_id, polarity,
            machine_time_step, timescale_factor, label=None, n_neurons=None):
        """
        :param mode: The retina "mode"
        :param retina_key: The value of the top 16-bits of the key
        :param spinnaker_link_id: The spinnaker link to which the retina is\
                connected
        :param polarity: The "polarity" of the retina data
        :param machine_time_step: The time step of the simulation
        :param timescale_factor: The timescale factor of the simulation
        :param label: The label for the population
        :param n_neurons: The number of neurons in the population
        """
        self._polarity = polarity
        self._fixed_key = (retina_key & 0xFFFF) << 16
        self._fixed_mask = 0xFFFF8000
        # UP events are flagged with bit 14 of the key
        if polarity == ExternalFPGARetinaDevice.UP_POLARITY:
            self._fixed_key |= 0x4000

        if mode not in ExternalFPGARetinaDevice._MODES:
            raise exceptions.SpynnakerException(
                "the FPGA retina does not recongise this mode")
        edge, single_polarity_mask = ExternalFPGARetinaDevice._MODES[mode]
        if polarity in (ExternalFPGARetinaDevice.UP_POLARITY,
                        ExternalFPGARetinaDevice.DOWN_POLARITY):
            # One atom per pixel; the tighter mask excludes the polarity bit
            fixed_n_neurons = edge * edge
            self._fixed_mask = single_polarity_mask
        else:
            # Merged polarity: one atom per pixel per polarity
            fixed_n_neurons = edge * edge * 2

        if fixed_n_neurons != n_neurons and n_neurons is not None:
            logger.warn("The specified number of neurons for the FPGA retina"
                        " device has been ignored {} will be used instead"
                        .format(fixed_n_neurons))

        AbstractVirtualVertex.__init__(
            self, fixed_n_neurons, spinnaker_link_id,
            max_atoms_per_core=fixed_n_neurons, label=label)
        # Enable the retina at the start of simulation (time 0) and
        # disable it at the end (time -1)
        AbstractSendMeMulticastCommandsVertex.__init__(self, commands=[
            MultiCastCommand(0, 0x0000FFFF, 0xFFFF0000, 1, 5, 100),
            MultiCastCommand(-1, 0x0000FFFE, 0xFFFF0000, 0, 5, 100)])
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)

    def get_outgoing_partition_constraints(self, partition, graph_mapper):
        # All retina spikes carry the fixed key/mask chosen in __init__
        return [KeyAllocatorFixedKeyAndMaskConstraint(
            [BaseKeyAndMask(self._fixed_key, self._fixed_mask)])]

    @property
    def model_name(self):
        return "external FPGA retina device"

    def is_virtual_vertex(self):
        return True

    def recieves_multicast_commands(self):
        # (sic) - spelling matches the abstract interface method name
        return True
from spinn_front_end_common.abstract_models.\
abstract_provides_outgoing_partition_constraints import \
AbstractProvidesOutgoingPartitionConstraints
from spynnaker.pyNN.models.abstract_models\
.abstract_send_me_multicast_commands_vertex \
import AbstractSendMeMulticastCommandsVertex
from pacman.model.constraints.key_allocator_constraints\
.key_allocator_fixed_key_and_mask_constraint \
import KeyAllocatorFixedKeyAndMaskConstraint
from pacman.model.abstract_classes.abstract_virtual_vertex \
import AbstractVirtualVertex
from spynnaker.pyNN import exceptions
from pacman.model.routing_info.base_key_and_mask import BaseKeyAndMask
from spynnaker.pyNN.utilities.multi_cast_command import MultiCastCommand
# Robot retina keys pack 7 bits of x, 7 bits of y and 1 polarity bit
def get_x_from_robot_retina(key):
    """ Extract the 7-bit x coordinate (bits 7-13) from a robot retina key.
    """
    return (key & 0x3f80) >> 7
def get_y_from_robot_retina(key):
    """ Extract the 7-bit y coordinate (bits 0-6) from a robot retina key.
    """
    return key & 0b1111111
def get_spike_value_from_robot_retina(key):
    """ Extract the polarity bit (bit 14) from a robot retina key.
    """
    return (key & 0x4000) >> 14
class MunichRetinaDevice(
        AbstractVirtualVertex, AbstractSendMeMulticastCommandsVertex,
        AbstractProvidesOutgoingPartitionConstraints):
    """ A Munich robot silicon retina on a SpiNNaker link; it is configured\
        via multicast commands and injects spikes with a fixed key/mask
    """

    # key codes for the robot retina
    MANAGEMENT_BIT = 0x400
    MANAGEMENT_MASK = 0xFFFFF800
    # NOTE(review): enable and disable codes are identical per side
    # (0x45/0x46) -- presumably the command payload selects the action;
    # verify against the retina protocol documentation
    LEFT_RETINA_ENABLE = 0x45
    RIGHT_RETINA_ENABLE = 0x46
    LEFT_RETINA_DISABLE = 0x45
    RIGHT_RETINA_DISABLE = 0x46
    LEFT_RETINA_KEY_SET = 0x43
    RIGHT_RETINA_KEY_SET = 0x44
    UP_POLARITY = "UP"
    DOWN_POLARITY = "DOWN"
    MERGED_POLARITY = "MERGED"
    LEFT_RETINA = "LEFT"
    RIGHT_RETINA = "RIGHT"

    def __init__(
            self, retina_key, spinnaker_link_id, position, machine_time_step,
            timescale_factor, label=None, n_neurons=None, polarity=None):
        """
        :param retina_key: value used as the top 16 bits of the routing key
        :param spinnaker_link_id: the SpiNNaker link the retina is attached to
        :param position: LEFT_RETINA or RIGHT_RETINA
        :param machine_time_step: the machine time step of the simulation
        :param timescale_factor: the time scale factor of the simulation
        :param label: the label of the vertex
        :param n_neurons: requested neuron count; ignored in favour of the\
            count implied by the polarity
        :param polarity: UP_POLARITY, DOWN_POLARITY or MERGED_POLARITY\
            (defaults to MERGED_POLARITY)
        """
        if polarity is None:
            polarity = MunichRetinaDevice.MERGED_POLARITY
        self._fixed_key = (retina_key & 0xFFFF) << 16
        self._fixed_mask = 0xFFFF8000
        # UP polarity events are flagged with bit 14 of the key
        if polarity == MunichRetinaDevice.UP_POLARITY:
            self._fixed_key |= 0x4000
        if polarity == MunichRetinaDevice.MERGED_POLARITY:
            # There are 128 x 128 retina "pixels" x 2 polarities
            fixed_n_neurons = 128 * 128 * 2
        else:
            # There are 128 x 128 retina "pixels"
            fixed_n_neurons = 128 * 128
            self._fixed_mask = 0xFFFFC000
        AbstractVirtualVertex.__init__(
            self, fixed_n_neurons, spinnaker_link_id,
            max_atoms_per_core=fixed_n_neurons, label=label)
        # NOTE(review): _get_commands(position) runs before the position
        # check below, so an invalid position is used once before the
        # exception is raised -- consider validating first
        AbstractSendMeMulticastCommandsVertex.__init__(
            self, self._get_commands(position))
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        self._polarity = polarity
        self._position = position
        if (self._position != self.RIGHT_RETINA and
                self._position != self.LEFT_RETINA):
            raise exceptions.SpynnakerException(
                "The external Retina does not recognise this _position")
        # NOTE(review): Python 2 print statement; the motor device in this
        # package uses logger.warn for the same situation -- consider unifying
        if n_neurons != fixed_n_neurons and n_neurons is not None:
            print "Warning, the retina will have {} neurons".format(
                fixed_n_neurons)

    def get_outgoing_partition_constraints(self, partition, graph_mapper):
        # All retina spikes carry the fixed key/mask chosen in __init__
        return [KeyAllocatorFixedKeyAndMaskConstraint(
            [BaseKeyAndMask(self._fixed_key, self._fixed_mask)])]

    def _get_commands(self, position):
        """ Return the multicast commands used to configure the retina\
            (key set, enable at start, disable at end)
        """
        commands = list()
        # change the retina key it transmits with
        # (based off if its right or left)
        if position == self.RIGHT_RETINA:
            key_set_command = self.MANAGEMENT_BIT | self.RIGHT_RETINA_KEY_SET
        else:
            key_set_command = self.MANAGEMENT_BIT | self.LEFT_RETINA_KEY_SET
        # to ensure populations receive the correct packets, this needs to be
        # different based on which retina
        key_set_payload = (self._virtual_chip_x << 24 |
                           self._virtual_chip_y << 16)
        commands.append(MultiCastCommand(
            0, key_set_command, self.MANAGEMENT_MASK, key_set_payload,
            5, 1000))
        # make retina enabled (dependant on if its a left or right retina
        if position == self.RIGHT_RETINA:
            enable_command = self.MANAGEMENT_BIT | self.RIGHT_RETINA_ENABLE
        else:
            enable_command = self.MANAGEMENT_BIT | self.LEFT_RETINA_ENABLE
        commands.append(MultiCastCommand(
            0, enable_command, self.MANAGEMENT_MASK, 1, 5, 1000))
        # disable retina
        if position == self.RIGHT_RETINA:
            disable_command = self.MANAGEMENT_BIT | self.RIGHT_RETINA_DISABLE
        else:
            disable_command = self.MANAGEMENT_BIT | self.LEFT_RETINA_DISABLE
        # time -1: presumably sent at the end of the simulation -- verify
        # MultiCastCommand time semantics
        commands.append(MultiCastCommand(
            -1, disable_command, self.MANAGEMENT_MASK, 0, 5, 1000))
        return commands

    @property
    def model_name(self):
        return "external retina device at " \
               "_position {} and _polarity {}".format(self._position,
                                                      self._polarity)

    def recieves_multicast_commands(self):
        # (sic) - spelling matches the abstract interface method name
        return True

    def is_virtual_vertex(self):
        return True
from collections import namedtuple
from enum import Enum, IntEnum
from spinn_front_end_common.abstract_models.\
abstract_provides_outgoing_partition_constraints import \
AbstractProvidesOutgoingPartitionConstraints
from spynnaker.pyNN.models.abstract_models\
.abstract_send_me_multicast_commands_vertex \
import AbstractSendMeMulticastCommandsVertex
from pacman.model.constraints.key_allocator_constraints\
.key_allocator_fixed_key_and_mask_constraint \
import KeyAllocatorFixedKeyAndMaskConstraint
from spynnaker.pyNN import exceptions
from pacman.model.abstract_classes.abstract_virtual_vertex import \
AbstractVirtualVertex
from pacman.model.routing_info.base_key_and_mask import BaseKeyAndMask
from spynnaker.pyNN.utilities.multi_cast_command import MultiCastCommand
# Named tuple bundling together configuration elements of a pushbot resolution
# config: the edge length in pixels, the payload value that enables that
# resolution, and the number of bits needed to encode one coordinate
PushBotRetinaResolutionConfig = namedtuple("PushBotRetinaResolution",
                                           ["pixels", "enable_command",
                                            "coordinate_bits"])

# Supported retina resolutions (native, plus progressively downsampled)
PushBotRetinaResolution = Enum(
    value="PushBotRetinaResolution",
    names=[("Native128", PushBotRetinaResolutionConfig(128, (1 << 26), 7)),
           ("Downsample64", PushBotRetinaResolutionConfig(64, (2 << 26), 6)),
           ("Downsample32", PushBotRetinaResolutionConfig(32, (3 << 26), 5)),
           ("Downsample16", PushBotRetinaResolutionConfig(16, (4 << 26), 4))])

# Which event polarities the device should deliver: Up, Down, or both merged
PushBotRetinaPolarity = IntEnum(
    value="PushBotRetinaPolarity",
    names=["Up", "Down", "Merged"])
class PushBotRetinaDevice(AbstractVirtualVertex,
                          AbstractSendMeMulticastCommandsVertex,
                          AbstractProvidesOutgoingPartitionConstraints):
    """ The silicon retina of a PushBot robot attached via a SpiNNaker\
        link; configured through multicast commands sent at simulation\
        start and end
    """

    # Mask for all SpiNNaker->Pushbot commands
    MANAGEMENT_MASK = 0xFFFFF800

    # Retina-specific commands
    RETINA_ENABLE = 0x1
    RETINA_DISABLE = 0x0
    RETINA_KEY_SET = 0x2
    RETINA_NO_TIMESTAMP = (0 << 29)

    # Sensor commands
    SENSOR = 0x7F0
    SENSOR_SET_KEY = 0x0
    SENSOR_SET_PUSHBOT = 0x1

    def __init__(self, fixed_key, spinnaker_link_id, machine_time_step,
                 timescale_factor, label=None, n_neurons=None,
                 polarity=PushBotRetinaPolarity.Merged,
                 resolution=PushBotRetinaResolution.Downsample64):
        """
        :param fixed_key: base routing key the retina will transmit with
        :param spinnaker_link_id: the SpiNNaker link the robot is attached to
        :param machine_time_step: the machine time step of the simulation
        :param timescale_factor: the time scale factor of the simulation
        :param label: the label of the vertex
        :param n_neurons: requested neuron count; ignored in favour of the\
            count implied by resolution and polarity
        :param polarity: which event polarities to receive
        :param resolution: the (possibly downsampled) retina resolution
        """
        # Validate number of timestamp bytes
        if not isinstance(polarity, PushBotRetinaPolarity):
            raise exceptions.SpynnakerException(
                "Pushbot retina polarity should be one of those defined in"
                " Polarity enumeration")
        if not isinstance(resolution, PushBotRetinaResolution):
            raise exceptions.SpynnakerException(
                "Pushbot retina resolution should be one of those defined in"
                " Resolution enumeration")
        # Cache resolution
        self._resolution = resolution
        # Build standard routing key from virtual chip coordinates
        self._routing_key = fixed_key
        self._retina_source_key = self._routing_key
        # Calculate number of neurons: one per pixel
        fixed_n_neurons = resolution.value.pixels ** 2
        # If polarity is merged
        if polarity == PushBotRetinaPolarity.Merged:
            # Double number of neurons
            fixed_n_neurons *= 2
            # We need to mask out two coordinates and a polarity bit
            mask_bits = (2 * resolution.value.coordinate_bits) + 1
        # Otherwise
        else:
            # We need to mask out two coordinates
            mask_bits = 2 * resolution.value.coordinate_bits
        # If polarity is up, set polarity bit in routing key
        if polarity == PushBotRetinaPolarity.Up:
            polarity_bit = 1 << (2 * resolution.value.coordinate_bits)
            self._routing_key |= polarity_bit
        # Build routing mask: keep every bit above the per-event fields
        self._routing_mask = ~((1 << mask_bits) - 1) & 0xFFFFFFFF
        AbstractVirtualVertex.__init__(
            self, fixed_n_neurons, spinnaker_link_id,
            max_atoms_per_core=fixed_n_neurons, label=label)
        AbstractSendMeMulticastCommandsVertex.__init__(
            self, self._get_commands())
        AbstractProvidesOutgoingPartitionConstraints.__init__(self)
        # NOTE(review): Python 2 print statement; other vertices use
        # logger.warn for the same warning -- consider unifying
        if n_neurons != fixed_n_neurons and n_neurons is not None:
            print "Warning, the retina will have {} neurons".format(
                fixed_n_neurons)

    def get_outgoing_partition_constraints(self, partition, graph_mapper):
        # All retina events carry the key/mask computed in __init__
        return [KeyAllocatorFixedKeyAndMaskConstraint(
            [BaseKeyAndMask(self._routing_key, self._routing_mask)])]

    def _get_commands(self):
        """ Return the multicast commands that configure the pushbot\
            retina: route sensor data, set the retina key, and enable at\
            start / disable at end of simulation
        """
        # Set sensor key
        commands = list()
        commands.append(MultiCastCommand(
            0, PushBotRetinaDevice.SENSOR | PushBotRetinaDevice.SENSOR_SET_KEY,
            PushBotRetinaDevice.MANAGEMENT_MASK, self._retina_source_key,
            1, 100))
        # Set sensor to pushbot
        commands.append(MultiCastCommand(
            0, (PushBotRetinaDevice.SENSOR |
                PushBotRetinaDevice.SENSOR_SET_PUSHBOT),
            PushBotRetinaDevice.MANAGEMENT_MASK, 1,
            1, 100))
        # Ensure retina is disabled
        commands.append(MultiCastCommand(
            0, PushBotRetinaDevice.RETINA_DISABLE,
            PushBotRetinaDevice.MANAGEMENT_MASK, 0,
            1, 100))
        # Set retina key
        commands.append(MultiCastCommand(
            0, PushBotRetinaDevice.RETINA_KEY_SET,
            PushBotRetinaDevice.MANAGEMENT_MASK, self._retina_source_key,
            1, 100))
        # Enable retina at the configured resolution, without timestamps
        commands.append(MultiCastCommand(
            0, PushBotRetinaDevice.RETINA_ENABLE,
            PushBotRetinaDevice.MANAGEMENT_MASK,
            (PushBotRetinaDevice.RETINA_NO_TIMESTAMP +
             self._resolution.value.enable_command),
            1, 100))
        # At end of simulation, disable retina
        commands.append(MultiCastCommand(
            -1, PushBotRetinaDevice.RETINA_DISABLE,
            PushBotRetinaDevice.MANAGEMENT_MASK, 0,
            1, 100))
        return commands

    @property
    def model_name(self):
        return "pushbot retina device"

    def recieves_multicast_commands(self):
        # (sic) - spelling matches the abstract interface method name
        return True

    def is_virtual_vertex(self):
        return True
from spinn_front_end_common.utilities.connections.live_event_connection \
import LiveEventConnection
# The maximum number of 32-bit keys that will fit in a packet
_MAX_FULL_KEYS_PER_PACKET = 63

# The maximum number of 16-bit keys that will fit in a packet
# NOTE(review): neither constant is referenced in this module -- presumably
# consumed by the underlying sender or kept for reference; verify before
# removing
_MAX_HALF_KEYS_PER_PACKET = 127
class SpynnakerLiveSpikesConnection(LiveEventConnection):
    """ A connection for receiving and sending live spikes from and to\
        SpiNNaker
    """

    def __init__(self, receive_labels=None, send_labels=None, local_host=None,
                 local_port=19999):
        """
        :param receive_labels: Labels of population from which live spikes\
            will be received.
        :type receive_labels: iterable of str
        :param send_labels: Labels of population to which live spikes will\
            be sent
        :type send_labels: iterable of str
        :param local_host: Optional specification of the local hostname or\
            ip address of the interface to listen on
        :type local_host: str
        :param local_port: Optional specification of the local port to\
            listen on.  Must match the port that the toolchain will send the\
            notification on (19999 by default)
        :type local_port: int
        """
        # Everything is delegated to the generic live-event connection,
        # fixing the label used for the live spike receiver vertex
        LiveEventConnection.__init__(
            self, "LiveSpikeReceiver", receive_labels, send_labels,
            local_host, local_port)

    def send_spike(self, label, neuron_id, send_full_keys=False):
        """ Send a spike from a single neuron

        :param label: The label of the population from which the spike will\
            originate
        :type label: str
        :param neuron_id: The id of the neuron sending a spike
        :type neuron_id: int
        :param send_full_keys: Determines whether to send full 32-bit keys,\
            getting the key for each neuron from the database, or whether\
            to send 16-bit neuron ids directly
        :type send_full_keys: bool
        """
        # A single spike is simply a batch of one
        self.send_spikes(label, [neuron_id], send_full_keys)

    def send_spikes(self, label, neuron_ids, send_full_keys=False):
        """ Send a number of spikes

        :param label: The label of the population from which the spikes\
            will originate
        :type label: str
        :param neuron_ids: array-like of neuron ids sending spikes
        :type: [int]
        :param send_full_keys: Determines whether to send full 32-bit keys,\
            getting the key for each neuron from the database, or whether\
            to send 16-bit neuron ids directly
        :type send_full_keys: bool
        """
        # Spikes are plain events at the transport level
        self.send_events(label, neuron_ids, send_full_keys)
from spynnaker.pyNN.utilities import utility_calls
from spynnaker.pyNN.models.neural_properties.neural_parameter \
import NeuronParameter
from data_specification.enums.data_type import DataType
from spynnaker.pyNN.models.neuron.threshold_types.abstract_threshold_type \
import AbstractThresholdType
import numpy
class ThresholdTypeMaassStochastic(AbstractThresholdType):
    """ A stochastic threshold, parameterised per-neuron by du_th, tau_th\
        and v_thresh
    """

    def __init__(self, n_neurons, du_th, tau_th, v_thresh):
        AbstractThresholdType.__init__(self)
        self._n_neurons = n_neurons
        self._du_th = self._convert(du_th)
        self._tau_th = self._convert(tau_th)
        self._v_thresh = self._convert(v_thresh)

    def _convert(self, value):
        """ Expand a parameter value to one entry per neuron """
        return utility_calls.convert_param_to_numpy(value, self._n_neurons)

    @property
    def v_thresh(self):
        return self._v_thresh

    @v_thresh.setter
    def v_thresh(self, v_thresh):
        self._v_thresh = self._convert(v_thresh)

    @property
    def du_th(self):
        return self._du_th

    @du_th.setter
    def du_th(self, du_th):
        self._du_th = self._convert(du_th)

    @property
    def tau_th(self):
        return self._tau_th

    @tau_th.setter
    def tau_th(self, tau_th):
        self._tau_th = self._convert(tau_th)

    @property
    def _du_th_inv(self):
        # Reciprocal form in which the parameter is written out
        return numpy.divide(1.0, self._du_th)

    @property
    def _tau_th_inv(self):
        # Reciprocal form in which the parameter is written out
        return numpy.divide(1.0, self._tau_th)

    def get_n_threshold_parameters(self):
        return 3

    def get_threshold_parameters(self):
        # Written out as: 1/du_th, 1/tau_th, v_thresh
        return [
            NeuronParameter(self._du_th_inv, DataType.S1615),
            NeuronParameter(self._tau_th_inv, DataType.S1615),
            NeuronParameter(self._v_thresh, DataType.S1615)
        ]

    def get_n_cpu_cycles_per_neuron(self):
        return 30
from data_specification.enums.data_type import DataType
from spynnaker.pyNN.models.neuron.plasticity.stdp.timing_dependence\
.abstract_timing_dependence import AbstractTimingDependence
from spynnaker.pyNN.models.neuron.plasticity.stdp.synapse_structure\
.synapse_structure_weight_only import SynapseStructureWeightOnly
from spynnaker.pyNN.models.neuron.plasticity.stdp.common \
import plasticity_helpers
import logging
logger = logging.getLogger(__name__)
# Constants
LOOKUP_TAU_SIZE = 256
LOOKUP_TAU_SHIFT = 0
class TimingDependenceVogels2011(AbstractTimingDependence):
    """ Timing dependence for the Vogels 2011 plasticity rule, using a\
        single exponential lookup table and a fixed-point alpha parameter
    """

    def __init__(self, alpha, tau=20.0):
        AbstractTimingDependence.__init__(self)
        self._alpha = alpha
        self._tau = tau
        self._synapse_structure = SynapseStructureWeightOnly()

    @property
    def tau(self):
        return self._tau

    @property
    def vertex_executable_suffix(self):
        return "vogels_2011"

    @property
    def pre_trace_n_bytes(self):
        # Trace entries consist of a single 16-bit number
        return 2

    @property
    def n_weight_terms(self):
        return 1

    @property
    def synaptic_structure(self):
        return self._synapse_structure

    def is_same_as(self, other):
        # isinstance already rejects None
        if not isinstance(other, TimingDependenceVogels2011):
            return False
        return self._tau == other._tau and self._alpha == other._alpha

    def get_parameters_sdram_usage_in_bytes(self):
        # One 32-bit alpha value plus one 16-bit entry per LUT slot
        return 4 + (2 * LOOKUP_TAU_SIZE)

    def write_parameters(self, spec, machine_time_step, weight_scales):
        """ Write alpha (as S15.16-style fixed point) followed by the\
            exponential lookup table for tau
        """
        # Check timestep is valid
        if machine_time_step != 1000:
            raise NotImplementedError("STDP LUT generation currently only "
                                      "supports 1ms timesteps")

        # Write alpha to spec
        alpha_fixed = plasticity_helpers.float_to_fixed(
            self._alpha, plasticity_helpers.STDP_FIXED_POINT_ONE)
        spec.write_value(data=alpha_fixed, data_type=DataType.INT32)

        # Write lookup table
        plasticity_helpers.write_exp_lut(
            spec, self.tau, LOOKUP_TAU_SIZE, LOOKUP_TAU_SHIFT)
import math
from data_specification.enums.data_type import DataType
from spynnaker.pyNN.models.neuron.plasticity.stdp.timing_dependence.\
abstract_timing_dependence import AbstractTimingDependence
from spynnaker_extra_pynn_models.neuron.plasticity.stdp\
.synapse_structure.synapse_structure_weight_accumulator \
import SynapseStructureWeightAccumulator
from spynnaker.pyNN.models.neuron.plasticity.stdp.common \
import plasticity_helpers
class TimingDependenceRecurrent(AbstractTimingDependence):
def __init__(
self, accumulator_depression=-6, accumulator_potentiation=6,
mean_pre_window=35.0, mean_post_window=35.0, dual_fsm=True):
AbstractTimingDependence.__init__(self)
self.accumulator_depression_plus_one = accumulator_depression + 1
self.accumulator_potentiation_minus_one = accumulator_potentiation - 1
self.mean_pre_window = mean_pre_window
self.mean_post_window = mean_post_window
self.dual_fsm = dual_fsm
self._synapse_structure = SynapseStructureWeightAccumulator()
def is_same_as(self, other):
if (other is None) or (not isinstance(
other, TimingDependenceRecurrent)):
return False
return ((self.accumulator_depression_plus_one ==
other.accumulator_depression_plus_one) and
(self.accumulator_potentiation_minus_one ==
other.accumulator_potentiation_minus_one) and
(self.mean_pre_window == other.mean_pre_window) and
(self.mean_post_window == other.mean_post_window))
@property
def vertex_executable_suffix(self):
if self.dual_fsm:
return "recurrent_dual_fsm"
return "recurrent_pre_stochastic"
@property
def pre_trace_n_bytes(self):
# When using the seperate FSMs, pre-trace contains window length,
# otherwise it's in the synapse
return 2 if self.dual_fsm else 0
def get_parameters_sdram_usage_in_bytes(self):
# 2 * 32-bit parameters
# 2 * LUTS with STDP_FIXED_POINT_ONE * 16-bit entries
return (4 * 2) + (2 * (2 * plasticity_helpers.STDP_FIXED_POINT_ONE))
@property
def n_weight_terms(self):
return 1
def write_parameters(self, spec, machine_time_step, weight_scales):
# Write parameters
spec.write_value(data=self.accumulator_depression_plus_one,
data_type=DataType.INT32)
spec.write_value(data=self.accumulator_potentiation_minus_one,
data_type=DataType.INT32)
# Convert mean times into machine timesteps
mean_pre_timesteps = (float(self.mean_pre_window) *
(1000.0 / float(machine_time_step)))
mean_post_timesteps = (float(self.mean_post_window) *
(1000.0 / float(machine_time_step)))
# Write lookup tables
self._write_exp_dist_lut(spec, mean_pre_timesteps)
self._write_exp_dist_lut(spec, mean_post_timesteps)
    def _write_exp_dist_lut(self, spec, mean):
        """ Write a lookup table that maps a uniform fixed-point random
            value onto an exponentially-distributed window length, via the
            inverse CDF of the exponential distribution.

        :param spec: the data specification to write to
        :param mean: mean of the exponential distribution, in timesteps
        """
        for x in range(plasticity_helpers.STDP_FIXED_POINT_ONE):
            # Calculate inverse CDF: -mean * ln(1 - u) for u in [0, 1)
            x_float = float(x) / float(plasticity_helpers.STDP_FIXED_POINT_ONE)
            p_float = math.log(1.0 - x_float) * -mean
            # NOTE(review): near the top of the range p_float is several
            # times the mean; presumably this always fits in 16 bits for
            # sane window sizes -- confirm there is no UINT16 overflow.
            p = round(p_float)
            spec.write_value(data=p, data_type=DataType.UINT16)
    @property
    def synaptic_structure(self):
        """ The synaptic structure (weight plus accumulator) managed by
            this timing rule.
        """
        return self._synapse_structure
from spynnaker.pyNN.models.neuron.abstract_population_vertex import \
AbstractPopulationVertex
from spynnaker.pyNN.models.neuron.neuron_models\
.neuron_model_leaky_integrate_and_fire \
import NeuronModelLeakyIntegrateAndFire
from spynnaker.pyNN.models.neuron.synapse_types.synapse_type_exponential \
import SynapseTypeExponential
from spynnaker.pyNN.models.neuron.input_types.input_type_conductance \
import InputTypeConductance
from spynnaker_extra_pynn_models.neuron.threshold_types\
.threshold_type_maass_stochastic import ThresholdTypeMaassStochastic
class IFCondExpStoc(AbstractPopulationVertex):
    """ Leaky integrate-and-fire neuron with conductance-based synaptic\
        input and a stochastic (Maass) threshold.
    """

    _model_based_max_atoms_per_core = 255

    default_parameters = {
        'tau_m': 20.0, 'cm': 1.0, 'e_rev_E': 0.0, 'e_rev_I': -70.0,
        'v_rest': -65.0, 'v_reset': -65.0,
        'v_thresh': -50.0, 'tau_syn_E': 5.0, 'tau_syn_I': 5.0,
        'tau_refrac': 0.1, 'i_offset': 0, "du_th": 0.5, "tau_th": 20.0}

    def __init__(
            self, n_neurons, machine_time_step, timescale_factor,
            spikes_per_second=None, ring_buffer_sigma=None, constraints=None,
            label=None,
            tau_m=default_parameters['tau_m'], cm=default_parameters['cm'],
            v_rest=default_parameters['v_rest'],
            v_reset=default_parameters['v_reset'],
            v_thresh=default_parameters['v_thresh'],
            tau_syn_E=default_parameters['tau_syn_E'],
            tau_syn_I=default_parameters['tau_syn_I'],
            tau_refrac=default_parameters['tau_refrac'],
            i_offset=default_parameters['i_offset'],
            e_rev_E=default_parameters['e_rev_E'],
            e_rev_I=default_parameters['e_rev_I'],
            du_th=default_parameters['du_th'],
            tau_th=default_parameters['tau_th'], v_init=None,
            incoming_spike_buffer_size=None):
        # Assemble the component models of the neuron
        neuron_model = NeuronModelLeakyIntegrateAndFire(
            n_neurons, machine_time_step, v_init, v_rest, tau_m, cm, i_offset,
            v_reset, tau_refrac)
        synapse_type = SynapseTypeExponential(
            n_neurons, machine_time_step, tau_syn_E, tau_syn_I)
        input_type = InputTypeConductance(n_neurons, e_rev_E, e_rev_I)
        threshold_type = ThresholdTypeMaassStochastic(
            n_neurons, du_th, tau_th, v_thresh)

        AbstractPopulationVertex.__init__(
            self, n_neurons=n_neurons, binary="IF_cond_exp_stoc.aplx",
            label=label,
            max_atoms_per_core=IFCondExpStoc._model_based_max_atoms_per_core,
            machine_time_step=machine_time_step,
            timescale_factor=timescale_factor,
            spikes_per_second=spikes_per_second,
            ring_buffer_sigma=ring_buffer_sigma,
            # Added (appended keyword, default None, so backward compatible)
            # for consistency with the sibling builds such as IFCurrDelta
            incoming_spike_buffer_size=incoming_spike_buffer_size,
            model_name="IF_cond_exp_stoc", neuron_model=neuron_model,
            input_type=input_type, synapse_type=synapse_type,
            threshold_type=threshold_type, constraints=constraints)

    @staticmethod
    def set_model_max_atoms_per_core(new_value):
        """ Set the maximum number of atoms per core for this model.

        :param new_value: the new maximum number of atoms per core
        """
        IFCondExpStoc._model_based_max_atoms_per_core = new_value
from spynnaker.pyNN.models.neuron.abstract_population_vertex import \
AbstractPopulationVertex
from spynnaker.pyNN.models.neuron.neuron_models\
.neuron_model_leaky_integrate_and_fire \
import NeuronModelLeakyIntegrateAndFire
from spynnaker.pyNN.models.neuron.input_types.input_type_current \
import InputTypeCurrent
from spynnaker.pyNN.models.neuron.threshold_types.threshold_type_static \
import ThresholdTypeStatic
from spynnaker_extra_pynn_models.neuron.synapse_types.synapse_type_delta \
import SynapseTypeDelta
class IFCurrDelta(AbstractPopulationVertex):
    """ Leaky integrate-and-fire neuron model with an instantaneous\
        (delta-shaped) current input.
    """

    _model_based_max_atoms_per_core = 255

    default_parameters = {
        'tau_m': 20.0, 'cm': 1.0, 'v_rest': -65.0, 'v_reset': -65.0,
        'v_thresh': -50.0, 'tau_refrac': 0.1, 'i_offset': 0}

    # noinspection PyPep8Naming
    def __init__(
            self, n_neurons, machine_time_step, timescale_factor,
            spikes_per_second=None, ring_buffer_sigma=None,
            incoming_spike_buffer_size=None,
            constraints=None, label=None,
            tau_m=default_parameters['tau_m'], cm=default_parameters['cm'],
            v_rest=default_parameters['v_rest'],
            v_reset=default_parameters['v_reset'],
            v_thresh=default_parameters['v_thresh'],
            tau_refrac=default_parameters['tau_refrac'],
            i_offset=default_parameters['i_offset'], v_init=None):
        # Build each component of the neuron, then let the base class
        # assemble the vertex from them.
        model = NeuronModelLeakyIntegrateAndFire(
            n_neurons, machine_time_step, v_init, v_rest, tau_m, cm, i_offset,
            v_reset, tau_refrac)
        threshold = ThresholdTypeStatic(n_neurons, v_thresh)
        super(IFCurrDelta, self).__init__(
            n_neurons=n_neurons, binary="IF_curr_delta.aplx",
            label=label,
            max_atoms_per_core=IFCurrDelta._model_based_max_atoms_per_core,
            machine_time_step=machine_time_step,
            timescale_factor=timescale_factor,
            spikes_per_second=spikes_per_second,
            ring_buffer_sigma=ring_buffer_sigma,
            incoming_spike_buffer_size=incoming_spike_buffer_size,
            model_name="IF_curr_delta", neuron_model=model,
            input_type=InputTypeCurrent(), synapse_type=SynapseTypeDelta(),
            threshold_type=threshold, constraints=constraints)

    @staticmethod
    def set_model_max_atoms_per_core(new_value):
        """ Set the maximum number of atoms per core for this model.

        :param new_value: the new maximum number of atoms per core
        """
        IFCurrDelta._model_based_max_atoms_per_core = new_value
from spynnaker.pyNN.models.neuron.abstract_population_vertex import \
AbstractPopulationVertex
from spynnaker.pyNN.models.neuron.neuron_models\
.neuron_model_leaky_integrate_and_fire \
import NeuronModelLeakyIntegrateAndFire
from spynnaker.pyNN.models.neuron.synapse_types.synapse_type_exponential \
import SynapseTypeExponential
from spynnaker.pyNN.models.neuron.input_types.input_type_current \
import InputTypeCurrent
from spynnaker.pyNN.models.neuron.threshold_types.threshold_type_static \
import ThresholdTypeStatic
from spynnaker_extra_pynn_models.neuron\
.additional_inputs.additional_input_ca2_adaptive \
import AdditionalInputCa2Adaptive
class IFCurrExpCa2Adaptive(AbstractPopulationVertex):
    """ Spike-frequency-adapting leaky integrate-and-fire neuron with\
        exponential current input.

    Model from Liu, Y. H., & Wang, X. J. (2001). Spike-frequency\
    adaptation of a generalized leaky integrate-and-fire model neuron. \
    Journal of Computational Neuroscience, 10(1), 25-45. \
    doi:10.1023/A:1008916026143
    """

    _model_based_max_atoms_per_core = 255

    default_parameters = {
        'tau_m': 20.0, 'cm': 1.0, 'v_rest': -65.0, 'v_reset': -65.0,
        'v_thresh': -50.0, 'tau_syn_E': 5.0, 'tau_syn_I': 5.0,
        'tau_refrac': 0.1, 'i_offset': 0,
        'tau_ca2': 50.0, "i_ca2": 0.0, "i_alpha": 0.1}

    def __init__(
            self, n_neurons, machine_time_step, timescale_factor,
            spikes_per_second=None, ring_buffer_sigma=None,
            incoming_spike_buffer_size=None,
            constraints=None, label=None,
            tau_m=default_parameters['tau_m'], cm=default_parameters['cm'],
            v_rest=default_parameters['v_rest'],
            v_reset=default_parameters['v_reset'],
            v_thresh=default_parameters['v_thresh'],
            tau_syn_E=default_parameters['tau_syn_E'],
            tau_syn_I=default_parameters['tau_syn_I'],
            tau_refrac=default_parameters['tau_refrac'],
            i_offset=default_parameters['i_offset'],
            tau_ca2=default_parameters["tau_ca2"],
            i_ca2=default_parameters["i_ca2"],
            i_alpha=default_parameters["i_alpha"], v_init=None):
        # Component models; the Ca2 additional input provides the
        # spike-frequency adaptation current.
        model = NeuronModelLeakyIntegrateAndFire(
            n_neurons, machine_time_step, v_init, v_rest, tau_m, cm, i_offset,
            v_reset, tau_refrac)
        synapses = SynapseTypeExponential(
            n_neurons, machine_time_step, tau_syn_E, tau_syn_I)
        threshold = ThresholdTypeStatic(n_neurons, v_thresh)
        ca2_input = AdditionalInputCa2Adaptive(
            n_neurons, machine_time_step, tau_ca2, i_ca2, i_alpha)
        super(IFCurrExpCa2Adaptive, self).__init__(
            n_neurons=n_neurons, binary="IF_curr_exp_ca2_adaptive.aplx",
            label=label,
            max_atoms_per_core=(
                IFCurrExpCa2Adaptive._model_based_max_atoms_per_core),
            machine_time_step=machine_time_step,
            timescale_factor=timescale_factor,
            spikes_per_second=spikes_per_second,
            ring_buffer_sigma=ring_buffer_sigma,
            incoming_spike_buffer_size=incoming_spike_buffer_size,
            model_name="IF_curr_exp_ca2_adaptive", neuron_model=model,
            input_type=InputTypeCurrent(), synapse_type=synapses,
            threshold_type=threshold, additional_input=ca2_input,
            constraints=constraints)

    @staticmethod
    def set_model_max_atoms_per_core(new_value):
        """ Set the maximum number of atoms per core for this model.

        :param new_value: the new maximum number of atoms per core
        """
        IFCurrExpCa2Adaptive._model_based_max_atoms_per_core = new_value
from spynnaker.pyNN.utilities import utility_calls
from spynnaker.pyNN.models.neural_properties.neural_parameter \
import NeuronParameter
from data_specification.enums.data_type import DataType
from spynnaker.pyNN.models.neuron.additional_inputs.abstract_additional_input \
import AbstractAdditionalInput
import numpy
class AdditionalInputCa2Adaptive(AbstractAdditionalInput):
    """ Additional neuron input implementing a calcium-mediated\
        adaptation current with parameters tau_ca2, i_ca2 and i_alpha.
    """

    def __init__(self, n_neurons, machine_time_step, tau_ca2, i_ca2, i_alpha):
        AbstractAdditionalInput.__init__(self)
        self._n_neurons = n_neurons
        self._machine_time_step = machine_time_step
        # Each parameter may be a scalar, a list or a random distribution;
        # normalise all of them into per-neuron numpy arrays.
        self._tau_ca2 = self._per_neuron(tau_ca2)
        self._i_ca2 = self._per_neuron(i_ca2)
        self._i_alpha = self._per_neuron(i_alpha)

    def _per_neuron(self, value):
        # Expand a parameter value into one entry per neuron
        return utility_calls.convert_param_to_numpy(value, self._n_neurons)

    @property
    def tau_ca2(self):
        """ The tau_ca2 time constant values, one per neuron. """
        return self._tau_ca2

    @tau_ca2.setter
    def tau_ca2(self, tau_ca2):
        self._tau_ca2 = self._per_neuron(tau_ca2)

    @property
    def i_ca2(self):
        """ The i_ca2 current values, one per neuron. """
        return self._i_ca2

    @i_ca2.setter
    def i_ca2(self, i_ca2):
        self._i_ca2 = self._per_neuron(i_ca2)

    @property
    def i_alpha(self):
        """ The i_alpha current values, one per neuron. """
        return self._i_alpha

    @i_alpha.setter
    def i_alpha(self, i_alpha):
        self._i_alpha = self._per_neuron(i_alpha)

    @property
    def _exp_tau_ca2(self):
        # Per-timestep decay factor exp(-dt / tau), with the machine time
        # step scaled by 1000 to match tau_ca2's units
        return numpy.exp(
            float(-self._machine_time_step) / (1000.0 * self._tau_ca2))

    def get_n_parameters(self):
        """ The number of parameters written for this additional input. """
        return 3

    def get_parameters(self):
        """ The parameters, in write order. """
        return [
            NeuronParameter(self._exp_tau_ca2, DataType.S1615),
            NeuronParameter(self._i_ca2, DataType.S1615),
            NeuronParameter(self._i_alpha, DataType.S1615)
        ]

    def get_n_cpu_cycles_per_neuron(self):
        return 3

    def get_dtcm_usage_per_neuron_in_bytes(self):
        # 3 parameters x 4 bytes
        return 12

    def get_sdram_usage_per_neuron_in_bytes(self):
        # 3 parameters x 4 bytes
        return 12
from contextlib import contextmanager
try:
# this fails in <=2020 versions of Python on OS X 11.x
import OpenGL.GL # noqa: F401
except ImportError:
# Hack for macOS Big Sur
from ._bigsurhack import patch_ctypes
patch_ctypes()
import OpenGL.GL as GL
# pylint: disable=invalid-name
blend = GL.GL_BLEND
color_buffer_bit = GL.GL_COLOR_BUFFER_BIT
depth_buffer_bit = GL.GL_DEPTH_BUFFER_BIT
line_smooth = GL.GL_LINE_SMOOTH
lines = GL.GL_LINES
model_view = GL.GL_MODELVIEW
one_minus_src_alpha = GL.GL_ONE_MINUS_SRC_ALPHA
points = GL.GL_POINTS
projection = GL.GL_PROJECTION
smooth = GL.GL_SMOOTH
src_alpha = GL.GL_SRC_ALPHA
depth_test = GL.GL_DEPTH_TEST
rgb = GL.GL_RGB
unsigned_byte = GL.GL_UNSIGNED_BYTE
# pylint: enable=invalid-name
def blend_function(sfactor, dfactor):
    """ Set the blending function.

    :param sfactor: the source blending factor (e.g. ``src_alpha``)
    :param dfactor: the destination blending factor
    """
    GL.glBlendFunc(sfactor, dfactor)
def clear(mask):
    """ Clear the drawing surface.

    :param mask: which buffers to clear, e.g.\
        ``color_buffer_bit | depth_buffer_bit``
    """
    GL.glClear(mask)
def clear_color(red, green, blue, alpha=1.0):
    """ Set the colour used when clearing the surface.

    Note that this only sets the colour; :py:func:`clear` does the actual
    clearing.

    :param red: red component, 0.0 to 1.0
    :param green: green component, 0.0 to 1.0
    :param blue: blue component, 0.0 to 1.0
    :param alpha: opacity; defaults to fully opaque
    """
    GL.glClearColor(float(red), float(green), float(blue), float(alpha))
def color(*args):
    """ Set the current drawing colour.

    :param args: colour components, passed straight through to ``glColor``
    """
    GL.glColor(*args)
def disable(*args):
    """ Disable the listed OpenGL capabilities.

    :param args: capability flags, e.g. ``blend``, ``line_smooth``
    """
    for feature in args:
        GL.glDisable(feature)
def enable(*args):
    """ Enable the listed OpenGL capabilities.

    :param args: capability flags, e.g. ``blend``, ``line_smooth``
    """
    for feature in args:
        GL.glEnable(feature)
def line_width(width):
    """ Set the width used when drawing lines.

    :param width: the line width, in pixels
    """
    GL.glLineWidth(float(width))
def load_identity():
    """ Replace the current matrix (of the stack selected by\
        :py:func:`matrix_mode`) with the identity matrix.
    """
    GL.glLoadIdentity()
def matrix_mode(mode):
    """ Set which matrix stack subsequent matrix operations target.

    :param mode: ``projection`` or ``model_view``
    """
    GL.glMatrixMode(mode)
def orthographic_projction(*args):
    """ Set an orthographic (non-perspective) projection.

    .. note::
        The misspelling of "projection" is deliberate in the sense that
        callers import this function by this name, so it must not be
        renamed without updating them.

    :param args: left, right, bottom, top, near, far clipping planes,\
        passed straight through to ``glOrtho``
    """
    GL.glOrtho(*args)
def point_size(size):
    """ Set the size used when drawing points.

    :param size: the point diameter, in pixels
    """
    GL.glPointSize(float(size))
def raster_position(*args):
    """ Set the raster position, used as the anchor point for bitmap and\
        pixel drawing.

    :param args: coordinates, passed straight through to ``glRasterPos``
    """
    GL.glRasterPos(*args)
def rotate(angle, x, y, z):
    """ Rotate the projection about an axis.

    :param angle: rotation angle, in degrees
    :param x: x component of the rotation axis
    :param y: y component of the rotation axis
    :param z: z component of the rotation axis
    """
    GL.glRotatef(angle, x, y, z)
def scale(x, y, z):
    """ Scale the projection about the origin.

    :param x: scale factor along the x axis
    :param y: scale factor along the y axis
    :param z: scale factor along the z axis
    """
    GL.glScale(x, y, z)
def shade_model(mode):
    """ Set the shading model.

    :param mode: the shading mode, e.g. ``smooth``
    """
    GL.glShadeModel(mode)
def translate(x, y, z):
    """ Translate the projection.

    :param x: translation along the x axis
    :param y: translation along the y axis
    :param z: translation along the z axis
    """
    GL.glTranslate(x, y, z)
def vertex(*args):
    """ Mark a vertex of the drawing path opened by :py:func:`draw`.

    :param args: vertex coordinates, passed straight through to ``glVertex``
    """
    GL.glVertex(*args)
def viewport(x, y, width, height):
    """ Set up the view port.

    :param x: left edge of the viewport, in pixels
    :param y: bottom edge of the viewport, in pixels
    :param width: width of the viewport, in pixels
    :param height: height of the viewport, in pixels
    """
    GL.glViewport(int(x), int(y), int(width), int(height))
def draw_pixels(*args):
    """ Write a block of pixels to the frame buffer at the current raster\
        position.

    :param args: arguments passed straight through to ``glDrawPixels``
    """
    GL.glDrawPixels(*args)
@contextmanager
def draw(drawing_style):
    """ Draw a line, set of points or closed curve (depending on\
        drawing_style). Use as a context manager and specify the vertices of\
        the path in the body of the context.

    :param drawing_style: e.g. ``lines`` or ``points``
    """
    GL.glBegin(drawing_style)
    try:
        yield
    finally:
        # Always close the glBegin/glEnd pair, even if the context body
        # raises; leaving it open corrupts subsequent GL calls.
        GL.glEnd()
@contextmanager
def save_matrix():
    """ Manipulate the view matrix in a temporary context; the view matrix is\
        restored once this context is left.
    """
    GL.glPushMatrix()
    try:
        yield
    finally:
        # Always pop, even if the context body raises; otherwise the
        # matrix stack is left unbalanced for all later drawing.
        GL.glPopMatrix()
from datetime import datetime
import os
import traceback
import OpenGL.error
from spinn_utilities.abstract_base import AbstractBase, abstractmethod
from spynnaker_visualisers.opengl_support import (
viewport, save_matrix, enable, blend, line_smooth, disable, line_width,
blend_function, src_alpha, one_minus_src_alpha, rotate, scale, translate,
raster_position)
try:
# this fails in <=2020 versions of Python on OS X 11.x
import OpenGL.GLUT # noqa: F401
except ImportError:
# Hack for macOS Big Sur
from ._bigsurhack import patch_ctypes
patch_ctypes()
import OpenGL.GLUT as GLUT
keyUp = GLUT.GLUT_KEY_UP
keyDown = GLUT.GLUT_KEY_DOWN
keyLeft = GLUT.GLUT_KEY_LEFT
keyRight = GLUT.GLUT_KEY_RIGHT
displayModeDouble = GLUT.GLUT_DOUBLE
class _PerformanceTimer(object):
    """ A simple stopwatch built on :py:class:`datetime.datetime`. """
    __slots__ = [
        "_stamp_1", "_stamp_2", "_stopped"]

    @staticmethod
    def _now():
        # Single point of access to the clock
        return datetime.now()

    def __init__(self):
        self._stopped = True
        # Stamps become datetimes once the timer has run; 0 is just the
        # "never started" placeholder.
        self._stamp_1 = 0
        self._stamp_2 = 0

    def start(self):
        """ Start the timer. """
        self._stopped = False
        self._stamp_1 = _PerformanceTimer._now()

    def stop(self):
        """ Stop the timer. """
        self._stamp_2 = _PerformanceTimer._now()
        self._stopped = True

    @property
    def stopped(self):
        """ Is the timer stopped? """
        return self._stopped

    @property
    def elapsed_milliseconds(self):
        """ How long elapsed in the last timing run? In milliseconds.

        ..note::
            Only valid when the timer has previously been run and is\
            currently stopped.
        """
        delta = self._stamp_2 - self._stamp_1
        # total_seconds() includes the days component of the timedelta,
        # which the previous seconds/microseconds arithmetic dropped.
        return delta.total_seconds() * 1000.0

    @property
    def elapsed_seconds(self):
        """ How long elapsed in the last timing run? In seconds.

        ..note::
            Only valid when the timer has previously been run and is\
            currently stopped.
        """
        delta = self._stamp_2 - self._stamp_1
        return delta.total_seconds()
class GlutFramework(object, metaclass=AbstractBase):
    ''' Base for code that wants to visualise using an OpenGL surface.

    Subclasses implement :py:meth:`display` (and optionally the input
    callbacks) and then call :py:meth:`start_framework` to enter the GLUT
    main loop.
    '''
    # pylint: disable=broad-except
    __slots__ = [
        "display_timer",
        "elapsed_time_in_seconds",
        "frame_rate_timer",
        "frame_time",
        "frame_time_elapsed",
        "_logged_errors",
        "window"]

    def __init__(self):
        self.window = None
        self.frame_time_elapsed = 0.0
        # Target interval between redisplays, in milliseconds; set from the
        # fps argument in start_framework
        self.frame_time = 0.0
        self.frame_rate_timer = _PerformanceTimer()
        self.display_timer = _PerformanceTimer()
        self.elapsed_time_in_seconds = 0.0
        # Tracebacks already printed; used to report each failure only once
        self._logged_errors = set()

    def start_framework(self, args, title, width, height, posx, posy, fps, *,
                        display_mode=GLUT.GLUT_RGB | GLUT.GLUT_DOUBLE):
        """ start_framework will initialize framework and start the GLUT run\
            loop. It must be called after the GlutFramework class is created\
            to start the application.

        Not expected to return.

        :param args: command-line style arguments passed to GLUT
        :param title: the window title
        :param width: initial window width, in pixels
        :param height: initial window height, in pixels
        :param posx: initial window x position, in pixels
        :param posy: initial window y position, in pixels
        :param fps: target number of frames per second
        :param display_mode: the GLUT display mode flags
        """
        # Sets the instance to this, used in the callback wrapper functions
        self.frame_time = 1.0 / fps * 1000.0

        # Initialize GLUT
        GLUT.glutInit(args)
        GLUT.glutInitDisplayMode(display_mode)
        GLUT.glutInitWindowSize(width, height)
        GLUT.glutInitWindowPosition(posx, posy)
        self.window = GLUT.glutCreateWindow(title)
        try:
            # Not every GLUT implementation provides glutSetOption; PyOpenGL
            # raises NullFunctionError when the entry point is missing
            GLUT.glutSetOption(GLUT.GLUT_ACTION_ON_WINDOW_CLOSE,
                               GLUT.GLUT_ACTION_CONTINUE_EXECUTION)
        except OpenGL.error.NullFunctionError:
            pass

        self.init()  # Initialize

        # Function callbacks with wrapper functions
        GLUT.glutDisplayFunc(self.__display_framework)
        GLUT.glutReshapeFunc(self.__reshape_framework)
        GLUT.glutIdleFunc(self.__run)
        GLUT.glutMouseFunc(self.__mouse_button_press)
        GLUT.glutMotionFunc(self.__mouse_move)
        GLUT.glutKeyboardFunc(self.__keyboard_down)
        GLUT.glutKeyboardUpFunc(self.__keyboard_up)
        GLUT.glutSpecialFunc(self.__special_keyboard_down)
        GLUT.glutSpecialUpFunc(self.__special_keyboard_up)
        try:
            GLUT.glutCloseFunc(self._terminate)
        except OpenGL.error.NullFunctionError:
            # Fallback for GLUT variants that lack glutCloseFunc
            GLUT.glutWMCloseFunc(self._terminate)

        GLUT.glutMainLoop()

    def init(self):
        """ Initialises GLUT and registers any extra callback functions.
        """

    @abstractmethod
    def display(self, dTime):
        """ The display function is called at a specified frames-per-second\
            (FPS). Any animation drawing code can be run in the display method.

        :param dTime: the change in time (seconds)
        """

    def reshape(self, width, height):
        """ Called when the window dimensions change.

        :param width: the width of the window in pixels
        :param height: the height of the window in pixels
        """
        viewport(0, 0, width, height)

    def mouse_button_press(self, button, state, x, y):
        """ Called when the mouse buttons are pressed.

        :param button: the mouse buttons
        :param state: the state of the buttons
        :param x: the x coordinate
        :param y: the y coordinate
        """

    def mouse_move(self, x, y):
        """ Called when the mouse moves on the screen.

        :param x: the x coordinate
        :param y: the y coordinate
        """

    def keyboard_down(self, key, x, y):
        """ The keyboard function is called when a standard key is pressed\
            down.

        :param key: the key press
        :param x: the x coordinate of the mouse
        :param y: the y coordinate of the mouse
        """

    def keyboard_up(self, key, x, y):
        """ The keyboard function is called when a standard key is "unpressed".

        :param key: the key press
        :param x: the x coordinate of the mouse
        :param y: the y coordinate of the mouse
        """

    def special_keyboard_down(self, key, x, y):
        """ The keyboard function is called when a special key is pressed down\
            (F1 keys, Home, Insert, Delete, Page Up/Down, End, arrow keys).\
            http://www.opengl.org/resources/libraries/glut/spec3/node54.html

        :param key: the key press
        :param x: the x coordinate of the mouse
        :param y: the y coordinate of the mouse
        """

    def special_keyboard_up(self, key, x, y):
        """ The keyboard function is called when a special key is "unpressed"\
            (F1 keys, Home, Insert, Delete, Page Up/Down, End, arrow keys).

        :param key: the key press
        :param x: the x coordinate of the mouse
        :param y: the y coordinate of the mouse
        """

    def run(self):
        """ The run method is called by GLUT and contains the logic to set the\
            frame rate of the application.
        """
        if self.frame_rate_timer.stopped:
            self.frame_rate_timer.start()
        # stop the timer and calculate time since last frame
        self.frame_rate_timer.stop()
        milliseconds = self.frame_rate_timer.elapsed_milliseconds
        self.frame_time_elapsed += milliseconds
        if self.frame_time_elapsed >= self.frame_time:
            # If the time exceeds a certain "frame rate" then show the next
            # frame
            GLUT.glutPostRedisplay()
            # remove a "frame" and start counting up again
            self.frame_time_elapsed -= self.frame_time
        self.frame_rate_timer.start()

    def display_framework(self):
        """ The display_framework() function sets up initial GLUT state and\
            calculates the change in time between each frame. It calls the\
            display(float) function, which can be subclassed.
        """
        if self.display_timer.stopped:
            self.display_timer.start()

        self.display_timer.stop()
        elapsedTimeInSeconds = self.display_timer.elapsed_seconds
        if GLUT.glutGetWindow() == self.window:
            self.display(elapsedTimeInSeconds)
            GLUT.glutSwapBuffers()
        self.display_timer.start()

    def reshape_framework(self, width, height):
        """ Handle resizing of the window.
        """
        if GLUT.glutGetWindow() == self.window:
            self.reshape(width, height)

    @staticmethod
    def write_large(x, y, string, *args):
        """ Utility function: write a string to a given location as a bitmap.

        :param x: x coordinate of the text anchor
        :param y: y coordinate of the text anchor
        :param string: the text, optionally with %-format placeholders
        :param args: values for the %-format placeholders, if any
        """
        # pylint: disable=no-member
        if args:
            string = string % args
        raster_position(x, y)
        for ch in string:
            GLUT.glutBitmapCharacter(GLUT.GLUT_BITMAP_TIMES_ROMAN_24, ord(ch))

    @staticmethod
    def write_small(x, y, size, rotation, string, *args):
        """ Utility function: write a string to a given location as a strokes.

        :param x: x coordinate of the text anchor
        :param y: y coordinate of the text anchor
        :param size: scale factor applied to the stroke font
        :param rotation: rotation of the text about the z axis, in degrees
        :param string: the text, optionally with %-format placeholders
        :param args: values for the %-format placeholders, if any
        """
        # pylint: disable=no-member
        if args:
            string = string % args
        with save_matrix():
            # antialias the font
            enable(blend, line_smooth)
            blend_function(src_alpha, one_minus_src_alpha)
            line_width(1.5)
            translate(x, y, 0.0)
            scale(size, size, size)
            rotate(rotation, 0.0, 0.0, 1.0)
            for ch in string:
                GLUT.glutStrokeCharacter(GLUT.GLUT_STROKE_ROMAN, ord(ch))
            disable(blend, line_smooth)

    @staticmethod
    def _terminate(exit_code=0):
        """
        Because sys.exit() doesn't always work in the ctype-handled callbacks.
        """
        os._exit(exit_code)

    # The double-underscore wrappers below guard every GLUT callback:
    # exceptions are logged (once per distinct traceback, see __log_error)
    # instead of propagating into the C callback layer, and SystemExit is
    # turned into a hard process exit.

    def __display_framework(self):
        if not GLUT.glutGetWindow():
            return
        try:
            return self.display_framework()
        except Exception:
            self.__log_error()
        except SystemExit:
            self._terminate()

    def __reshape_framework(self, width, height):
        if not GLUT.glutGetWindow():
            return
        try:
            return self.reshape_framework(width, height)
        except Exception:
            self.__log_error()
        except SystemExit:
            self._terminate()

    def __run(self):
        if not GLUT.glutGetWindow():
            return
        try:
            return self.run()
        except Exception:
            self.__log_error()
        except SystemExit:
            self._terminate()

    def __mouse_button_press(self, button, state, x, y):
        if not GLUT.glutGetWindow():
            return
        try:
            return self.mouse_button_press(button, state, x, y)
        except Exception:
            self.__log_error()
        except SystemExit:
            self._terminate()

    def __mouse_move(self, x, y):
        if not GLUT.glutGetWindow():
            return
        try:
            return self.mouse_move(x, y)
        except Exception:
            self.__log_error()
        except SystemExit:
            self._terminate()

    def __keyboard_down(self, key, x, y):
        if not GLUT.glutGetWindow():
            return
        try:
            # Note: key arrives as bytes from GLUT and is decoded to str here
            return self.keyboard_down(key.decode(), x, y)
        except Exception:
            self.__log_error()
        except SystemExit:
            self._terminate()

    def __keyboard_up(self, key, x, y):
        if not GLUT.glutGetWindow():
            return
        try:
            return self.keyboard_up(key.decode(), x, y)
        except Exception:
            self.__log_error()
        except SystemExit:
            self._terminate()

    def __special_keyboard_down(self, key, x, y):
        if not GLUT.glutGetWindow():
            return
        try:
            return self.special_keyboard_down(key, x, y)
        except Exception:
            self.__log_error()
        except SystemExit:
            self._terminate()

    def __special_keyboard_up(self, key, x, y):
        if not GLUT.glutGetWindow():
            return
        try:
            return self.special_keyboard_up(key, x, y)
        except Exception:
            self.__log_error()
        except SystemExit:
            self._terminate()

    def __log_error(self):
        # Print each distinct traceback only once, to avoid spamming the
        # console every frame when a callback fails repeatedly
        tb = traceback.format_exc()
        if tb not in self._logged_errors:
            self._logged_errors.add(tb)
            traceback.print_exc()
# encoding: utf-8
""" A live plotter for the sPyNNaker Sudoku network.
"""
from argparse import ArgumentParser, REMAINDER
import sys
from threading import Condition, RLock
from spinn_utilities.overrides import overrides
from spinn_front_end_common.utilities.connections import LiveEventConnection
from spynnaker_visualisers.glut_framework import GlutFramework
from spynnaker_visualisers.opengl_support import (
vertex, draw, lines, color, point_size, points, line_width, clear_color,
clear, color_buffer_bit, load_identity, viewport, matrix_mode, projection,
model_view, orthographic_projction, shade_model, smooth)
__all__ = []
__version__ = 1
__date__ = '2017-07-25'
WINDOW_BORDER = 110
INIT_WINDOW_WIDTH = 800
INIT_WINDOW_HEIGHT = 600
INIT_WINDOW_X = 100
INIT_WINDOW_Y = 100
FRAMES_PER_SECOND = 10
class SudokuPlot(GlutFramework):
""" A live plotter for the sPyNNaker Sudoku network.
"""
__slots__ = [
"args",
"cell_id",
"cell_labels",
"cell_size_map",
"database_read",
"label_to_cell_map",
"latest_time",
"ms_per_bin",
"n_neurons",
"n_populations_to_read",
"neurons_per_number",
"plot_time_ms",
"point_mutex",
"points_to_draw",
"simulation_started",
"start_condition",
"timestep_ms",
"user_pressed_start",
"window_height",
"window_width"]
def __init__(self, args, neurons_per_number, ms_per_bin, wait_for_start):
"""
:param args:
Arguments (relating to the display) to pass through to GLUT
:param neurons_per_number:
How many neurons are used per number in the Sudoku cells
:param ms_per_bin:
How long does a sampling period last
:param wait_for_start:
Whether the system should wait for the SpiNNaker simulation to\
boot (probably yes!)
"""
super(SudokuPlot, self).__init__()
self.window_width = INIT_WINDOW_WIDTH
self.window_height = INIT_WINDOW_HEIGHT
self.cell_id = 0
self.user_pressed_start = not wait_for_start
self.simulation_started = False
self.database_read = False
self.n_neurons = 0
self.timestep_ms = 0
self.plot_time_ms = 0
self.ms_per_bin = float(ms_per_bin)
self.latest_time = 0.0
self.neurons_per_number = neurons_per_number
self.n_populations_to_read = 1
self.args = args
self.points_to_draw = [[] for _ in range(81)]
self.point_mutex = RLock()
self.label_to_cell_map = dict()
self.cell_size_map = dict()
self.cell_labels = dict()
self.start_condition = Condition()
@overrides(GlutFramework.init)
def init(self):
clear_color(0.0, 0.0, 0.0, 1.0)
color(1.0, 1.0, 1.0)
shade_model(smooth)
def connect_callbacks(self, connection, label):
""" Arrange so that labels on the given connection report their\
goings-on to this class.
:type connection: LiveEventConnection
:type label: str
"""
connection.add_init_callback(label, self._init_cb)
connection.add_receive_callback(label, self._receive_cb)
connection.add_start_resume_callback(label, self._start_cb)
def _init_cb(self, label, n_neurons, run_time_ms, machine_time_step_ms):
self.plot_time_ms = float(run_time_ms)
self.timestep_ms = float(machine_time_step_ms)
self.cell_labels[self.cell_id] = label
self.cell_size_map[self.cell_id] = n_neurons
self.label_to_cell_map[label] = self.cell_id
self.n_neurons += n_neurons
self.cell_id += 1
with self.start_condition:
self.n_populations_to_read -= 1
if self.n_populations_to_read <= 0:
self.database_read = True
while not self.user_pressed_start:
self.start_condition.wait()
def _start_cb(self, *args): # @UnusedVariable
with self.start_condition:
self.simulation_started = True
def _receive_cb(self, label, time, spikes=None): # @UnusedVariable
if spikes is None:
spikes = []
with self.point_mutex:
for spike in spikes:
cell_id, neuron_id = divmod(
spike, self.neurons_per_number * 9)
self.points_to_draw[cell_id].append((time, neuron_id))
time_ms = time * self.timestep_ms
if time_ms > self.latest_time:
self.latest_time = time_ms
def main_loop(self):
""" Run the GUI.
"""
self.start_framework(
self.args, "Sudoku", self.window_width, self.window_height,
INIT_WINDOW_X, INIT_WINDOW_Y, FRAMES_PER_SECOND)
@overrides(GlutFramework.display)
def display(self, dTime): # @UnusedVariable
self._start_display()
cell_width = (self.window_width - 2 * WINDOW_BORDER) / 9.0
cell_height = (self.window_height - 2 * WINDOW_BORDER) / 9.0
end = self.latest_time
start = end - self.ms_per_bin
if start < 0.0:
start = 0.0
end = start + self.ms_per_bin
with self.start_condition:
if not self.database_read:
prompt = "Waiting for simulation to load..."
elif not self.user_pressed_start:
prompt = "Press space bar to start..."
elif not self.simulation_started:
prompt = "Waiting for simulation to start..."
else:
prompt = "Sudoku"
self._print_text(prompt)
self._draw_cells(cell_width, cell_height)
if self.timestep_ms != 0:
x_spacing = cell_width / ((end - start) / self.timestep_ms)
start_tick = int(start / self.timestep_ms)
with self.point_mutex:
values, probs = self._find_cell_values(start_tick)
valid = self._find_cell_correctness(values)
self._draw_cell_contents(values, valid, probs, start_tick,
x_spacing, cell_width, cell_height)
@overrides(GlutFramework.reshape)
def reshape(self, width, height):
self.window_width = width
self.window_height = height
# Viewport dimensions
viewport(0, 0, width, height)
matrix_mode(projection)
load_identity()
# An orthographic projection. Should probably look into OpenGL
# perspective projections for 3D if that's your thing
orthographic_projction(0.0, width, 0.0, height, -50.0, 50.0)
matrix_mode(model_view)
load_identity()
@overrides(GlutFramework.keyboard_down)
def keyboard_down(self, key, x, y): # @UnusedVariable
if key == 32 or key == ' ':
with self.start_condition:
if not self.user_pressed_start:
print("Starting the simulation")
self.user_pressed_start = True
self.start_condition.notify_all()
def _find_cell_values(self, start_tick):
cell_value = [0] * 81
cell_prob = [0.0] * 81
for cell in range(81):
# Strip off items that are no longer needed
queue = self.points_to_draw[cell]
while queue and queue[0][0] < start_tick:
queue.pop(0)
# Count the spikes per number
count, total = self._count_spikes_per_number(queue)
# Work out the probability of a given number in a given cell
max_prob_number = 0
max_prob = 0.0
for i in range(9):
if count[i] > 0:
prob = count[i] / total
if prob > max_prob:
max_prob = prob
max_prob_number = i + 1
cell_value[cell] = max_prob_number
cell_prob[cell] = max_prob
return cell_value, cell_prob
def _count_spikes_per_number(self, queue):
count = [0] * 9
total = 0
for (_, n_id) in queue:
number = n_id // self.neurons_per_number
if number < 9:
count[number] += 1
total += 1
else:
sys.stderr.write(f"Neuron id {n_id} out of range\n")
return count, float(total)
def _find_cell_correctness(self, values):
# Work out the correctness of each cell
cell_valid = [True] * 81
for cell in range(81):
y, x = divmod(cell, 9)
for row in range(9):
if row != y:
self._check_cell(values, cell_valid, x, y, row, x)
for col in range(9):
if col != x:
self._check_cell(values, cell_valid, x, y, y, col)
for row in range(3 * (y // 3), 3 * (y // 3 + 1)):
for col in range(3 * (x // 3), 3 * (x // 3 + 1)):
if x != col and y != row:
self._check_cell(values, cell_valid, x, y, row, col)
return cell_valid
@staticmethod
def _start_display():
point_size(1.0)
clear(color_buffer_bit)
clear_color(1.0, 1.0, 1.0, 1.0)
color(0.0, 0.0, 0.0, 1.0)
def _print_text(self, prompt): # FIXME positioning
# Guesstimate of length of prompt in pixels
plen = len(prompt) * 4
self.write_large(
self.window_width / 2 - plen, self.window_height - 50, prompt)
def _draw_cells(self, width, height):
color(0.0, 0.0, 0.0, 1.0)
for i in range(10):
line_width(3.0 if i % 3 == 0 else 1.0)
pos = WINDOW_BORDER + i * height
self._line(self.window_width - WINDOW_BORDER, pos,
WINDOW_BORDER, pos)
pos = WINDOW_BORDER + i * width
self._line(pos, self.window_height - WINDOW_BORDER,
pos, WINDOW_BORDER)
    def _draw_cell_contents(self, value, valid, prob, start, x_spacing,
                            cell_width, cell_height):
        """Draw each cell's spike raster and its current best-guess digit.

        :param value: per-cell best-guess digit (0 = no guess yet)
        :param valid: per-cell validity flags (green when valid, red when
            invalid)
        :param prob: per-cell probability of the best-guess digit; higher
            probability gives a more saturated colour
        :param start: tick at the left edge of the displayed time window
        :param x_spacing: pixels per tick
        :param cell_width: pixel width of one cell
        :param cell_height: pixel height of one cell
        """
        # Print the spikes
        for cell in range(81):
            cell_y, cell_x = divmod(cell, 9)
            x_start = WINDOW_BORDER + (cell_x * cell_width) + 1
            y_start = WINDOW_BORDER + (cell_y * cell_height) + 1
            # One pixel row per neuron: 9 digits * neurons_per_number rows
            y_spacing = cell_height / (self.neurons_per_number * 9.0)
            # Work out how probable the number is and use this for colouring
            cell_sat = 1 - prob[cell]
            point_size(2.0)
            with draw(points):
                if valid[cell]:
                    color(cell_sat, 1.0, cell_sat, 1.0)
                else:
                    color(1.0, cell_sat, cell_sat, 1.0)
                for (time, n_id) in self.points_to_draw[cell]:
                    x_value = (time - start) * x_spacing + x_start
                    y_value = n_id * y_spacing + y_start
                    vertex(x_value, y_value)
            # Print the number, faded by how uncertain the guess is
            if value[cell] != 0:
                color(0, 0, 0, 1 - cell_sat)
                size = 0.005 * cell_height
                self.write_small(
                    x_start + (cell_width / 2.0) - (size * 50.0),
                    y_start + (cell_height / 2.0) - (size * 50.0),
                    size, 0, "%d", value[cell])
    @staticmethod
    def _line(x1, y1, x2, y2):
        """Draw a straight line segment from (x1, y1) to (x2, y2)."""
        with draw(lines):
            vertex(x1, y1)
            vertex(x2, y2)
@staticmethod
def _check_cell(values, correct, x, y, row, col):
value = values[y * 9 + x]
if value == values[row * 9 + col]:
correct[y * 9 + x] = False
def sudoku_visualiser(args, port=19999, neurons=5, ms=100, database=None):
    """ Make a visualiser, connecting a LiveEventConnection that listens to a\
        population labelled "Cells" to a GLUT GUI.
    """
    # Set up the live-spike connection and the plotting GUI
    labels = ["Cells"]
    live_connection = LiveEventConnection(
        "LiveSpikeReceiver", receive_labels=labels, local_port=port)
    plotter = SudokuPlot(args, neurons, ms, database is None)
    for population_label in labels:
        plotter.connect_callbacks(live_connection, population_label)
    if database is not None:
        # FIXME: This concept not present on Python side!
        # connection.set_database(database)
        sys.stderr.write("Database setting not currently supported")
    # Blocks until the GUI exits
    plotter.main_loop()
def main(argv=None):
    """ The main script.\
        Parses command line arguments and launches the visualiser.
    """
    program_name = "sudoku_visualiser"
    # NOTE(review): "%d" assumes __version__ is an integer; a dotted version
    # string would raise TypeError here -- confirm against the package.
    program_version = "v%d" % (__version__)
    program_description = "Visualise the SpiNNaker sudoku solver."
    program_version_string = '%%prog %s (%s)' % (program_version, __date__)

    # Build the command line parser
    parser = ArgumentParser(prog=program_name,
                            description=program_description)
    parser.add_argument(
        "-d", "--database", dest="database", metavar="FILE", default=None,
        help="optional file path to where the database is located, if "
             "needed for manual configuration")
    parser.add_argument(
        "-m", "--ms_per_bin", dest="ms", metavar="MILLISECONDS",
        type=float, default=100,
        help="optional number of milliseconds to show at once")
    parser.add_argument(
        "-n", "--neurons_per_number", dest="neurons", metavar="COUNT",
        type=int, default=5,
        help="the number of neurons that represent each number in a cell")
    parser.add_argument(
        "-p", "--hand_shake_port", dest="port", metavar="PORT",
        type=int, default="19999",
        help="optional port which the visualiser will listen to for"
             " database hand shaking")
    parser.add_argument('--version', action='version',
                        version=program_version_string)
    parser.add_argument("args", nargs=REMAINDER)

    # Run the application, reporting any failure on stderr
    try:
        arguments = parser.parse_args(sys.argv[1:] if argv is None else argv)
        sudoku_visualiser(**vars(arguments))
        return 0
    except Exception as exc:  # pylint: disable=broad-except
        indent = len(program_name) * " "
        sys.stderr.write(program_name + ": " + repr(exc) + "\n")
        sys.stderr.write(indent + "  for help use --help")
        return 2


if __name__ == "__main__":
    sys.exit(main())
import socket
import struct
import sys
import threading
from numpy import dot, cross, array, zeros, cos, sin, uint8, uint32
from numpy.linalg import norm
import spynnaker_visualisers.opengl_support as gl
import spynnaker_visualisers.glut_framework as glut
class RaytraceDrawer(glut.GlutFramework):
    """GLUT viewer for a SpiNNaker path/ray tracer.

    A background thread receives pixel samples over UDP (SDP messages) and
    accumulates them into a running per-pixel average; the GLUT display
    callback blits that buffer to the window.  WASD/QE and the arrow keys
    steer a fly-through camera described by position/look/up vectors.
    """

    __slots__ = (
        "_moving", "_strafing", "_turn_down", "_turn_right", "_rolling",
        "_height", "_width", "_win_height", "_win_width",
        "_viewing_frame", "_received_frame", "_sockfd_input",
        "_look", "_up", "_position")

    # Camera speed per millisecond of elapsed frame time (see
    # calculate_movement); direction is carried by the -1/0/+1 state flags.
    moveAmount = 0.00003
    turnAmount = 0.0000003

    # Fields of view
    VERT_FOV = 50.0
    HORIZ_FOV = 60.0

    # UDP port the SpiNNaker machine sends pixel samples to
    INPUT_PORT_SPINNAKER = 17894
    # Little-endian SDP header layout; field 7 is the command, field 9 arg1
    SDP_HEADER = struct.Struct("<HBBBBHHHHIII")
    # Big-endian per-pixel payload: x, y, r, g, b
    PIXEL_FORMAT = struct.Struct(">HHBBB")
    RECV_BUFFER_SIZE = 1500  # Ethernet MTU; SpiNNaker doesn't jumbo

    def __init__(self, size=256):
        """Set up camera state, frame buffers and the input socket.

        :param size: vertical resolution in pixels; the width is derived
            from the horizontal/vertical field-of-view ratio
        """
        super().__init__()
        self._moving = 0
        self._strafing = 0
        # Turn left is negative
        self._turn_right = 0
        # Turn up is negative
        self._turn_down = 0
        self._rolling = 0
        self._position = array([-220.0, 50.0, 0.0])
        self._look = array([1.0, 0.0, 0.0])
        self._up = array([0.0, 1.0, 0.0])
        self._height = size
        self._width = int(self.HORIZ_FOV * self._height / self.VERT_FOV)
        self._win_height = self._height
        self._win_width = self._width
        # RGB byte buffer shown on screen (running average of samples)
        self._viewing_frame = zeros(
            self._width * self._height * 3, dtype=uint8)
        # Per-pixel sample counts used to maintain the running average
        self._received_frame = zeros(
            self._width * self._height, dtype=uint32)
        self._sockfd_input = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self._sockfd_input.bind(('0.0.0.0', self.INPUT_PORT_SPINNAKER))

    def start(self, args):
        """Start the UDP receiver thread, then enter the GLUT main loop."""
        threading.Thread(target=self._input_thread, daemon=True).start()
        self.start_framework(
            args, "Path Tracer", self._width, self._height, 0, 0, 10,
            display_mode=glut.displayModeDouble)

    def init(self):
        """One-time GL setup: alpha blending and depth testing."""
        gl.enable(gl.blend, gl.depth_test)
        gl.blend_function(gl.src_alpha, gl.one_minus_src_alpha)

    def display(self, dTime):
        """GLUT display callback: blit the accumulated frame buffer."""
        gl.clear_color(1.0, 1.0, 1.0, 0.001)
        gl.clear(gl.color_buffer_bit | gl.depth_buffer_bit)
        gl.draw_pixels(
            self._win_width, self._win_height, gl.rgb, gl.unsigned_byte,
            self._viewing_frame.data)

    def reshape(self, width, height):
        """GLUT reshape callback; never draw beyond the rendered size."""
        self._win_width = min(width, self._width)
        self._win_height = min(height, self._height)
        gl.viewport(0, 0, width, height)
        gl.load_identity()

    def special_keyboard_down(self, key, x, y):  # @UnusedVariable
        """Arrow keys start pitching (up/down) or rolling (left/right)."""
        if key == glut.keyUp:
            self._turn_down = -1
        elif key == glut.keyDown:
            self._turn_down = 1
        elif key == glut.keyRight:
            self._rolling = -1
        elif key == glut.keyLeft:
            self._rolling = 1

    def special_keyboard_up(self, key, x, y):  # @UnusedVariable
        """Arrow key release stops the corresponding rotation."""
        if key == glut.keyUp or key == glut.keyDown:
            self._turn_down = 0
        elif key == glut.keyLeft or key == glut.keyRight:
            self._rolling = 0

    def keyboard_down(self, key, x, y):  # @UnusedVariable
        """WASD move/turn, Q/E strafe, Escape quits.

        NOTE(review): keys are compared as ``str``; this assumes the glut
        framework wrapper decodes GLUT's byte keycodes -- confirm.
        """
        if key == 'w':
            self._moving = 1
        elif key == 's':
            self._moving = -1
        elif key == 'a':
            self._turn_right = -1
        elif key == 'd':
            self._turn_right = 1
        elif key == 'q':
            self._strafing = 1
        elif key == 'e':
            self._strafing = -1
        elif key == '\x1b':  # Escape
            sys.exit()

    def keyboard_up(self, key, x, y):  # @UnusedVariable
        """Key release stops the corresponding motion."""
        if key == 'w' or key == 's':
            self._moving = 0
        elif key == 'a' or key == 'd':
            self._turn_right = 0
        elif key == 'q' or key == 'e':
            self._strafing = 0

    @staticmethod
    def vector_rotate(rotated, axis, theta):
        """Rotate the first vector around the second by *theta* radians.

        Rodrigues-style decomposition into components parallel and
        perpendicular to the axis; the result is re-normalised to unit
        length before being returned.
        """
        # https://gist.github.com/fasiha/6c331b158d4c40509bd180c5e64f7924
        par = (dot(rotated, axis) / dot(axis, axis) * axis)
        perp = rotated - par
        w = cross(axis, perp)
        w = w / norm(w)
        result = par + perp * cos(theta) + norm(perp) * w * sin(theta)
        return result / norm(result)

    def calculate_movement(self, dt):
        """Advance camera state by *dt* (milliseconds) of held-key motion."""
        # Forward movement
        if self._moving:
            self._position += self._look * dt * self.moveAmount * self._moving
        right = cross(self._up, self._look)
        # Strafing movement
        if self._strafing:
            self._position += right * dt * self.moveAmount * self._strafing
        # To turn left/right, rotate the look vector around the up vector
        if self._turn_right:
            self._look = self.vector_rotate(
                self._look, self._up, dt * self.turnAmount * self._turn_right)
        # To turn up/down, rotate the look vector and up vector about the right
        # vector
        if self._turn_down:
            self._look = self.vector_rotate(
                self._look, right, dt * self.turnAmount * self._turn_down)
            self._up = self.vector_rotate(
                self._up, right, dt * self.turnAmount * self._turn_down)
        # To roll, rotate the up vector around the look vector
        if self._rolling:
            self._up = self.vector_rotate(
                self._up, self._look, dt * self.turnAmount * self._rolling)

    def run(self):
        """Calculate movement ten times a second"""
        super().run()
        self.calculate_movement(self.frame_time_elapsed * 1000)

    def _input_thread(self):
        """Daemon thread: receive SDP pixel messages and accumulate them."""
        print(
            f"Drawer running (listening port: {self.INPUT_PORT_SPINNAKER})...")
        while True:
            msg = self._sockfd_input.recv(self.RECV_BUFFER_SIZE)
            sdp_msg = self.SDP_HEADER.unpack_from(msg)
            data = msg[self.SDP_HEADER.size:]  # sdp_msg.data
            # Command 3 carries pixel samples; arg1 is the pixel count
            if sdp_msg[7] == 3:  # sdp_msg.command
                for pixel_datum in self._pixelinfo(
                        data, sdp_msg[9]):  # sdp_msg.arg1
                    self.process_one_pixel(*pixel_datum)

    @classmethod
    def _pixelinfo(cls, data, number_of_pixels):
        """Yield (x, y, r, g, b) tuples unpacked from an SDP payload."""
        for i in range(number_of_pixels):
            yield cls.PIXEL_FORMAT.unpack_from(
                data, i * cls.PIXEL_FORMAT.size)

    def process_one_pixel(self, x, y, r, g, b):
        """Fold one RGB sample into the running average for pixel (x, y).

        The y axis is flipped because GL draws the buffer bottom-up.
        """
        index = (self._height - y - 1) * self._width + x
        if index < self._width * self._height:
            ix3 = index * 3
            count = self._received_frame[index]
            cp1 = count + 1
            # new_avg = (sample + count * old_avg) / (count + 1); the uint32
            # count promotes the uint8 buffer values before multiplication
            self._viewing_frame[ix3] = (
                (r + count * self._viewing_frame[ix3]) // cp1)
            self._viewing_frame[ix3 + 1] = (
                (g + count * self._viewing_frame[ix3 + 1]) // cp1)
            self._viewing_frame[ix3 + 2] = (
                (b + count * self._viewing_frame[ix3 + 2]) // cp1)
            self._received_frame[index] += 1
def main(args):
    """Entry point: build the drawer and hand control to GLUT.

    :param args: command-line argument list, passed through to the
        GLUT framework
    :return: process exit code (0 when the GUI exits cleanly)
    """
    RaytraceDrawer().start(args)
    return 0


if __name__ == "__main__":
    sys.exit(main(sys.argv))
import codecs
from six import PY3, text_type, binary_type
try:
codecs.lookup_error('surrogateescape')
HAS_SURROGATEESCAPE = True
except LookupError:
HAS_SURROGATEESCAPE = False
_COMPOSED_ERROR_HANDLERS = frozenset((None, 'surrogate_or_escape',
'surrogate_or_strict',
'surrogate_then_replace'))
def to_bytes(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
    """Make sure that a string is a byte string

    :arg obj: An object to make sure is a byte string.  In most cases this
        will be either a text string or a byte string.  However, with
        ``nonstring='simplerepr'``, this can be used as a traceback-free
        version of ``str(obj)``.
    :kwarg encoding: The encoding to use to transform from a text string to
        a byte string.  Defaults to using 'utf-8'.
    :kwarg errors: The error handler to use if the text string is not
        encodable using the specified encoding.  Any valid `codecs error
        handler <https://docs.python.org/2/library/codecs.html#codec-base-classes>`_
        may be specified. There are three additional error strategies
        specifically aimed at helping people to port code.  The first two are:

            :surrogate_or_strict: Will use ``surrogateescape`` if it is a valid
                handler, otherwise it will use ``strict``
            :surrogate_or_replace: Will use ``surrogateescape`` if it is a valid
                handler, otherwise it will use ``replace``.

        Because ``surrogateescape`` was added in Python3 this usually means that
        Python3 will use ``surrogateescape`` and Python2 will use the fallback
        error handler. Note that the code checks for ``surrogateescape`` when the
        module is imported.  If you have a backport of ``surrogateescape`` for
        Python2, be sure to register the error handler prior to importing this
        module.

        The last error handler is:

            :surrogate_then_replace: Will use ``surrogateescape`` if it is a valid
                handler.  If encoding with ``surrogateescape`` would traceback,
                surrogates are first replaced with a replacement characters
                and then the string is encoded using ``replace`` (which replaces
                the rest of the nonencodable bytes).  If ``surrogateescape`` is
                not present it will simply use ``replace``.  (Added in Ansible 2.3)
                This strategy is designed to never traceback when it attempts
                to encode a string.

        The default until Ansible-2.2 was ``surrogate_or_replace``
        From Ansible-2.3 onwards, the default is ``surrogate_then_replace``.

    :kwarg nonstring: The strategy to use if a nonstring is specified in
        ``obj``.  Default is 'simplerepr'.  Valid values are:

            :simplerepr: The default.  This takes the ``str`` of the object and
                then returns the bytes version of that string.
            :empty: Return an empty byte string
            :passthru: Return the object passed in
            :strict: Raise a :exc:`TypeError`

    :returns: Typically this returns a byte string.  If a nonstring object is
        passed in this may be a different type depending on the strategy
        specified by nonstring.  This will never return a text string.

    .. note:: If passed a byte string, this function does not check that the
        string is valid in the specified encoding.  If it's important that the
        byte string is in the specified encoding do::

            encoded_string = to_bytes(to_text(input_string, 'latin-1'), 'utf-8')

    .. version_changed:: 2.3

        Added the ``surrogate_then_replace`` error handler and made it the default error handler.
    """
    # Byte strings pass straight through unchanged (and unvalidated)
    if isinstance(obj, binary_type):
        return obj

    # We're given a text string
    # If it has surrogates, we know because it will decode
    original_errors = errors
    # Resolve a "composed" handler name to a real codecs handler
    if errors in _COMPOSED_ERROR_HANDLERS:
        if HAS_SURROGATEESCAPE:
            errors = 'surrogateescape'
        elif errors == 'surrogate_or_strict':
            errors = 'strict'
        else:
            errors = 'replace'

    if isinstance(obj, text_type):
        try:
            # Try this first as it's the fastest
            return obj.encode(encoding, errors)
        except UnicodeEncodeError:
            if original_errors in (None, 'surrogate_then_replace'):
                # Slow but works: strip surrogates via a utf-8 round trip,
                # then encode with 'replace' for anything still unencodable
                return_string = obj.encode('utf-8', 'surrogateescape')
                return_string = return_string.decode('utf-8', 'replace')
                return return_string.encode(encoding, 'replace')
            raise

    # Note: We do these last even though we have to call to_bytes again on the
    # value because we're optimizing the common case
    if nonstring == 'simplerepr':
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # Giving up
                return to_bytes('')
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'empty':
        # python2.4 doesn't have b''
        return to_bytes('')
    elif nonstring == 'strict':
        raise TypeError('obj must be a string type')
    else:
        raise TypeError('Invalid value %s for to_bytes\' nonstring parameter' % nonstring)

    # Recurse once on the stringified non-string value
    return to_bytes(value, encoding, errors)
def to_text(obj, encoding='utf-8', errors=None, nonstring='simplerepr'):
    """Make sure that a string is a text string

    :arg obj: An object to make sure is a text string.  In most cases this
        will be either a text string or a byte string.  However, with
        ``nonstring='simplerepr'``, this can be used as a traceback-free
        version of ``str(obj)``.
    :kwarg encoding: The encoding to use to transform from a byte string to
        a text string.  Defaults to using 'utf-8'.
    :kwarg errors: The error handler to use if the byte string is not
        decodable using the specified encoding.  Any valid `codecs error
        handler <https://docs.python.org/2/library/codecs.html#codec-base-classes>`_
        may be specified.   We support three additional error strategies
        specifically aimed at helping people to port code:

            :surrogate_or_strict: Will use surrogateescape if it is a valid
                handler, otherwise it will use strict
            :surrogate_or_replace: Will use surrogateescape if it is a valid
                handler, otherwise it will use replace.
            :surrogate_then_replace: Does the same as surrogate_or_replace but
                was added for symmetry with the error handlers in
                :func:`ansible.module_utils._text.to_bytes` (Added in Ansible 2.3)

        Because surrogateescape was added in Python3 this usually means that
        Python3 will use `surrogateescape` and Python2 will use the fallback
        error handler. Note that the code checks for surrogateescape when the
        module is imported.  If you have a backport of `surrogateescape` for
        python2, be sure to register the error handler prior to importing this
        module.

        The default until Ansible-2.2 was `surrogate_or_replace`
        In Ansible-2.3 this defaults to `surrogate_then_replace` for symmetry
        with :func:`ansible.module_utils._text.to_bytes` .

    :kwarg nonstring: The strategy to use if a nonstring is specified in
        ``obj``.  Default is 'simplerepr'.  Valid values are:

            :simplerepr: The default.  This takes the ``str`` of the object and
                then returns the text version of that string.
            :empty: Return an empty text string
            :passthru: Return the object passed in
            :strict: Raise a :exc:`TypeError`

    :returns: Typically this returns a text string.  If a nonstring object is
        passed in this may be a different type depending on the strategy
        specified by nonstring.  This will never return a byte string.
        From Ansible-2.3 onwards, the default is `surrogate_then_replace`.

    .. version_changed:: 2.3

        Added the surrogate_then_replace error handler and made it the default error handler.
    """
    # Text strings pass straight through unchanged
    if isinstance(obj, text_type):
        return obj

    # Resolve a "composed" handler name to a real codecs handler
    if errors in _COMPOSED_ERROR_HANDLERS:
        if HAS_SURROGATEESCAPE:
            errors = 'surrogateescape'
        elif errors == 'surrogate_or_strict':
            errors = 'strict'
        else:
            errors = 'replace'

    if isinstance(obj, binary_type):
        # Note: We don't need special handling for surrogate_then_replace
        # because all bytes will either be made into surrogates or are valid
        # to decode.
        return obj.decode(encoding, errors)

    # Note: We do these last even though we have to call to_text again on the
    # value because we're optimizing the common case
    if nonstring == 'simplerepr':
        try:
            value = str(obj)
        except UnicodeError:
            try:
                value = repr(obj)
            except UnicodeError:
                # Giving up
                return u''
    elif nonstring == 'passthru':
        return obj
    elif nonstring == 'empty':
        return u''
    elif nonstring == 'strict':
        raise TypeError('obj must be a string type')
    else:
        raise TypeError('Invalid value %s for to_text\'s nonstring parameter' % nonstring)

    # Recurse once on the stringified non-string value
    return to_text(value, encoding, errors)
#: :py:func:`to_native`
#:      Transform a variable into the native str type for the python version
#:
#:      On Python2, this is an alias for
#:      :func:`~ansible.module_utils.to_bytes`.  On Python3 it is an alias for
#:      :func:`~ansible.module_utils.to_text`.  It makes it easier to
#:      transform a variable into the native str type for the python version
#:      the code is running on.  Use this when constructing the message to
#:      send to exceptions or when dealing with an API that needs to take
#:      a native string.  Example::
#:
#:          try:
#:              1//0
#:          except ZeroDivisionError as e:
#:              raise MyException('Encountered and error: %s' % to_native(e))
# Bind the alias once at import time based on the interpreter major version.
if PY3:
    to_native = to_text
else:
    to_native = to_bytes
import os
from typing import Dict, List, Tuple
import pytorch_lightning as pl
import torch
import wandb
import yaml
from pytorch_lightning.callbacks import LearningRateMonitor, ModelCheckpoint
from pytorch_lightning.loggers import WandbLogger
from sa_app.common.utils import init_model_loggers, parse_args
from sa_app.data.data import InitializeDataset, SentimentIterableDataset
from sa_app.models.model import Model
from sa_app.training.lightning_model_wrapper import LightningModelWrapper
from transformers import AutoConfig, AutoModelForSequenceClassification, AutoTokenizer
def init_model_callbacks(training_params: dict) -> List[LearningRateMonitor | ModelCheckpoint]:
    """Build the Lightning callbacks used during training.

    :param training_params: training section of the app config; its
        ``callbacks`` sub-dict supplies the checkpoint directory and the
        metric name/mode to monitor.
    :return: ``[learning-rate monitor, best-model checkpoint]``
    """
    callback_cfg = training_params["callbacks"]
    checkpoint_callback = ModelCheckpoint(
        dirpath=callback_cfg["dirpath"],
        filename="best_model",
        monitor=callback_cfg["monitor_var"],
        mode=callback_cfg["monitor_var_mode"],
        save_top_k=3,  # Save the top 3 models based on the monitored metric
    )
    return [
        LearningRateMonitor(logging_interval="step"),
        checkpoint_callback,
    ]
def load_model(training_params: Dict) -> Tuple[Dict, AutoModelForSequenceClassification]:
    """Load the base HF classification model and freeze all but its head.

    :param training_params: needs ``base-model-name`` (HF hub id) and
        ``train_mode`` (passed to :class:`Model`).
    :return: ``(model_config, model)`` -- the HF config, and the model with
        only the classifier parameters left trainable.
    """
    base_model_name = training_params.get("base-model-name")
    # Load HF config and model (downloaded from the hub on first use)
    model_config = AutoConfig.from_pretrained(base_model_name)
    model = Model.from_config(training_params.get("train_mode"))
    model = model.from_pretrained(base_model_name, config=model_config)
    # Freeze the whole backbone...
    for parameter in model.parameters():
        parameter.requires_grad = False
    # ...then unfreeze only the classification head
    for parameter in model.classifier.parameters():
        parameter.requires_grad = True
    return model_config, model
def get_dataset(
    dataset_params: Dict, tokenizer: AutoTokenizer
) -> Tuple[SentimentIterableDataset, SentimentIterableDataset]:
    """Create the streaming train/validation datasets.

    :param dataset_params: dataset section of the app config (storage
        location and preprocessing settings)
    :param tokenizer: HF tokenizer shared by both splits
    :return: ``(train_dataset, valid_dataset)``
    """
    raw_dataset_file, _ = InitializeDataset(dataset_params)()
    preprocessors = dataset_params.get("preprocessors")

    def make_split(split_type: str) -> SentimentIterableDataset:
        # Both splits stream from the same raw file with the same cleaning
        return SentimentIterableDataset(
            raw_dataset_file,
            tokenizer,
            split_type=split_type,
            preprocessors=preprocessors,
        )

    return make_split("train"), make_split("valid")
def get_trainer(
    loggers: List[WandbLogger], devices: List[str] | str, accelerator: str, callbacks: List, training_params: Dict
) -> pl.Trainer:
    """Construct the Lightning trainer from config plus runtime choices.

    :param loggers: experiment loggers (e.g. W&B)
    :param devices: Lightning ``devices`` argument (indices or "auto")
    :param accelerator: "gpu" or "cpu"
    :param callbacks: callbacks from :func:`init_model_callbacks`
    :param training_params: supplies extra ``trainer`` keyword arguments
    """
    extra_trainer_kwargs = training_params["trainer"]
    return pl.Trainer(
        logger=loggers,
        devices=devices,
        accelerator=accelerator,
        callbacks=callbacks,
        **extra_trainer_kwargs,
    )
def train(config: dict, device: str, training_params: Dict, dataset_params: Dict, seed: int, inference_params: Dict):
    """Run a full training session and upload the best checkpoint to W&B.

    :param config: the complete parsed app config (logged with the run)
    :param device: "cpu", "cuda" or "cuda:<index>" (as produced by ``main``)
    :param training_params: model/optimizer/trainer settings
    :param dataset_params: dataset location and preprocessing settings
    :param seed: global RNG seed
    :param inference_params: accepted for config-compatibility; unused here
    """
    # wandb login (API key comes from the environment)
    wandb.login(key=os.getenv("WANDB_API_KEY"))
    # Global seeding
    pl.seed_everything(seed=seed)
    model_config, model = load_model(training_params)
    tokenizer = AutoTokenizer.from_pretrained(training_params.get("base-model-name"))
    train_dataset, valid_dataset = get_dataset(dataset_params, tokenizer)
    loggers = init_model_loggers(dataset_params, training_params)
    callbacks = init_model_callbacks(training_params)
    # PL wrapper
    model_wrapped = LightningModelWrapper(
        model=model,
        optimizer_params=training_params["optimizer"],
        lr_scheduler_params=training_params["lr_scheduler"] if "lr_scheduler" in training_params else None,
        unique_config=config,
    )
    # Map the torch-style device string onto Lightning's accelerator/devices
    # arguments.  Fixed: the previous int(device.split(":")[-1]) raised
    # ValueError for a plain "cuda" string (the only GPU value ``main``
    # produces), silently forcing CPU training on GPU machines.
    if device.startswith("cuda"):
        accelerator = "gpu"
        devices = [int(device.split(":")[1])] if ":" in device else "auto"
    else:
        accelerator = "cpu"
        devices = "auto"
    # Trainer initialization
    trainer = get_trainer(loggers, devices, accelerator, callbacks, training_params)
    # Initiate training (resuming from the last checkpoint when present)
    trainer.fit(model=model_wrapped, train_dataloaders=train_dataset, val_dataloaders=valid_dataset, ckpt_path="last")
    # Upload the best checkpoint.  Fixed: locate the checkpoint callback by
    # type instead of relying on its position in the callbacks list.
    checkpoint_callback = next(
        cb for cb in callbacks if isinstance(cb, ModelCheckpoint))
    artifact = wandb.Artifact("best_model_checkpoint", type="trained-model")
    artifact.add_file(checkpoint_callback.best_model_path)
    wandb.run.log_artifact(artifact)
    wandb.finish()
def main():
    """Parse CLI args, load the YAML config and dispatch on the run mode."""
    args = parse_args()
    with open(args.config, "r") as config_file:
        config = yaml.safe_load(config_file)
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if args.mode != "train":
        raise NotImplementedError(f"Method {args.mode} not implemented")
    train(config=config, device=device, **config)


if __name__ == "__main__":
    main()
from typing import Dict
import torch
import yaml
from sa_app.common.utils import load_mapping, parse_args
from sa_app.data.data import InitializeDataset
from sa_app.data.data_cleaner import StackedPreprocessor
from transformers import AutoModelForSequenceClassification, AutoTokenizer
class InferenceEngine:
    """Loads a trained sentiment classifier and predicts labels for text."""

    def __init__(self, inference_params: Dict, training_params: Dict, dataset_params: Dict, device: str):
        """Load model, tokenizer, preprocessing stack and label mapping.

        :param inference_params: supplies ``model_dir`` (trained model path)
        :param training_params: supplies ``tokenizer`` (HF tokenizer name)
        :param dataset_params: supplies ``preprocessors`` and the dataset
            location used to resolve the label-mapping file
        :param device: torch device string ("cpu" or "cuda")
        """
        self.device = device
        self.model_path = inference_params["model_dir"]
        self.model = AutoModelForSequenceClassification.from_pretrained(self.model_path).to(self.device)
        self.tokenizer = AutoTokenizer.from_pretrained(training_params["tokenizer"])
        self.preprocessors = StackedPreprocessor(dataset_params["preprocessors"])
        # Only the label-mapping path is needed here; the raw file is ignored
        _, label_mapping_path = InitializeDataset(dataset_params)()
        self.label_mapping = load_mapping(label_mapping_path)

    def perform_inference(self, sentence):
        """Return the predicted label for one input sentence."""
        # Clean, tokenise, then run a single forward pass
        cleaned = self.preprocessors([sentence])[0]
        encoded = self.tokenizer(cleaned, truncation=True, padding=True, return_tensors="pt")
        encoded = encoded.to(self.device)
        logits = self.model(**encoded).logits
        predicted_index = torch.argmax(logits, dim=1).tolist()[0]
        return self.label_mapping[predicted_index]
if __name__ == "__main__":
    # Example usage: build the engine from the config and classify one line
    args = parse_args()
    with open(args.config, "r") as config_file:
        config = yaml.safe_load(config_file)
    device_in_use = "cuda" if torch.cuda.is_available() else "cpu"
    engine = InferenceEngine(
        inference_params=config["inference_params"],
        training_params=config["training_params"],
        dataset_params=config["dataset_params"],
        device=device_in_use,
    )
    input_sentence = "I feel so bad today . Such a bad day :( "
    predicted_labels = engine.perform_inference(input_sentence)
    print("Predicted labels:", predicted_labels)
import os.path
from glob import glob
from typing import Dict, Generator, Iterator, List, Optional, Tuple

import pandas as pd
import torch
import wandb
from torch.utils.data import IterableDataset
from tqdm import tqdm
from transformers import AutoTokenizer

from sa_app.data.data_cleaner import StackedPreprocessor
from sa_app.data.kaggle_dataset import get_dataset_length, get_file_names, split_dataset
def kaggle_dataset_iterator(file_map: dict, chunk_size=1000, split_type="train") -> Iterator[pd.DataFrame]:
    """Stream one split of the Kaggle sentiment CSV in chunks.

    :param file_map: maps split name ("train"/"valid"/"test") to a CSV path
    :param chunk_size: rows per yielded DataFrame chunk
    :param split_type: which entry of ``file_map`` to read
    :return: an iterator of DataFrame chunks.  (Fixed annotation: the
        previous ``-> pd.DataFrame`` was wrong -- ``read_csv`` with
        ``chunksize`` returns a chunk reader, not a single frame.)
    """
    dataset_path = file_map[split_type]
    return pd.read_csv(dataset_path, encoding="latin-1", chunksize=chunk_size)
class InitializeDataset:
    """Resolves where the raw dataset CSV and label-mapping file live.

    Depending on the config, either downloads them as a W&B artifact
    (``wandb_storage``) or reads them from disk (``local_storage``).
    """

    def __init__(self, dataset_params):
        # Full dataset section of the app config
        self.dataset_params = dataset_params

    def __call__(self, *args, **kwargs) -> Tuple[str, str]:
        """Return ``(csv_file_path, label_mapping_file_path)``.

        :raises NotImplementedError: when neither ``wandb_storage`` nor
            ``local_storage`` is present in the config
        """
        if self.dataset_params.get("wandb_storage") is not None:
            wandb_storage = self.dataset_params.get("wandb_storage")
            wandb_user_id = wandb_storage.get("wandb_user_id")
            wandb_project_name = wandb_storage.get("wandb_project_name")
            wandb_artifact_name = wandb_storage.get("wandb_artifact_name")
            wandb_artifact_type = wandb_storage.get("wandb_artifact_type")
            wandb_file_type = wandb_storage.get("training_file_type")
            wandb_artifact_version = wandb_storage.get("wandb_artifact_version")
            labels_mapping_file_name = wandb_storage.get("labels_mapping_file_name")
            # Download the versioned artifact; the run records the usage
            run = wandb.init(entity=wandb_user_id, project=wandb_project_name, job_type="download_dataset")
            artifact = run.use_artifact(
                f"{wandb_user_id}/{wandb_project_name}/{wandb_artifact_name}:{wandb_artifact_version}",
                type=f"{wandb_artifact_type}",
            )
            artifact_dir = artifact.download()
            assert len(glob(f"{artifact_dir}/*.{wandb_file_type}")) > 0, "CSV file download failed"
            # NOTE(review): assumes the artifact contains exactly one file of
            # this type -- with several, glob order decides which is used.
            csv_file = glob(f"{artifact_dir}/*.{wandb_file_type}")[0]
            mapping_file = os.path.join(artifact_dir, labels_mapping_file_name)
            assert os.path.isfile(mapping_file) is True, "Label mapping file download failed"
            return csv_file, mapping_file
        elif self.dataset_params.get("local_storage") is not None:
            local_storage = self.dataset_params.get("local_storage")
            csv_file = local_storage.get("raw_dataset_file")
            mapping_file = local_storage.get("labels_mapping")
            return csv_file, mapping_file
        else:
            raise NotImplementedError(f"Either of wandb_storage or local_storage should be defined in app_cfg.yml")
class SentimentIterableDataset(IterableDataset):
    """Streams tokenised (labels, inputs) minibatches from the Kaggle CSV.

    The raw file is split into train/valid/test side files on first use;
    each iteration then reads the requested split in chunks, cleans the
    text, tokenises it and yields ready-to-train tensors.
    """

    def __init__(
        self,
        csv_file: str,
        tokenizer,
        preprocessors: Optional[Dict] = None,
        chunk_size: int = 1000,
        create_split: bool = False,
        split_type: str = "train",
        batch_size: int = 8,
        max_seq_len: int = 512,
    ):
        """
        :param csv_file: path of the raw (unsplit) dataset CSV
        :param tokenizer: HF tokenizer used on the cleaned sentences
        :param preprocessors: config dict for :class:`StackedPreprocessor`
        :param chunk_size: rows read from disk per chunk
        :param create_split: stored but not consulted -- splitting is
            triggered by the split file being absent (see below)
        :param split_type: "train", "valid" or "test"
        :param batch_size: sentences per yielded minibatch
        :param max_seq_len: tokeniser truncation length
        """
        self.csv_file = csv_file
        self.tokenizer = tokenizer
        self.chunk_size = chunk_size
        self.create_split = create_split
        self.split_type = split_type
        self.max_seq_len = max_seq_len
        self.batch_size = batch_size
        self.preprocessors = StackedPreprocessor(preprocessors)
        data_files = get_file_names(self.csv_file)
        self.file_map = {"train": data_files[0], "valid": data_files[1], "test": data_files[2]}
        # Lazily create the split files the first time they are missing
        if os.path.isfile(self.file_map[split_type]) is False:
            print("Splitting the dataset")
            split_dataset(self.csv_file)

    def __len__(self):
        # Number of minibatches (floor division drops a trailing partial one)
        return get_dataset_length(self.csv_file, self.split_type) // self.batch_size

    def __iter__(self) -> Generator[Tuple[List[str], Dict[str, List[str]]], None, None]:
        """Yield ``(labels_tensor, {"input_ids", "attention_mask"})`` pairs."""
        for data in kaggle_dataset_iterator(self.file_map, chunk_size=self.chunk_size, split_type=self.split_type):
            for i in range(0, len(data), self.batch_size):
                # Label mapping is also done here, 0 - negative sentiment, 1 - positive sentiment
                labels_minibatch: List[int] = list(
                    data.iloc[i : i + self.batch_size, 0].apply(lambda x: 0 if x == 0 else 1).values
                )
                # Column 5 holds the tweet text in the Kaggle layout
                sentences_minibatch: Dict[str, List[str]] = self.tokenizer(
                    self.preprocessors(list(data.iloc[i : i + self.batch_size, 5].values)),
                    add_special_tokens=False,
                    truncation=True,
                    max_length=self.max_seq_len,
                    padding=True,
                )
                labels_tensor = torch.tensor(labels_minibatch)
                sentences = {
                    "input_ids": torch.tensor(sentences_minibatch["input_ids"]),
                    "attention_mask": torch.tensor(sentences_minibatch["attention_mask"]),
                }
                yield labels_tensor, sentences
if __name__ == "__main__":
    # Smoke test: stream the whole dataset once to exercise the pipeline
    tokenizer = AutoTokenizer.from_pretrained("cardiffnlp/twitter-roberta-base")
    raw_dataset_file = (
        "/home/ppradhan/Documents/my_learnings/my_uni_stuffs/sa_data_storage/training.1600000"
        ".processed.noemoticon.csv"
    )
    # ite = kaggle_dataset_iterator(raw_dataset_file, chunk_size=1000, create_split=False, split_type='train')
    dataset = SentimentIterableDataset(raw_dataset_file, tokenizer)
    progress = tqdm(total=len(dataset), desc="Processing batches", unit="batch")
    for labels, sentences in dataset:
        progress.update(1)
import re
from abc import ABC, abstractmethod
from typing import Dict, List, Optional
import spacy
from nltk import SnowballStemmer
class BasePreprocessor(ABC):
    """Interface for single-string text preprocessing steps.

    Subclasses implement :meth:`__call__`, taking one string and returning
    the transformed string.
    """

    @abstractmethod
    def __call__(self, text: str) -> str:
        raise NotImplementedError
class WhitespaceRemovePreprocessor(BasePreprocessor):
    """Normalises whitespace and re-joins hyphen-split words."""

    def __call__(self, text: str) -> str:
        """
        Basic cleaning, applied in order:
        1. remove any hyphen followed by one or more whitespace characters
           (re-joins words broken across line ends)
        2. collapse runs of whitespace to a single space
        3. strip one leading and one trailing whitespace character
        :param text: input string
        :return: str
        """
        substitutions = (
            (r"-\s+", ""),
            (r"\s+", " "),
            (r"\s$|^\s", ""),
        )
        for pattern, replacement in substitutions:
            text = re.sub(pattern, replacement, text)
        return text
class MakeLowerCasePreprocessor(BasePreprocessor):
    """Lower-cases the input string."""

    def __call__(self, text: str) -> str:
        """
        Makes all string lower case
        """
        return text.lower()
class StemPreprocessor(BasePreprocessor):
    """Snowball-stems every whitespace-separated token."""

    def __init__(self, language: str = "english"):
        # One stemmer instance is reused for every call
        self.stemmer = SnowballStemmer(language)

    def __call__(self, text: str) -> str:
        """Stemming operation using nltk's Snowball stemmer."""
        stemmed_tokens = (self.stemmer.stem(token) for token in text.split())
        return " ".join(stemmed_tokens)
class LemmaPreprocessor(BasePreprocessor):
    """Lemmatises tokens using a spaCy pipeline."""

    def __init__(self, model_name: str = "en_core_web_sm"):
        # Loading the spaCy model is expensive; do it once at construction
        self.spacy_model = spacy.load(model_name)

    def __call__(self, text: str) -> str:
        """Lemmatization via spaCy (tokenisation included)."""
        lemmas = (token.lemma_ for token in self.spacy_model(text))
        return " ".join(lemmas)
# TODO: Add more preprocessing steps
class StackedPreprocessor(BasePreprocessor):
    """Composes several preprocessors into one batch-level pipeline."""

    def __init__(self, preprocessors: Optional[Dict[str, Optional[Dict]]] = None):
        """
        :param preprocessors: maps a name from ``PREPROCESSORS`` to the
            keyword arguments for that preprocessor's constructor.
            NOTE(review): a ``None`` kwargs value would make ``**params``
            fail -- callers appear to always pass a dict; confirm.
        """
        config = preprocessors if preprocessors is not None else {}
        self.preprocessors = [
            PREPROCESSORS[name](**params) for name, params in config.items()
        ]

    def __call__(self, text_batch: List[str]) -> List[str]:
        """Run every configured preprocessor, in order, over each string."""
        def run_pipeline(text: str) -> str:
            for step in self.preprocessors:
                text = step(text)
            return text

        return [run_pipeline(text) for text in text_batch]
# Registry of config names to preprocessor classes, used by
# StackedPreprocessor; defined after the classes so every entry resolves.
PREPROCESSORS = {
    "base_cleaning": WhitespaceRemovePreprocessor,
    "lowcase": MakeLowerCasePreprocessor,
    "stem": StemPreprocessor,
    "lemma": LemmaPreprocessor,
}
from typing import Dict
import pandas as pd
from tqdm import tqdm
def get_label_counts(df: pd.DataFrame) -> Dict[str, int]:
label_count = {}
for i in df:
k, v = list(i[0].value_counts().to_dict().items())[0]
if k not in label_count:
label_count[k] = 0
label_count[k] += v
return label_count
def get_dataset_length(file_name: str, split_type: str) -> int:
    """Count the rows in one split file by streaming it in chunks.

    :param file_name: path of the *unsplit* dataset CSV (the split paths
        are derived from it via :func:`get_file_names`)
    :param split_type: "train", "valid" or "test"
    :return: total number of rows in that split
    """
    train_path, valid_path, test_path = get_file_names(file_name)
    split_path = {"train": train_path, "valid": valid_path, "test": test_path}[split_type]
    chunk_lengths = (
        len(chunk)
        for chunk in pd.read_csv(split_path, encoding="latin-1", chunksize=1000)
    )
    return sum(chunk_lengths)
def get_file_names(file_name: str) -> tuple[str, str, str]:
    """Derive the train/valid/test file names from a dataset path.

    The split tag is inserted before the final extension, e.g.
    ``data/x.csv`` -> ``data/x.train.csv``.
    """
    base_path, extension = file_name.rsplit(".", 1)
    return (
        f"{base_path}.train.{extension}",
        f"{base_path}.valid.{extension}",
        f"{base_path}.test.{extension}",
    )
def split_dataset(file: str, train_ratio: float = 0.7, test_ratio: float = 0.15, valid_ratio: float = 0.15) -> bool:
    """Split a CSV into train/valid/test files, streaming it in 1000-row chunks.

    Rows are appended (mode="a") to the derived split files, so re-running on
    existing split files will duplicate rows.

    Args:
        file: path of the raw CSV (read with header=None, latin-1).
        train_ratio / test_ratio / valid_ratio: per-chunk split fractions.

    Returns:
        True once the whole file has been processed.
    """
    train_file, valid_file, test_file = get_file_names(file)
    # First pass: count rows so the progress bar has an accurate total.
    total_rows = sum(
        len(chunk) for chunk in pd.read_csv(file, encoding="latin-1", header=None, chunksize=1000)
    )
    pbar = tqdm(total=total_rows, desc="Processing rows", unit="row")
    for chunk in pd.read_csv(file, encoding="latin-1", header=None, chunksize=1000):
        train_idx = int(train_ratio * len(chunk))
        valid_idx = int(valid_ratio * len(chunk))
        chunk.iloc[:train_idx, :].to_csv(train_file, mode="a", header=False, index=False)
        chunk.iloc[train_idx : train_idx + valid_idx, :].to_csv(valid_file, mode="a", header=False, index=False)
        # BUG FIX: the original sliced the test split with int(test_ratio*len),
        # so truncation remainders were silently dropped from every chunk.
        # Give the test split everything that is left instead.
        chunk.iloc[train_idx + valid_idx :, :].to_csv(test_file, mode="a", header=False, index=False)
        # BUG FIX: advance by the real chunk size (the last chunk is usually
        # smaller than 1000), and close the bar when done.
        pbar.update(len(chunk))
    pbar.close()
    return True
if __name__ == "__main__":
    # Path to the raw Sentiment140 export; adjust for your environment.
    dataset_path = (
        "/home/ppradhan/Documents/my_learnings/my_uni_stuffs/sa_data_storage/training.1600000"
        ".processed.noemoticon.csv"
    )
    succeeded = split_dataset(dataset_path)
    print(succeeded)
from enum import Enum
from pathlib import Path
from typing import Any, Dict, Optional, Union
import pytorch_lightning as pl
import torch
from sa_app.training.optmizer import LearningRateScheduler, Optimizer
from torch.nn import CrossEntropyLoss
from torchmetrics import Accuracy, MeanMetric, Metric
from transformers import AutoModelForSequenceClassification
class Split(str, Enum):
    """Dataset split identifiers.

    The ``str`` mixin lets members be used directly wherever a plain string
    is expected (e.g. in log keys and comparisons).
    """

    TRAIN = "train"
    VALID = "valid"
    TEST = "test"

    def __str__(self) -> str:
        # Render as the bare value ("train"), not the enum repr ("Split.TRAIN").
        return self.value
class LightningModelWrapper(pl.LightningModule):
    """PyTorch Lightning wrapper around a HF sequence-classification model.

    Computes cross-entropy loss, tracks per-step and per-epoch loss/accuracy,
    and wires up the optimizer plus an optional LR scheduler.
    """

    def __init__(
        self,
        model: AutoModelForSequenceClassification,
        optimizer_params: Optional[dict] = None,
        lr_scheduler_params: Optional[dict] = None,
        unique_config: Optional[dict] = None,
    ):
        """
        Args:
            model: the HF classification model to wrap.
            optimizer_params: kwargs for ``Optimizer.from_config`` (training only).
            lr_scheduler_params: kwargs for ``LearningRateScheduler.from_config``.
            unique_config: run config; must contain training_params.num_classes.
        """
        super().__init__()
        self.model = model
        # Optional: for training only.
        self.optimizer_params = optimizer_params
        self.lr_scheduler_params = lr_scheduler_params
        self.unique_config = unique_config
        # NOTE(review): assumes unique_config is always provided and contains
        # training_params.num_classes; a missing config raises here — confirm.
        self.num_classes = unique_config.get("training_params").get("num_classes")
        self.task_name = "multiclass" if self.num_classes > 2 else "binary"
        self.loss_fn = CrossEntropyLoss()
        self.softmax = torch.nn.Softmax(dim=1)
        self.train_loss = MeanMetric()
        self.train_acc = Accuracy(task=self.task_name, num_classes=self.num_classes)
        self.valid_loss = MeanMetric()
        self.valid_acc = Accuracy(task=self.task_name, num_classes=self.num_classes)

    def forward(self, x):
        """Run one batch; returns softmax probabilities and the CE loss.

        Args:
            x: tuple of (labels, tokenized sentence batch dict).
        """
        labels, sentence_batch = x
        # BUG FIX: Tensor.to() is NOT in-place; the original built a list of
        # moved tensors and discarded it, so the batch never reached the
        # model's device. Rebuild the dict with the moved tensors instead.
        sentence_batch = {
            key: value.to(self.model.device) for key, value in sentence_batch.items()
        }
        # NOTE(review): labels are assumed to already be on the right device
        # (Lightning normally moves the batch) — confirm against the datamodule.
        output = self.model(**sentence_batch)
        # BUG FIX: CrossEntropyLoss applies log-softmax internally, so it must
        # receive the raw logits. The original softmaxed first, silently
        # double-normalizing and flattening the gradients.
        loss = self.loss_fn(output.logits, labels)
        probabilities = self.softmax(output.logits)
        return {"logits": probabilities, "loss": loss}

    def training_step(self, batch: dict, batch_idx: int) -> Dict:
        return self.forward(batch)

    def on_train_batch_end(self, step_output: Dict, batch: Any, batch_idx: int) -> None:
        self.log_batch_end(
            step_output=step_output, batch=batch, loss_fn=self.train_loss, acc_fn=self.train_acc, split_type=Split.TRAIN
        )

    def validation_step(self, batch: dict, batch_idx: int) -> Dict:
        return self.forward(batch)

    def on_validation_batch_end(self, step_output: Dict, batch: Any, batch_idx: int, dataloader_idx: int = 0) -> None:
        self.log_batch_end(
            step_output=step_output, batch=batch, loss_fn=self.valid_loss, acc_fn=self.valid_acc, split_type=Split.VALID
        )

    def on_train_epoch_end(self) -> None:
        self.log_epoch_end(loss_fn=self.train_loss, split_type=Split.TRAIN)

    def on_validation_epoch_end(self) -> None:
        self.log_epoch_end(loss_fn=self.valid_loss, split_type=Split.VALID)

    def log_batch_end(self, step_output: dict, batch: dict, loss_fn: Metric, acc_fn: Metric, split_type: Split):
        """Update running loss/accuracy metrics and log the per-step values."""
        loss = step_output.get("loss")
        loss_fn.update(loss)
        self.log(f"{split_type}_loss_step", loss, batch_size=self.trainer.train_dataloader.batch_size)
        labels, sentence_batch = batch
        predicted_labels = torch.argmax(step_output.get("logits"), dim=1)
        acc_fn(predicted_labels, labels)
        self.log(f"{split_type}_acc_step", acc_fn, batch_size=self.trainer.train_dataloader.batch_size)

    def log_epoch_end(self, loss_fn: Metric, split_type: Split):
        """Log the epoch-mean loss (if any batches ran) and reset the metric."""
        # mean_value stays 0 until update() has been called (e.g. sanity check
        # epochs); skip logging in that case.
        epoch_loss = loss_fn.compute() if loss_fn.mean_value != 0 else None
        if epoch_loss is not None:
            self.log(f"{split_type}-loss-epoch", epoch_loss)
        # BUG FIX: the original never reset the MeanMetric, so the running
        # mean leaked across epochs and later epochs reported a blended value.
        loss_fn.reset()

    def configure_optimizers(self):
        """Build the optimizer and (optionally) an LR scheduler from config."""
        optimizer = Optimizer.from_config(params=self.parameters(), **self.optimizer_params)
        scheduler = None
        if self.lr_scheduler_params is not None:
            self.trainer.fit_loop.setup_data()
            train_steps = self.trainer.max_steps
            # NOTE: pop() mutates lr_scheduler_params, so a second call to
            # configure_optimizers would see the defaults for these two keys.
            lr_warmup = self.lr_scheduler_params.pop("lr_warmup", 0.0)
            interval = self.lr_scheduler_params.pop("interval", "epoch")
            lr_scheduler = LearningRateScheduler.from_config(
                optimizer=optimizer,
                num_warmup_steps=lr_warmup * train_steps,
                num_training_steps=train_steps,
                **self.lr_scheduler_params,
            )
            scheduler = {
                "scheduler": lr_scheduler,
                "interval": interval,
                "frequency": 1,
                "strict": False,
                "monitor": "loss",
            }
        if scheduler:
            return [optimizer], [scheduler]
        else:
            return optimizer

    def save_model(self, path: Union[str, Path]) -> None:
        """Save the model using the original HF AutoModel.

        This is useful for when you'd like to export the model to the hub.

        Args:
            path: Path to save the model to.
        """
        self.model.save_pretrained(path)
import threading
import time
from casbin.enforcer import Enforcer
from casbin.util.rwlock import RWLockWrite
class AtomicBool:
    """A boolean flag whose reads and writes are serialized by a lock.

    Guarantees that concurrent readers always observe a fully written value.
    """

    def __init__(self, value):
        self._lock = threading.Lock()
        self._value = value

    @property
    def value(self):
        """Return the current flag value under the lock."""
        with self._lock:
            return self._value

    @value.setter
    def value(self, new_value):
        """Replace the flag value under the lock."""
        with self._lock:
            self._value = new_value
class SyncedEnforcer:
    """SyncedEnforcer wraps Enforcer and provides synchronized access.

    It's also a drop-in replacement for Enforcer: every public method
    delegates to the wrapped Enforcer while holding a read lock (queries)
    or a write lock (mutations).
    """

    def __init__(self, model=None, adapter=None):
        self._e = Enforcer(model, adapter)
        self._rwlock = RWLockWrite()
        # Reusable reader/writer guards: readers may run concurrently,
        # writers are exclusive.
        self._rl = self._rwlock.gen_rlock()
        self._wl = self._rwlock.gen_wlock()
        self._auto_loading = AtomicBool(False)
        self._auto_loading_thread = None

    def is_auto_loading_running(self):
        """check if SyncedEnforcer is auto loading policies"""
        return self._auto_loading.value

    def _auto_load_policy(self, interval):
        # Background loop: reload the policy every `interval` seconds until
        # stop_auto_load_policy() clears the flag.
        while self.is_auto_loading_running():
            time.sleep(interval)
            self.load_policy()

    def start_auto_load_policy(self, interval):
        """starts a thread that will call load_policy every interval seconds"""
        if self.is_auto_loading_running():
            return
        self._auto_loading.value = True
        self._auto_loading_thread = threading.Thread(target=self._auto_load_policy, args=[interval], daemon=True)
        self._auto_loading_thread.start()

    def stop_auto_load_policy(self):
        """stops the thread started by start_auto_load_policy"""
        if self.is_auto_loading_running():
            self._auto_loading.value = False

    def get_model(self):
        """gets the current model."""
        with self._rl:
            return self._e.get_model()

    def set_model(self, m):
        """sets the current model."""
        with self._wl:
            return self._e.set_model(m)

    def load_model(self):
        """reloads the model from the model CONF file.

        Because the policy is attached to a model, the policy is invalidated
        and needs to be reloaded by calling load_policy().
        """
        with self._wl:
            return self._e.load_model()

    def get_role_manager(self):
        """gets the current role manager."""
        with self._rl:
            return self._e.get_role_manager()

    def set_role_manager(self, rm):
        """sets the current role manager."""
        with self._wl:
            self._e.set_role_manager(rm)

    def get_adapter(self):
        """gets the current adapter."""
        with self._rl:
            # BUG FIX: the original dropped the return, so this always
            # returned None.
            return self._e.get_adapter()

    def set_adapter(self, adapter):
        """sets the current adapter."""
        with self._wl:
            self._e.set_adapter(adapter)

    def set_watcher(self, watcher):
        """sets the current watcher."""
        with self._wl:
            self._e.set_watcher(watcher)

    def set_effector(self, eft):
        """sets the current effector."""
        with self._wl:
            self._e.set_effector(eft)

    def clear_policy(self):
        """clears all policy."""
        with self._wl:
            return self._e.clear_policy()

    def load_policy(self):
        """reloads the policy from file/database."""
        with self._wl:
            return self._e.load_policy()

    def load_filtered_policy(self, filter):
        """reloads a filtered policy from file/database."""
        with self._wl:
            return self._e.load_filtered_policy(filter)

    def save_policy(self):
        """saves the current policy (usually after changed with Casbin API) back to file/database."""
        with self._rl:
            return self._e.save_policy()

    def build_role_links(self):
        """manually rebuild the role inheritance relations."""
        with self._rl:
            return self._e.build_role_links()

    def enforce(self, *rvals):
        """decides whether a "subject" can access a "object" with the operation "action",
        input parameters are usually: (sub, obj, act).
        """
        with self._rl:
            return self._e.enforce(*rvals)

    def enforce_ex(self, *rvals):
        """decides whether a "subject" can access a "object" with the operation "action",
        input parameters are usually: (sub, obj, act).
        return judge result with reason
        """
        with self._rl:
            return self._e.enforce_ex(*rvals)

    def get_all_subjects(self):
        """gets the list of subjects that show up in the current policy."""
        with self._rl:
            return self._e.get_all_subjects()

    def get_all_named_subjects(self, ptype):
        """gets the list of subjects that show up in the current named policy."""
        with self._rl:
            return self._e.get_all_named_subjects(ptype)

    def get_all_objects(self):
        """gets the list of objects that show up in the current policy."""
        with self._rl:
            return self._e.get_all_objects()

    def get_all_named_objects(self, ptype):
        """gets the list of objects that show up in the current named policy."""
        with self._rl:
            return self._e.get_all_named_objects(ptype)

    def get_all_actions(self):
        """gets the list of actions that show up in the current policy."""
        with self._rl:
            return self._e.get_all_actions()

    def get_all_named_actions(self, ptype):
        """gets the list of actions that show up in the current named policy."""
        with self._rl:
            return self._e.get_all_named_actions(ptype)

    def get_all_roles(self):
        """gets the list of roles that show up in the current policy."""
        with self._rl:
            return self._e.get_all_roles()

    def get_all_named_roles(self, ptype):
        """gets the list of roles that show up in the current named policy."""
        with self._rl:
            return self._e.get_all_named_roles(ptype)

    def get_policy(self):
        """gets all the authorization rules in the policy."""
        with self._rl:
            return self._e.get_policy()

    def get_filtered_policy(self, field_index, *field_values):
        """gets all the authorization rules in the policy, field filters can be specified."""
        with self._rl:
            return self._e.get_filtered_policy(field_index, *field_values)

    def get_named_policy(self, ptype):
        """gets all the authorization rules in the named policy."""
        with self._rl:
            return self._e.get_named_policy(ptype)

    def get_filtered_named_policy(self, ptype, field_index, *field_values):
        """gets all the authorization rules in the named policy, field filters can be specified."""
        with self._rl:
            return self._e.get_filtered_named_policy(ptype, field_index, *field_values)

    def get_grouping_policy(self):
        """gets all the role inheritance rules in the policy."""
        with self._rl:
            return self._e.get_grouping_policy()

    def get_filtered_grouping_policy(self, field_index, *field_values):
        """gets all the role inheritance rules in the policy, field filters can be specified."""
        with self._rl:
            return self._e.get_filtered_grouping_policy(field_index, *field_values)

    def get_named_grouping_policy(self, ptype):
        """gets all the role inheritance rules in the policy."""
        with self._rl:
            return self._e.get_named_grouping_policy(ptype)

    def get_filtered_named_grouping_policy(self, ptype, field_index, *field_values):
        """gets all the role inheritance rules in the policy, field filters can be specified."""
        with self._rl:
            return self._e.get_filtered_named_grouping_policy(ptype, field_index, *field_values)

    def has_policy(self, *params):
        """determines whether an authorization rule exists."""
        with self._rl:
            return self._e.has_policy(*params)

    def has_named_policy(self, ptype, *params):
        """determines whether a named authorization rule exists."""
        with self._rl:
            return self._e.has_named_policy(ptype, *params)

    def add_policy(self, *params):
        """adds an authorization rule to the current policy.

        If the rule already exists, the function returns false and the rule will not be added.
        Otherwise the function returns true by adding the new rule.
        """
        with self._wl:
            return self._e.add_policy(*params)

    def add_named_policy(self, ptype, *params):
        """adds an authorization rule to the current named policy.

        If the rule already exists, the function returns false and the rule will not be added.
        Otherwise the function returns true by adding the new rule.
        """
        with self._wl:
            return self._e.add_named_policy(ptype, *params)

    def remove_policy(self, *params):
        """removes an authorization rule from the current policy."""
        with self._wl:
            return self._e.remove_policy(*params)

    def remove_filtered_policy(self, field_index, *field_values):
        """removes an authorization rule from the current policy, field filters can be specified."""
        with self._wl:
            return self._e.remove_filtered_policy(field_index, *field_values)

    def remove_named_policy(self, ptype, *params):
        """removes an authorization rule from the current named policy."""
        with self._wl:
            return self._e.remove_named_policy(ptype, *params)

    def remove_filtered_named_policy(self, ptype, field_index, *field_values):
        """removes an authorization rule from the current named policy, field filters can be specified."""
        with self._wl:
            return self._e.remove_filtered_named_policy(ptype, field_index, *field_values)

    def has_grouping_policy(self, *params):
        """determines whether a role inheritance rule exists."""
        with self._rl:
            return self._e.has_grouping_policy(*params)

    def has_named_grouping_policy(self, ptype, *params):
        """determines whether a named role inheritance rule exists."""
        with self._rl:
            return self._e.has_named_grouping_policy(ptype, *params)

    def add_grouping_policy(self, *params):
        """adds a role inheritance rule to the current policy.

        If the rule already exists, the function returns false and the rule will not be added.
        Otherwise the function returns true by adding the new rule.
        """
        with self._wl:
            return self._e.add_grouping_policy(*params)

    def add_named_grouping_policy(self, ptype, *params):
        """adds a named role inheritance rule to the current policy.

        If the rule already exists, the function returns false and the rule will not be added.
        Otherwise the function returns true by adding the new rule.
        """
        with self._wl:
            return self._e.add_named_grouping_policy(ptype, *params)

    def remove_grouping_policy(self, *params):
        """removes a role inheritance rule from the current policy."""
        with self._wl:
            return self._e.remove_grouping_policy(*params)

    def remove_filtered_grouping_policy(self, field_index, *field_values):
        """removes a role inheritance rule from the current policy, field filters can be specified."""
        with self._wl:
            return self._e.remove_filtered_grouping_policy(field_index, *field_values)

    def remove_named_grouping_policy(self, ptype, *params):
        """removes a role inheritance rule from the current named policy."""
        with self._wl:
            return self._e.remove_named_grouping_policy(ptype, *params)

    def remove_filtered_named_grouping_policy(self, ptype, field_index, *field_values):
        """removes a role inheritance rule from the current named policy, field filters can be specified."""
        with self._wl:
            return self._e.remove_filtered_named_grouping_policy(ptype, field_index, *field_values)

    def add_function(self, name, func):
        """adds a customized function."""
        with self._wl:
            return self._e.add_function(name, func)

    # enforcer.py

    def get_roles_for_user(self, name):
        """gets the roles that a user has."""
        with self._rl:
            return self._e.get_roles_for_user(name)

    def get_users_for_role(self, name):
        """gets the users that has a role."""
        with self._rl:
            return self._e.get_users_for_role(name)

    def has_role_for_user(self, name, role):
        """determines whether a user has a role."""
        with self._rl:
            return self._e.has_role_for_user(name, role)

    def add_role_for_user(self, user, role):
        """
        adds a role for a user.
        Returns false if the user already has the role (aka not affected).
        """
        with self._wl:
            return self._e.add_role_for_user(user, role)

    def delete_role_for_user(self, user, role):
        """
        deletes a role for a user.
        Returns false if the user does not have the role (aka not affected).
        """
        with self._wl:
            return self._e.delete_role_for_user(user, role)

    def delete_roles_for_user(self, user):
        """
        deletes all roles for a user.
        Returns false if the user does not have any roles (aka not affected).
        """
        with self._wl:
            return self._e.delete_roles_for_user(user)

    def delete_user(self, user):
        """
        deletes a user.
        Returns false if the user does not exist (aka not affected).
        """
        with self._wl:
            return self._e.delete_user(user)

    def delete_role(self, role):
        """
        deletes a role.
        Returns false if the role does not exist (aka not affected).
        """
        with self._wl:
            return self._e.delete_role(role)

    def delete_permission(self, *permission):
        """
        deletes a permission.
        Returns false if the permission does not exist (aka not affected).
        """
        with self._wl:
            return self._e.delete_permission(*permission)

    def add_permission_for_user(self, user, *permission):
        """
        adds a permission for a user or role.
        Returns false if the user or role already has the permission (aka not affected).
        """
        with self._wl:
            return self._e.add_permission_for_user(user, *permission)

    def delete_permission_for_user(self, user, *permission):
        """
        deletes a permission for a user or role.
        Returns false if the user or role does not have the permission (aka not affected).
        """
        with self._wl:
            return self._e.delete_permission_for_user(user, *permission)

    def delete_permissions_for_user(self, user):
        """
        deletes permissions for a user or role.
        Returns false if the user or role does not have any permissions (aka not affected).
        """
        with self._wl:
            return self._e.delete_permissions_for_user(user)

    def get_permissions_for_user(self, user):
        """
        gets permissions for a user or role.
        """
        with self._rl:
            return self._e.get_permissions_for_user(user)

    def has_permission_for_user(self, user, *permission):
        """
        determines whether a user has a permission.
        """
        with self._rl:
            return self._e.has_permission_for_user(user, *permission)

    def get_implicit_roles_for_user(self, name, *domain):
        """
        gets implicit roles that a user has.
        Compared to get_roles_for_user(), this function retrieves indirect roles besides direct roles.
        For example:
            g, alice, role:admin
            g, role:admin, role:user
        get_roles_for_user("alice") can only get: ["role:admin"].
        But get_implicit_roles_for_user("alice") will get: ["role:admin", "role:user"].
        """
        with self._rl:
            return self._e.get_implicit_roles_for_user(name, *domain)

    def get_implicit_permissions_for_user(self, user, *domain, filter_policy_dom=True):
        """
        gets implicit permissions for a user or role.
        Compared to get_permissions_for_user(), this function retrieves permissions for inherited roles.
        For example:
            p, admin, data1, read
            p, alice, data2, read
            g, alice, admin
        get_permissions_for_user("alice") can only get: [["alice", "data2", "read"]].
        But get_implicit_permissions_for_user("alice") will get: [["admin", "data1", "read"], ["alice", "data2", "read"]].
        """
        with self._rl:
            return self._e.get_implicit_permissions_for_user(user, *domain, filter_policy_dom=filter_policy_dom)

    def get_named_implicit_permissions_for_user(self, ptype, user, *domain, filter_policy_dom=True):
        """
        gets implicit permissions for a user or role by named policy.
        Compared to get_permissions_for_user(), this function retrieves permissions for inherited roles.
        For example:
            p, admin, data1, read
            p, alice, data2, read
            g, alice, admin
        get_permissions_for_user("alice") can only get: [["alice", "data2", "read"]].
        But get_implicit_permissions_for_user("alice") will get: [["admin", "data1", "read"], ["alice", "data2", "read"]].
        """
        with self._rl:
            return self._e.get_named_implicit_permissions_for_user(
                ptype, user, *domain, filter_policy_dom=filter_policy_dom
            )

    def get_implicit_users_for_permission(self, *permission):
        """
        gets implicit users for a permission.
        For example:
            p, admin, data1, read
            p, bob, data1, read
            g, alice, admin
        get_implicit_users_for_permission("data1", "read") will get: ["alice", "bob"].
        Note: only users will be returned, roles (2nd arg in "g") will be excluded.
        """
        with self._rl:
            return self._e.get_implicit_users_for_permission(*permission)

    def get_roles_for_user_in_domain(self, name, domain):
        """gets the roles that a user has inside a domain."""
        with self._rl:
            return self._e.get_roles_for_user_in_domain(name, domain)

    def get_users_for_role_in_domain(self, name, domain):
        """gets the users that has a role inside a domain."""
        with self._rl:
            return self._e.get_users_for_role_in_domain(name, domain)

    def add_role_for_user_in_domain(self, user, role, domain):
        """adds a role for a user inside a domain.

        Returns false if the user already has the role (aka not affected).
        """
        with self._wl:
            return self._e.add_role_for_user_in_domain(user, role, domain)

    def delete_roles_for_user_in_domain(self, user, role, domain):
        """deletes a role for a user inside a domain.

        Returns false if the user does not have any roles (aka not affected).
        """
        with self._wl:
            return self._e.delete_roles_for_user_in_domain(user, role, domain)

    def get_permissions_for_user_in_domain(self, user, domain):
        """gets permissions for a user or role inside domain."""
        with self._rl:
            return self._e.get_permissions_for_user_in_domain(user, domain)

    def get_named_permissions_for_user_in_domain(self, ptype, user, domain):
        """gets permissions for a user or role by named policy inside domain."""
        with self._rl:
            return self._e.get_named_permissions_for_user_in_domain(ptype, user, domain)

    def enable_auto_build_role_links(self, auto_build_role_links):
        """controls whether to rebuild the role inheritance relations when a role is added or deleted."""
        with self._wl:
            return self._e.enable_auto_build_role_links(auto_build_role_links)

    def enable_auto_save(self, auto_save):
        """controls whether to save a policy rule automatically to the adapter when it is added or removed."""
        with self._wl:
            return self._e.enable_auto_save(auto_save)

    def enable_enforce(self, enabled=True):
        """changes the enforcing state of Casbin,
        when Casbin is disabled, all access will be allowed by the Enforce() function.
        """
        with self._wl:
            return self._e.enable_enforce(enabled)

    def add_named_matching_func(self, ptype, fn):
        """add_named_matching_func add MatchingFunc by ptype RoleManager"""
        with self._wl:
            self._e.add_named_matching_func(ptype, fn)

    def add_named_domain_matching_func(self, ptype, fn):
        """add_named_domain_matching_func add MatchingFunc by ptype to RoleManager"""
        with self._wl:
            self._e.add_named_domain_matching_func(ptype, fn)

    def is_filtered(self):
        """returns true if the loaded policy has been filtered."""
        with self._rl:
            # BUG FIX: the original dropped the return, so this always
            # returned None (falsy) regardless of the filtered state.
            return self._e.is_filtered()

    def add_policies(self, rules):
        """adds authorization rules to the current policy.

        If the rule already exists, the function returns false for the corresponding rule and the rule will not be added.
        Otherwise the function returns true for the corresponding rule by adding the new rule.
        """
        with self._wl:
            return self._e.add_policies(rules)

    def add_named_policies(self, ptype, rules):
        """adds authorization rules to the current named policy.

        If the rule already exists, the function returns false for the corresponding rule and the rule will not be added.
        Otherwise the function returns true for the corresponding by adding the new rule.
        """
        with self._wl:
            return self._e.add_named_policies(ptype, rules)

    def remove_policies(self, rules):
        """removes authorization rules from the current policy."""
        with self._wl:
            return self._e.remove_policies(rules)

    def remove_named_policies(self, ptype, rules):
        """removes authorization rules from the current named policy."""
        with self._wl:
            return self._e.remove_named_policies(ptype, rules)

    def add_grouping_policies(self, rules):
        """adds role inheritance rules to the current policy.

        If the rule already exists, the function returns false for the corresponding policy rule and the rule will not be added.
        Otherwise the function returns true for the corresponding policy rule by adding the new rule.
        """
        with self._wl:
            return self._e.add_grouping_policies(rules)

    def add_named_grouping_policies(self, ptype, rules):
        """adds named role inheritance rules to the current policy.

        If the rule already exists, the function returns false for the corresponding policy rule and the rule will not be added.
        Otherwise the function returns true for the corresponding policy rule by adding the new rule.
        """
        with self._wl:
            return self._e.add_named_grouping_policies(ptype, rules)

    def remove_grouping_policies(self, rules):
        """removes role inheritance rules from the current policy."""
        with self._wl:
            # BUG FIX: the original called the nonexistent method
            # "addremove_grouping_policies_policies", which raised
            # AttributeError on every call.
            return self._e.remove_grouping_policies(rules)

    def remove_named_grouping_policies(self, ptype, rules):
        """removes role inheritance rules from the current named policy."""
        with self._wl:
            return self._e.remove_named_grouping_policies(ptype, rules)

    def build_incremental_role_links(self, op, ptype, rules):
        """incrementally updates role inheritance relations for the given rules."""
        self.get_model().build_incremental_role_links(self.get_role_manager(), op, "g", ptype, rules)

    def new_enforce_context(self, suffix: str) -> "EnforceContext":
        """creates an EnforceContext with the given key suffix (no lock needed: read-only config)."""
        return self._e.new_enforce_context(suffix)
from functools import partial
from casbin.management_enforcer import ManagementEnforcer
from casbin.util import join_slice, array_remove_duplicates, set_subtract
class Enforcer(ManagementEnforcer):
"""
Enforcer = ManagementEnforcer + RBAC_API + RBAC_WITH_DOMAIN_API
"""
"""creates an enforcer via file or DB.
File:
e = casbin.Enforcer("path/to/basic_model.conf", "path/to/basic_policy.csv")
MySQL DB:
a = mysqladapter.DBAdapter("mysql", "mysql_username:mysql_password@tcp(127.0.0.1:3306)/")
e = casbin.Enforcer("path/to/basic_model.conf", a)
"""
def get_roles_for_user(self, name):
"""gets the roles that a user has."""
return self.model.model["g"]["g"].rm.get_roles(name)
def get_users_for_role(self, name):
"""gets the users that has a role."""
return self.model.model["g"]["g"].rm.get_users(name)
def has_role_for_user(self, name, role):
"""determines whether a user has a role."""
roles = self.get_roles_for_user(name)
return any(r == role for r in roles)
def add_role_for_user(self, user, role):
"""
adds a role for a user.
Returns false if the user already has the role (aka not affected).
"""
return self.add_grouping_policy(user, role)
def delete_role_for_user(self, user, role):
"""
deletes a role for a user.
Returns false if the user does not have the role (aka not affected).
"""
return self.remove_grouping_policy(user, role)
def delete_roles_for_user(self, user):
"""
deletes all roles for a user.
Returns false if the user does not have any roles (aka not affected).
"""
return self.remove_filtered_grouping_policy(0, user)
def delete_user(self, user):
"""
deletes a user.
Returns false if the user does not exist (aka not affected).
"""
res1 = self.remove_filtered_grouping_policy(0, user)
res2 = self.remove_filtered_policy(0, user)
return res1 or res2
def delete_role(self, role):
"""
deletes a role.
Returns false if the role does not exist (aka not affected).
"""
res1 = self.remove_filtered_grouping_policy(1, role)
res2 = self.remove_filtered_policy(0, role)
return res1 or res2
def delete_permission(self, *permission):
"""
deletes a permission.
Returns false if the permission does not exist (aka not affected).
"""
return self.remove_filtered_policy(1, *permission)
def add_permission_for_user(self, user, *permission):
"""
adds a permission for a user or role.
Returns false if the user or role already has the permission (aka not affected).
"""
return self.add_policy(join_slice(user, *permission))
def delete_permission_for_user(self, user, *permission):
"""
deletes a permission for a user or role.
Returns false if the user or role does not have the permission (aka not affected).
"""
return self.remove_policy(join_slice(user, *permission))
def delete_permissions_for_user(self, user):
"""
deletes permissions for a user or role.
Returns false if the user or role does not have any permissions (aka not affected).
"""
return self.remove_filtered_policy(0, user)
def get_permissions_for_user(self, user):
"""
gets permissions for a user or role.
"""
return self.get_filtered_policy(0, user)
def has_permission_for_user(self, user, *permission):
"""
determines whether a user has a permission.
"""
return self.has_policy(join_slice(user, *permission))
def get_implicit_roles_for_user(self, name, domain=""):
"""
gets implicit roles that a user has.
Compared to get_roles_for_user(), this function retrieves indirect roles besides direct roles.
For example:
g, alice, role:admin
g, role:admin, role:user
get_roles_for_user("alice") can only get: ["role:admin"].
But get_implicit_roles_for_user("alice") will get: ["role:admin", "role:user"].
"""
res = []
queue = [name]
while queue:
name = queue.pop(0)
for rm in self.rm_map.values():
roles = rm.get_roles(name, domain)
for r in roles:
if r not in res:
res.append(r)
queue.append(r)
return res
def get_implicit_permissions_for_user(self, user, domain="", filter_policy_dom=True):
"""
gets implicit permissions for a user or role.
Compared to get_permissions_for_user(), this function retrieves permissions for inherited roles.
For example:
p, admin, data1, read
p, alice, data2, read
g, alice, admin
get_permissions_for_user("alice") can only get: [["alice", "data2", "read"]].
But get_implicit_permissions_for_user("alice") will get: [["admin", "data1", "read"], ["alice", "data2", "read"]].
For given domain policies are filtered by corresponding domain matching function of DomainManager
Inherited roles can be matched by domain. For domain neutral policies set:
filter_policy_dom = False
filter_policy_dom: bool - For given *domain*, policies will be filtered by domain as well. Default = True
"""
return self.get_named_implicit_permissions_for_user("p", user, domain, filter_policy_dom)
def get_named_implicit_permissions_for_user(self, ptype, user, domain="", filter_policy_dom=True):
"""
gets implicit permissions for a user or role by named policy.
Compared to get_permissions_for_user(), this function retrieves permissions for inherited roles.
For example:
p, admin, data1, read
p, alice, data2, read
g, alice, admin
get_permissions_for_user("alice") can only get: [["alice", "data2", "read"]].
But get_implicit_permissions_for_user("alice") will get: [["admin", "data1", "read"], ["alice", "data2", "read"]].
For given domain policies are filtered by corresponding domain matching function of DomainManager
Inherited roles can be matched by domain. For domain neutral policies set:
filter_policy_dom = False
filter_policy_dom: bool - For given *domain*, policies will be filtered by domain as well. Default = True
"""
roles = self.get_implicit_roles_for_user(user, domain)
roles.insert(0, user)
res = []
# policy domain should be matched by domain_match_fn of DomainManager
domain_matching_func = self.get_role_manager().domain_matching_func
if domain and domain_matching_func != None:
domain = partial(domain_matching_func, domain)
for role in roles:
permissions = self.get_named_permissions_for_user_in_domain(
ptype, role, domain if filter_policy_dom else ""
)
res.extend(permissions)
return res
def get_implicit_users_for_permission(self, *permission):
    """
    gets implicit users for a permission.
    For example:
        p, admin, data1, read
        p, bob, data1, read
        g, alice, admin
    get_implicit_users_for_permission("data1", "read") will get: ["alice", "bob"].
    Note: only users will be returned, roles (2nd arg in "g") will be excluded.
    """
    policy_subjects = self.get_all_subjects()
    inherited_roles = self.model.get_values_for_field_in_policy("g", "g", 1)
    group_subjects = self.model.get_values_for_field_in_policy("g", "g", 0)
    # Candidate users = all subjects seen anywhere, minus anything that is a role.
    candidates = set_subtract(
        array_remove_duplicates(group_subjects + policy_subjects), inherited_roles
    )
    return [user for user in candidates if self.enforce(*join_slice(user, *permission))]
def get_roles_for_user_in_domain(self, name, domain):
    """gets the roles that a user has inside a domain."""
    role_manager = self.model.model["g"]["g"].rm
    return role_manager.get_roles(name, domain)
def get_users_for_role_in_domain(self, name, domain):
    """gets the users that have a role inside a domain."""
    role_manager = self.model.model["g"]["g"].rm
    return role_manager.get_users(name, domain)
def add_role_for_user_in_domain(self, user, role, domain):
    """adds a role for a user inside a domain.

    Returns False if the user already has the role (aka not affected).
    """
    return self.add_grouping_policy(user, role, domain)
def delete_roles_for_user_in_domain(self, user, role, domain):
    """deletes a role for a user inside a domain.

    Returns False if the user does not have any roles (aka not affected).
    """
    return self.remove_filtered_grouping_policy(0, user, role, domain)
def get_permissions_for_user_in_domain(self, user, domain):
    """gets the permissions for a user or role inside a domain (default "p" policy)."""
    return self.get_named_permissions_for_user_in_domain("p", user, domain)
def get_named_permissions_for_user_in_domain(self, ptype, user, domain):
    """gets the permissions for a user or role, under the given named policy type, inside a domain."""
return self.get_filtered_named_policy(ptype, 0, user, domain) | /sa_casbin-1.1.0-py3-none-any.whl/casbin/enforcer.py | 0.729616 | 0.161089 | enforcer.py | pypi |
from casbin.internal_enforcer import InternalEnforcer
from casbin.model.policy_op import PolicyOp
class ManagementEnforcer(InternalEnforcer):
    """
    ManagementEnforcer = InternalEnforcer + Management API.

    All methods accept rules either as varargs or as a single list argument.
    """

    def get_all_subjects(self):
        """gets the list of subjects that show up in the current policy."""
        return self.get_all_named_subjects("p")

    def get_all_named_subjects(self, ptype):
        """gets the list of subjects that show up in the current named policy."""
        return self.model.get_values_for_field_in_policy("p", ptype, 0)

    def get_all_objects(self):
        """gets the list of objects that show up in the current policy."""
        return self.get_all_named_objects("p")

    def get_all_named_objects(self, ptype):
        """gets the list of objects that show up in the current named policy."""
        return self.model.get_values_for_field_in_policy("p", ptype, 1)

    def get_all_actions(self):
        """gets the list of actions that show up in the current policy."""
        return self.get_all_named_actions("p")

    def get_all_named_actions(self, ptype):
        """gets the list of actions that show up in the current named policy."""
        return self.model.get_values_for_field_in_policy("p", ptype, 2)

    def get_all_roles(self):
        """gets the list of roles that show up in the current policy."""
        return self.get_all_named_roles("g")

    def get_all_named_roles(self, ptype):
        """gets the list of roles that show up in the current named policy."""
        return self.model.get_values_for_field_in_policy("g", ptype, 1)

    def get_policy(self):
        """gets all the authorization rules in the policy."""
        return self.get_named_policy("p")

    def get_filtered_policy(self, field_index, *field_values):
        """gets all the authorization rules in the policy, field filters can be specified."""
        return self.get_filtered_named_policy("p", field_index, *field_values)

    def get_named_policy(self, ptype):
        """gets all the authorization rules in the named policy."""
        return self.model.get_policy("p", ptype)

    def get_filtered_named_policy(self, ptype, field_index, *field_values):
        """gets all the authorization rules in the named policy, field filters can be specified."""
        return self.model.get_filtered_policy("p", ptype, field_index, *field_values)

    def get_grouping_policy(self):
        """gets all the role inheritance rules in the policy."""
        return self.get_named_grouping_policy("g")

    def get_filtered_grouping_policy(self, field_index, *field_values):
        """gets all the role inheritance rules in the policy, field filters can be specified."""
        return self.get_filtered_named_grouping_policy("g", field_index, *field_values)

    def get_named_grouping_policy(self, ptype):
        """gets all the role inheritance rules in the policy."""
        return self.model.get_policy("g", ptype)

    def get_filtered_named_grouping_policy(self, ptype, field_index, *field_values):
        """gets all the role inheritance rules in the policy, field filters can be specified."""
        return self.model.get_filtered_policy("g", ptype, field_index, *field_values)

    def has_policy(self, *params):
        """determines whether an authorization rule exists."""
        return self.has_named_policy("p", *params)

    def has_named_policy(self, ptype, *params):
        """determines whether a named authorization rule exists."""
        # A single list argument is taken as the whole rule.
        if len(params) == 1 and isinstance(params[0], list):
            str_slice = params[0]
            return self.model.has_policy("p", ptype, str_slice)
        return self.model.has_policy("p", ptype, list(params))

    def add_policy(self, *params):
        """adds an authorization rule to the current policy.

        If the rule already exists, the function returns false and the rule will not be added.
        Otherwise the function returns true by adding the new rule.
        """
        return self.add_named_policy("p", *params)

    def add_policies(self, rules):
        """adds authorization rules to the current policy.

        If any rule already exists, the function returns false and none of the rules are added.
        Otherwise the function returns true by adding the new rules.
        """
        return self.add_named_policies("p", rules)

    def add_named_policy(self, ptype, *params):
        """adds an authorization rule to the current named policy.

        If the rule already exists, the function returns false and the rule will not be added.
        Otherwise the function returns true by adding the new rule.
        """
        if len(params) == 1 and isinstance(params[0], list):
            str_slice = params[0]
            rule_added = self._add_policy("p", ptype, str_slice)
        else:
            rule_added = self._add_policy("p", ptype, list(params))
        return rule_added

    def add_named_policies(self, ptype, rules):
        """adds authorization rules to the current named policy.

        If any rule already exists, the function returns false and none of the rules are added.
        Otherwise the function returns true by adding the new rules."""
        return self._add_policies("p", ptype, rules)

    def update_policy(self, old_rule, new_rule):
        """updates an authorization rule from the current policy."""
        return self.update_named_policy("p", old_rule, new_rule)

    def update_policies(self, old_rules, new_rules):
        """updates authorization rules from the current policy."""
        return self.update_named_policies("p", old_rules, new_rules)

    def update_named_policy(self, ptype, old_rule, new_rule):
        """updates an authorization rule from the current named policy."""
        return self._update_policy("p", ptype, old_rule, new_rule)

    def update_named_policies(self, ptype, old_rules, new_rules):
        """updates authorization rules from the current named policy."""
        return self._update_policies("p", ptype, old_rules, new_rules)

    def update_filtered_policies(self, new_rules, field_index, *field_values):
        """update_filtered_policies deletes old rules and adds new rules."""
        return self.update_filtered_named_policies("p", new_rules, field_index, *field_values)

    def update_filtered_named_policies(self, ptype, new_rules, field_index, *field_values):
        """update_filtered_named_policies deletes old rules and adds new rules."""
        return self._update_filtered_policies("p", ptype, new_rules, field_index, *field_values)

    def remove_policy(self, *params):
        """removes an authorization rule from the current policy."""
        return self.remove_named_policy("p", *params)

    def remove_policies(self, rules):
        """removes authorization rules from the current policy."""
        return self.remove_named_policies("p", rules)

    def remove_filtered_policy(self, field_index, *field_values):
        """removes an authorization rule from the current policy, field filters can be specified."""
        return self.remove_filtered_named_policy("p", field_index, *field_values)

    def remove_named_policy(self, ptype, *params):
        """removes an authorization rule from the current named policy."""
        if len(params) == 1 and isinstance(params[0], list):
            str_slice = params[0]
            rule_removed = self._remove_policy("p", ptype, str_slice)
        else:
            rule_removed = self._remove_policy("p", ptype, list(params))
        return rule_removed

    def remove_named_policies(self, ptype, rules):
        """removes authorization rules from the current named policy."""
        return self._remove_policies("p", ptype, rules)

    def remove_filtered_named_policy(self, ptype, field_index, *field_values):
        """removes an authorization rule from the current named policy, field filters can be specified."""
        return self._remove_filtered_policy("p", ptype, field_index, *field_values)

    def has_grouping_policy(self, *params):
        """determines whether a role inheritance rule exists."""
        return self.has_named_grouping_policy("g", *params)

    def has_named_grouping_policy(self, ptype, *params):
        """determines whether a named role inheritance rule exists."""
        if len(params) == 1 and isinstance(params[0], list):
            str_slice = params[0]
            return self.model.has_policy("g", ptype, str_slice)
        return self.model.has_policy("g", ptype, list(params))

    def add_grouping_policy(self, *params):
        """adds a role inheritance rule to the current policy.

        If the rule already exists, the function returns false and the rule will not be added.
        Otherwise the function returns true by adding the new rule.
        """
        return self.add_named_grouping_policy("g", *params)

    def add_grouping_policies(self, rules):
        """adds role inheritance rules to the current policy.

        If any rule already exists, the function returns false and none of the rules are added.
        Otherwise the function returns true by adding the new rules.
        """
        return self.add_named_grouping_policies("g", rules)

    def add_named_grouping_policy(self, ptype, *params):
        """adds a named role inheritance rule to the current policy.

        If the rule already exists, the function returns false and the rule will not be added.
        Otherwise the function returns true by adding the new rule.
        """
        rules = []
        if len(params) == 1 and isinstance(params[0], list):
            str_slice = params[0]
            rule_added = self._add_policy("g", ptype, str_slice)
            rules.append(str_slice)
        else:
            rule_added = self._add_policy("g", ptype, list(params))
            rules.append(list(params))
        # Only rebuild role links when the rule was actually added; mirrors the
        # guard on the remove_* side and avoids re-adding links for a duplicate.
        if self.auto_build_role_links and rule_added:
            self.model.build_incremental_role_links(self.rm_map[ptype], PolicyOp.Policy_add, "g", ptype, rules)
        return rule_added

    def add_named_grouping_policies(self, ptype, rules):
        """adds named role inheritance rules to the current policy.

        If any rule already exists, the function returns false and none of the rules are added.
        Otherwise the function returns true by adding the new rules."""
        rules_added = self._add_policies("g", ptype, rules)
        # Guard on the result, consistent with remove_named_grouping_policies.
        if self.auto_build_role_links and rules_added:
            self.model.build_incremental_role_links(self.rm_map[ptype], PolicyOp.Policy_add, "g", ptype, rules)
        return rules_added

    def remove_grouping_policy(self, *params):
        """removes a role inheritance rule from the current policy."""
        return self.remove_named_grouping_policy("g", *params)

    def remove_grouping_policies(self, rules):
        """removes role inheritance rules from the current policy."""
        return self.remove_named_grouping_policies("g", rules)

    def remove_filtered_grouping_policy(self, field_index, *field_values):
        """removes a role inheritance rule from the current policy, field filters can be specified."""
        return self.remove_filtered_named_grouping_policy("g", field_index, *field_values)

    def remove_named_grouping_policy(self, ptype, *params):
        """removes a role inheritance rule from the current named policy."""
        rules = []
        if len(params) == 1 and isinstance(params[0], list):
            str_slice = params[0]
            rule_removed = self._remove_policy("g", ptype, str_slice)
            rules.append(str_slice)
        else:
            rule_removed = self._remove_policy("g", ptype, list(params))
            rules.append(list(params))
        if self.auto_build_role_links and rule_removed:
            self.model.build_incremental_role_links(self.rm_map[ptype], PolicyOp.Policy_remove, "g", ptype, rules)
        return rule_removed

    def remove_named_grouping_policies(self, ptype, rules):
        """removes role inheritance rules from the current named policy."""
        rules_removed = self._remove_policies("g", ptype, rules)
        if self.auto_build_role_links and rules_removed:
            self.model.build_incremental_role_links(self.rm_map[ptype], PolicyOp.Policy_remove, "g", ptype, rules)
        return rules_removed

    def remove_filtered_named_grouping_policy(self, ptype, field_index, *field_values):
        """removes a role inheritance rule from the current named policy, field filters can be specified."""
        # Returns the list of rules that were actually removed (truthy when non-empty).
        rule_removed = self._remove_filtered_policy_returns_effects("g", ptype, field_index, *field_values)
        if self.auto_build_role_links and rule_removed:
            self.model.build_incremental_role_links(
                self.rm_map[ptype], PolicyOp.Policy_remove, "g", ptype, rule_removed
            )
        return rule_removed

    def add_function(self, name, func):
        """adds a customized function."""
        self.fm.add_function(name, func)
from io import StringIO
class Config:
    """represents an implementation of the ConfigInterface (a minimal INI parser)."""

    # DEFAULT_SECTION specifies the name of a section if no name provided
    DEFAULT_SECTION = "default"
    # DEFAULT_COMMENT defines what character(s) indicate a comment `#`
    DEFAULT_COMMENT = "#"
    # DEFAULT_COMMENT_SEM defines what alternate character(s) indicate a comment `;`
    DEFAULT_COMMENT_SEM = ";"
    # DEFAULT_MULTI_LINE_SEPARATOR defines what character indicates a multi-line content
    DEFAULT_MULTI_LINE_SEPARATOR = "\\"

    # NOTE(review): class-level mutable attribute; it is always shadowed by the
    # instance attribute assigned in __init__, so it looks removable — confirm.
    _data = dict()

    def __init__(self):
        # Mapping: section name -> {option -> value}.
        self._data = dict()

    @staticmethod
    def new_config(conf_name):
        """Create a Config parsed from the file at conf_name."""
        c = Config()
        c._parse(conf_name)
        return c

    @staticmethod
    def new_config_from_text(text):
        """Create a Config parsed from an in-memory string."""
        c = Config()
        f = StringIO(text)
        c._parse_buffer(f)
        return c

    def add_config(self, section, option, value):
        """Store option=value under section (the default section when empty).

        A later assignment for the same key silently overwrites the earlier one.
        """
        if section == "":
            section = self.DEFAULT_SECTION
        if section not in self._data.keys():
            self._data[section] = {}
        self._data[section][option] = value

    def _parse(self, fname):
        """Parse the INI-style file at fname (UTF-8)."""
        with open(fname, "r", encoding="utf-8") as f:
            self._parse_buffer(f)

    def _parse_buffer(self, f):
        """Parse an INI-style stream line by line.

        `buf` accumulates the pieces of one (possibly backslash-continued)
        option until a blank line, comment, section header, or EOF flushes it
        via _write. `can_write` defers the flush to the top of the next loop
        iteration so line_num points past the completed option.
        """
        section = ""
        line_num = 0
        buf = []
        can_write = False
        while True:
            # Flush the previously completed option, if any.
            if can_write:
                self._write(section, line_num, buf)
                can_write = False
            line_num = line_num + 1
            line = f.readline()
            if not line:
                # EOF: flush whatever is still buffered.
                if len(buf) > 0:
                    self._write(section, line_num, buf)
                break
            line = line.strip()
            # Blank lines and comment lines terminate a pending option.
            if "" == line or self.DEFAULT_COMMENT == line[0:1] or self.DEFAULT_COMMENT_SEM == line[0:1]:
                can_write = True
                continue
            elif "[" == line[0:1] and "]" == line[-1]:
                # Section header: flush the pending option, then switch sections.
                if len(buf) > 0:
                    self._write(section, line_num, buf)
                    can_write = False
                section = line[1:-1]
            else:
                p = ""
                if self.DEFAULT_MULTI_LINE_SEPARATOR == line[-1]:
                    # Trailing backslash: the value continues on the next line.
                    p = line[0:-1].strip()
                    p = p + " "
                else:
                    p = line
                    can_write = True
                buf.append(p)

    def _write(self, section, line_num, b):
        """Join the buffered pieces, split on the first '=', and record the option.

        Raises RuntimeError when the buffered text has no '='. Clears the
        caller's buffer list in place when done.
        """
        buf = "".join(b)
        if len(buf) <= 0:
            return
        option_val = buf.split("=", 1)
        if len(option_val) != 2:
            raise RuntimeError("parse the content error : line {} , {} = ?".format(line_num, option_val[0]))
        option = option_val[0].strip()
        value = option_val[1].strip()
        self.add_config(section, option, value)
        del b[:]

    def get_bool(self, key):
        """lookups up the value using the provided key and converts the value to a bool."""
        # capitalize() normalizes "true"/"TRUE" etc. to "True".
        return self.get(key).capitalize() == "True"

    def get_int(self, key):
        """lookups up the value using the provided key and converts the value to a int"""
        return int(self.get(key))

    def get_float(self, key):
        """lookups up the value using the provided key and converts the value to a float"""
        return float(self.get(key))

    def get_string(self, key):
        """lookups up the value using the provided key and converts the value to a string"""
        return self.get(key)

    def get_strings(self, key):
        """lookups up the value using the provided key and converts the value to an array of string"""
        value = self.get(key)
        if value == "":
            return None
        return value.split(",")

    def set(self, key, value):
        """Set a value by "section::option" key (bare option goes to the unnamed section)."""
        if len(key) == 0:
            raise RuntimeError("key is empty")
        keys = key.lower().split("::")
        if len(keys) >= 2:
            section = keys[0]
            option = keys[1]
        else:
            section = ""
            option = keys[0]
        self.add_config(section, option, value)

    def get(self, key):
        """Look up "section::key" (or bare key in the default section); "" when missing."""
        keys = key.lower().split("::")
        if len(keys) >= 2:
            section = keys[0]
            option = keys[1]
        else:
            section = self.DEFAULT_SECTION
            option = keys[0]
        if section in self._data.keys():
            if option in self._data[section].keys():
                return self._data[section][option]
return "" | /sa_casbin-1.1.0-py3-none-any.whl/casbin/config/config.py | 0.88457 | 0.224767 | config.py | pypi |
from .effector import Effector
class AllowOverrideEffector(Effector):
    """Implements the "some(where (p_eft == allow))" policy effect."""

    def intermediate_effect(self, effects):
        """Return ALLOW as soon as any matched rule allows; otherwise undecided."""
        return Effector.ALLOW if Effector.ALLOW in effects else Effector.INDETERMINATE

    def final_effect(self, effects):
        """Allow when at least one matched rule allowed; deny by default."""
        return Effector.ALLOW if Effector.ALLOW in effects else Effector.DENY
class DenyOverrideEffector(Effector):
    """Implements the "!some(where (p_eft == deny))" policy effect."""

    def intermediate_effect(self, effects):
        """Return DENY as soon as any matched rule denies; otherwise undecided."""
        return Effector.DENY if Effector.DENY in effects else Effector.INDETERMINATE

    def final_effect(self, effects):
        """Deny when at least one matched rule denied; allow by default."""
        return Effector.DENY if Effector.DENY in effects else Effector.ALLOW
class AllowAndDenyEffector(Effector):
    """Implements the "some(where (p_eft == allow)) && !some(where (p_eft == deny))" effect."""

    def intermediate_effect(self, effects):
        """A deny is conclusive early; anything else stays undecided."""
        return Effector.DENY if Effector.DENY in effects else Effector.INDETERMINATE

    def final_effect(self, effects):
        """Allow only when some rule allowed and no rule denied."""
        allowed = Effector.ALLOW in effects
        denied = Effector.DENY in effects
        return Effector.ALLOW if allowed and not denied else Effector.DENY
class PriorityEffector(Effector):
    """Implements the "priority(p_eft) || deny" policy effect."""

    def intermediate_effect(self, effects):
        """Return the first explicit decision (allow wins over deny); else undecided."""
        if Effector.ALLOW in effects:
            return Effector.ALLOW
        return Effector.DENY if Effector.DENY in effects else Effector.INDETERMINATE

    def final_effect(self, effects):
        """Allow only on an explicit allow; everything else resolves to deny."""
        return Effector.ALLOW if Effector.ALLOW in effects else Effector.DENY
import ipaddress
import re
KEY_MATCH2_PATTERN = re.compile(r"(.*?):[^\/]+(.*?)")
KEY_MATCH3_PATTERN = re.compile(r"(.*?){[^\/]+?}(.*?)")
KEY_MATCH4_PATTERN = re.compile(r"{([^/]+)}")
def key_match(key1, key2):
    """Determine whether key1 matches key2 (RESTful path pattern); key2 may contain one *.

    For example, "/foo/bar" matches "/foo/*".
    """
    star = key2.find("*")
    if star == -1:
        # No wildcard: exact comparison.
        return key1 == key2
    prefix = key2[:star]
    if len(key1) > star:
        return key1[:star] == prefix
    return key1 == prefix
def key_match_func(*args):
    """The wrapper for key_match."""
    return key_match(args[0], args[1])
def key_get(key1, key2):
    """Return the part of key1 matched by the * in key2, or "" when there is no match.

    For example, key_get("/foo/bar/foo", "/foo/*") returns "bar/foo".
    """
    star = key2.find("*")
    if star == -1:
        return ""
    if len(key1) > star and key1[:star] == key2[:star]:
        return key1[star:]
    return ""
def key_match2(key1, key2):
    """Determine whether key1 matches key2 (RESTful path); key2 may contain * or :param.

    For example, "/foo/bar" matches "/foo/*" and "/resource1" matches "/:resource".
    """
    pattern = key2.replace("/*", "/.*")
    # Turn each :param segment into a single-path-segment matcher.
    pattern = KEY_MATCH2_PATTERN.sub(r"\g<1>[^\/]+\g<2>", pattern, 0)
    if pattern == "*":
        pattern = "(.*)"
    return regex_match(key1, "^{}$".format(pattern))
def key_match2_func(*args):
    """The wrapper for key_match2."""
    return key_match2(args[0], args[1])
def key_get2(key1, key2, path_var):
    """Return the value of the named :param in key2 as matched against key1.

    For example, key_get2("/resource1", "/:resource", "resource") returns "resource1".
    Returns "" when key1 does not match or the parameter name is absent.
    """
    pattern = key2.replace("/*", "/.*")
    names = re.findall(":[^/]+", pattern)
    # Each :param becomes a capture group; groups align one-to-one with names.
    pattern = KEY_MATCH2_PATTERN.sub(r"\g<1>([^\/]+)\g<2>", pattern, 0)
    if pattern == "*":
        pattern = "(.*)"
    match = re.match("^{}$".format(pattern), key1)
    if match is None:
        return ""
    for captured, name in zip(match.groups(), names):
        if name[1:] == path_var:
            return captured
    return ""
def key_match3(key1, key2):
    """Determine whether key1 matches key2 (RESTful path); key2 may contain * or {param}.

    For example, "/foo/bar" matches "/foo/*" and "/resource1" matches "/{resource}".
    """
    pattern = key2.replace("/*", "/.*")
    pattern = KEY_MATCH3_PATTERN.sub(r"\g<1>[^\/]+\g<2>", pattern, 0)
    return regex_match(key1, "^{}$".format(pattern))
def key_match3_func(*args):
    """The wrapper for key_match3."""
    return key_match3(args[0], args[1])
def key_get3(key1, key2, path_var):
    """Return the value of the named {param} in key2 as matched against key1.

    For example, key_get3("project/proj_project1_admin/", "project/proj_{project}_admin/",
    "project") returns "project1". Returns "" when there is no match or no such name.
    """
    pattern = key2.replace("/*", "/.*")
    names = re.findall(r"{[^/]+?}", pattern)
    # Each {param} becomes a non-greedy capture group aligned with names.
    pattern = KEY_MATCH3_PATTERN.sub(r"\g<1>([^/]+?)\g<2>", pattern, 0)
    if pattern == "*":
        pattern = "(.*)"
    match = re.match("^{}$".format(pattern), key1)
    if match is None:
        return ""
    for captured, name in zip(match.groups(), names):
        if name[1:-1] == path_var:
            return captured
    return ""
def key_match4(key1: str, key2: str) -> bool:
    """Determine whether key1 matches key2 (RESTful path); key2 may contain * and {param}.

    Besides what key_match3 does, key_match4 requires repeated {param} occurrences
    to capture the same value:
        "/parent/123/child/123" matches "/parent/{id}/child/{id}"
        "/parent/123/child/456" does not match "/parent/{id}/child/{id}"
    (key_match3 would match both.)
    """
    key2 = key2.replace("/*", "/.*")
    # Fixed malformed annotation `tokens: [str]` (a list literal, not a type).
    tokens = []

    def repl(matchobj):
        # Record the parameter name and replace it with a capture group.
        tokens.append(matchobj.group(1))
        return "([^/]+)"

    key2 = KEY_MATCH4_PATTERN.sub(repl, key2)
    matches = re.match("^" + key2 + "$", key1)
    if matches is None:
        return False
    values = matches.groups()
    if len(tokens) != len(values):
        raise Exception("KeyMatch4: number of tokens is not equal to number of values")
    seen = {}
    for token, value in zip(tokens, values):
        # The first captured value for a name wins; later ones must agree.
        if seen.setdefault(token, value) != value:
            return False
    return True
def key_match4_func(*args) -> bool:
    """The wrapper for key_match4."""
    return key_match4(args[0], args[1])
def regex_match(key1, key2):
    """Determine whether key1 matches the regular expression key2 (anchored at the start)."""
    return re.match(key2, key1) is not None
def regex_match_func(*args):
    """the wrapper for RegexMatch."""
    return regex_match(args[0], args[1])
def range_match(pattern, pattern_index, test):
    """Match the single character `test` against the [...] character class whose
    body starts at `pattern_index` in `pattern` (helper for glob_match).

    Returns the index just past the closing "]" on success, or -1 when the
    character does not match (or the class is malformed / truncated).
    """
    pattern_len = len(pattern)
    if pattern_index == pattern_len:
        return -1
    # Leading "!" or "^" negates the class.
    negate = pattern[pattern_index] == "!" or pattern[pattern_index] == "^"
    if negate:
        pattern_index += 1
    ok = 0
    while True:
        if pattern_index == pattern_len:
            # NOTE(review): a class with no closing "]" falls through to the
            # final comparison rather than being rejected — confirm intended.
            break
        c = pattern[pattern_index]
        pattern_index += 1
        if c == "]":
            break
        if c == "\\":
            # Backslash escapes the next class character.
            if pattern_index == pattern_len:
                return -1
            c = pattern[pattern_index]
            pattern_index += 1
        if (
            pattern_index != pattern_len
            and pattern[pattern_index] == "-"
            and pattern_index + 1 != pattern_len
            and pattern[pattern_index + 1] != "]"
        ):
            # Range form "a-b" (a trailing "-]" is treated as a literal "-").
            c2 = pattern[pattern_index + 1]
            pattern_index += 2
            if c2 == "\\":
                if pattern_index == pattern_len:
                    return -1
                c2 = pattern[pattern_index]
                pattern_index += 1
            if c <= test <= c2:
                ok = 1
        elif c == test:
            ok = 1
    # Relies on 0 == False / 1 == True: matched iff ok differs from negate.
    if ok == negate:
        return -1
    else:
        return pattern_index
def glob_match(string, pattern):
    """determines whether string matches the pattern in glob expression.

    Supports "?" (any single non-"/" char), "*" (any run not crossing "/"),
    "[...]" character classes (via range_match), and "\\" escapes. "*" does
    not match across path separators.
    """
    pattern_len = len(pattern)
    string_len = len(string)
    if pattern_len == 0:
        return string_len == 0
    pattern_index = 0
    string_index = 0
    while True:
        if pattern_index == pattern_len:
            # Pattern exhausted: match only if the string is too.
            return string_len == string_index
        c = pattern[pattern_index]
        pattern_index += 1
        if c == "?":
            # "?" consumes exactly one character, never "/".
            if string_index == string_len:
                return False
            if string[string_index] == "/":
                return False
            string_index += 1
            continue
        if c == "*":
            # Collapse a run of consecutive stars; afterwards c holds the first
            # non-star character (already consumed from the pattern).
            while (pattern_index != pattern_len) and (c == "*"):
                c = pattern[pattern_index]
                pattern_index += 1
            if pattern_index == pattern_len:
                # Trailing star: match iff no "/" remains in the string.
                # NOTE(review): when the star run is followed by exactly one
                # final character (e.g. "*a"), the collapse loop consumes it and
                # this branch ignores it — looks like a port bug vs the Go
                # implementation; confirm upstream.
                return string.find("/", string_index) == -1
            else:
                if c == "/":
                    # "*/": jump the string to just past its next "/".
                    string_index = string.find("/", string_index)
                    if string_index == -1:
                        return False
                    else:
                        string_index += 1
            # General case, use recursion.
            # Try each start position up to (not across) the next "/".
            while string_index != string_len:
                if glob_match(string[string_index:], pattern[pattern_index:]):
                    return True
                if string[string_index] == "/":
                    break
                string_index += 1
            continue
        if c == "[":
            # Character class, delegated to range_match; never matches "/".
            if string_index == string_len:
                return False
            if string[string_index] == "/":
                return False
            pattern_index = range_match(pattern, pattern_index, string[string_index])
            if pattern_index == -1:
                return False
            string_index += 1
            continue
        if c == "\\":
            # Escape: the next pattern char is literal (lone trailing "\\" stays "\\").
            if pattern_index == pattern_len:
                c = "\\"
            else:
                c = pattern[pattern_index]
                pattern_index += 1
            # fall through
        # other cases and c == "\\": literal character comparison.
        if string_index == string_len:
            return False
        else:
            if c == string[string_index]:
                string_index += 1
            else:
                return False
def glob_match_func(*args):
    """the wrapper for globMatch."""
    return glob_match(args[0], args[1])
def ip_match(ip1, ip2):
    """Determine whether address ip1 matches ip2, where ip2 is an IP address or a CIDR pattern.

    For example, "192.168.2.123" matches "192.168.2.0/24".
    Raises ValueError when ip1 itself is not a valid address.
    """
    address = ipaddress.ip_address(ip1)
    try:
        return address in ipaddress.ip_network(ip2, strict=False)
    except ValueError:
        # ip2 is not a valid network pattern; fall back to equality.
        return address == ip2
def ip_match_func(*args):
    """the wrapper for IPMatch."""
    return ip_match(args[0], args[1])
def generate_g_function(rm):
    """the factory method of the g(_, _) function.

    With no role manager the returned function degenerates to name equality;
    otherwise it asks the role manager for a link, optionally within a domain.
    """

    def f(*args):
        name1, name2 = args[0], args[1]
        if not rm:
            return name1 == name2
        if len(args) == 2:
            return rm.has_link(name1, name2)
        return rm.has_link(name1, name2, str(args[2]))

    return f
import logging
DEFAULT_SEP = ","
class Policy:
def __init__(self):
self.logger = logging.getLogger(__name__)
self.model = {}
def __getitem__(self, item):
return self.model.get(item)
def __setitem__(self, key, value):
self.model[key] = value
def keys(self):
return self.model.keys()
def values(self):
return self.model.values()
def items(self):
return self.model.items()
def build_role_links(self, rm_map):
"""initializes the roles in RBAC."""
if "g" not in self.keys():
return
for ptype, ast in self["g"].items():
rm = rm_map[ptype]
ast.build_role_links(rm)
def build_incremental_role_links(self, rm, op, sec, ptype, rules):
if sec == "g":
self[sec].get(ptype).build_incremental_role_links(rm, op, rules)
def print_policy(self):
"""Log using info"""
self.logger.info("Policy:")
for sec in ["p", "g"]:
if sec not in self.keys():
continue
for key, ast in self[sec].items():
self.logger.info("{} : {} : {}".format(key, ast.value, ast.policy))
def clear_policy(self):
"""clears all current policy."""
for sec in ["p", "g"]:
if sec not in self.keys():
continue
for key in self[sec].keys():
self[sec][key].policy = []
def get_policy(self, sec, ptype):
"""gets all rules in a policy."""
return self[sec][ptype].policy
def get_filtered_policy(self, sec, ptype, field_index, *field_values):
"""gets rules based on field filters from a policy."""
return [
rule
for rule in self[sec][ptype].policy
if all(
(callable(value) and value(rule[field_index + i])) or (value == "" or rule[field_index + i] == value)
for i, value in enumerate(field_values)
)
]
def has_policy(self, sec, ptype, rule):
"""determines whether a model has the specified policy rule."""
if sec not in self.keys():
return False
if ptype not in self[sec]:
return False
return rule in self[sec][ptype].policy
def add_policy(self, sec, ptype, rule):
"""adds a policy rule to the model."""
assertion = self[sec][ptype]
if not self.has_policy(sec, ptype, rule):
assertion.policy.append(rule)
else:
return False
if sec == "p" and assertion.priority_index >= 0:
try:
idx_insert = int(rule[assertion.priority_index])
i = len(assertion.policy) - 1
for i in range(i, 0, -1):
try:
idx = int(assertion.policy[i - 1][assertion.priority_index])
except Exception as e:
print(e)
if idx > idx_insert:
assertion.policy[i] = assertion.policy[i - 1]
else:
break
assertion.policy[i] = rule
assertion.policy_map[DEFAULT_SEP.join(rule)] = i
except Exception as e:
print(e)
assertion.policy_map[DEFAULT_SEP.join(rule)] = len(assertion.policy) - 1
return True
def add_policies(self, sec, ptype, rules):
"""adds policy rules to the model."""
for rule in rules:
if self.has_policy(sec, ptype, rule):
return False
for rule in rules:
self[sec][ptype].policy.append(rule)
return True
def update_policy(self, sec, ptype, old_rule, new_rule):
"""update a policy rule from the model."""
if sec not in self.keys():
return False
if ptype not in self[sec]:
return False
ast = self[sec][ptype]
if old_rule in ast.policy:
rule_index = ast.policy.index(old_rule)
else:
return False
if "p_priority" in ast.tokens:
priority_index = ast.tokens.index("p_priority")
if old_rule[priority_index] == new_rule[priority_index]:
ast.policy[rule_index] = new_rule
else:
raise Exception("New rule should have the same priority with old rule.")
else:
ast.policy[rule_index] = new_rule
return True
def update_policies(self, sec, ptype, old_rules, new_rules):
"""update policy rules from the model."""
if sec not in self.keys():
return False
if ptype not in self[sec]:
return False
if len(old_rules) != len(new_rules):
return False
ast = self[sec][ptype]
old_rules_index = []
for old_rule in old_rules:
if old_rule in ast.policy:
old_rules_index.append(ast.policy.index(old_rule))
else:
return False
if "p_priority" in ast.tokens:
priority_index = ast.tokens.index("p_priority")
for idx, old_rule, new_rule in zip(old_rules_index, old_rules, new_rules):
if old_rule[priority_index] == new_rule[priority_index]:
ast.policy[idx] = new_rule
else:
raise Exception("New rule should have the same priority with old rule.")
else:
for idx, old_rule, new_rule in zip(old_rules_index, old_rules, new_rules):
ast.policy[idx] = new_rule
return True
def remove_policy(self, sec, ptype, rule):
"""removes a policy rule from the model."""
if not self.has_policy(sec, ptype, rule):
return False
self[sec][ptype].policy.remove(rule)
return rule not in self[sec][ptype].policy
def remove_policies(self, sec, ptype, rules):
"""RemovePolicies removes policy rules from the model."""
for rule in rules:
if not self.has_policy(sec, ptype, rule):
return False
self[sec][ptype].policy.remove(rule)
if rule in self[sec][ptype].policy:
return False
return True
def remove_policies_with_effected(self, sec, ptype, rules):
effected = []
for rule in rules:
if self.has_policy(sec, ptype, rule):
effected.append(rule)
self.remove_policy(sec, ptype, rule)
return effected
def remove_filtered_policy_returns_effects(self, sec, ptype, field_index, *field_values):
    """
    remove_filtered_policy_returns_effects removes policy rules based on field filters from the model.
    """
    if not field_values:
        return []
    if sec not in self.keys() or ptype not in self[sec]:
        return []

    def _matches(rule):
        # An empty-string filter value acts as a wildcard for that field.
        return all(
            value == "" or rule[field_index + offset] == value
            for offset, value in enumerate(field_values)
        )

    kept, removed = [], []
    for rule in self[sec][ptype].policy:
        (removed if _matches(rule) else kept).append(rule)
    self[sec][ptype].policy = kept
    return removed
def remove_filtered_policy(self, sec, ptype, field_index, *field_values):
    """removes policy rules based on field filters from the model."""
    if sec not in self.keys() or ptype not in self[sec]:
        return False
    kept = []
    removed_any = False
    # NOTE: with no field_values, all() is vacuously True and every rule is
    # removed -- deliberately preserved from the original implementation.
    for rule in self[sec][ptype].policy:
        if all(
            value == "" or rule[field_index + offset] == value
            for offset, value in enumerate(field_values)
        ):
            removed_any = True
        else:
            kept.append(rule)
    self[sec][ptype].policy = kept
    return removed_any
def get_values_for_field_in_policy(self, sec, ptype, field_index):
    """gets all values for a field for all rules in a policy, duplicated values are removed."""
    values = []
    if sec not in self.keys():
        return values
    if ptype not in self[sec]:
        return values
    for rule in self[sec][ptype].policy:
        value = rule[field_index]
        # Linear membership check keeps first-seen order while dropping duplicates.
        if value not in values:
            values.append(value)
    # NOTE(review): the trailing "| /sa_casbin..." text below is dataset-dump
    # residue fused onto this line; it is not part of the original source.
    return values | /sa_casbin-1.1.0-py3-none-any.whl/casbin/model/policy.py | 0.62395 | 0.282233 | policy.py | pypi |
from . import Assertion
from casbin import util, config
from .policy import Policy
# Domain used when a policy rule carries no explicit domain field.
DEFAULT_DOMAIN = ""
# Separator between domain and subject in hierarchy-map keys (e.g. "dom::alice").
DEFAULT_SEPARATOR = "::"
class Model(Policy):
    """Casbin model: a Policy that also knows how to load assertion
    definitions from a model CONF file and sort/serialise its policies."""

    # Maps the one-letter section key to its header name in the CONF file.
    section_name_map = {
        "r": "request_definition",
        "p": "policy_definition",
        "g": "role_definition",
        "e": "policy_effect",
        "m": "matchers",
    }

    def _load_assertion(self, cfg, sec, key):
        # Config keys look like "policy_definition::p"; an empty/missing value
        # means the assertion does not exist.
        value = cfg.get(self.section_name_map[sec] + "::" + key)
        return self.add_def(sec, key, value)

    def add_def(self, sec, key, value):
        """Register one assertion; returns True on success, None for empty values."""
        if value == "":
            # Falsy return is the stop signal for _load_section's probing loop.
            return
        ast = Assertion()
        ast.key = key
        ast.value = value
        if "r" == sec or "p" == sec:
            # Request/policy definitions are comma-separated token lists,
            # namespaced by their key (e.g. "sub" -> "p_sub").
            ast.tokens = ast.value.split(",")
            for i, token in enumerate(ast.tokens):
                ast.tokens[i] = key + "_" + token.strip()
        else:
            # Other sections hold expressions; normalise them for the evaluator.
            ast.value = util.remove_comments(util.escape_assertion(ast.value))
        if sec not in self.keys():
            self[sec] = {}
        self[sec][key] = ast
        return True

    def _get_key_suffix(self, i):
        # First assertion has no numeric suffix ("p"); later ones do ("p2", ...).
        if i == 1:
            return ""
        return str(i)

    def _load_section(self, cfg, sec):
        # Probe "p", "p2", "p3", ... until a key is missing or empty.
        i = 1
        while True:
            if not self._load_assertion(cfg, sec, sec + self._get_key_suffix(i)):
                break
            else:
                i = i + 1

    def load_model(self, path):
        """Load the model definition from a CONF file on disk."""
        cfg = config.Config.new_config(path)
        self._load_section(cfg, "r")
        self._load_section(cfg, "p")
        self._load_section(cfg, "e")
        self._load_section(cfg, "m")
        self._load_section(cfg, "g")

    def load_model_from_text(self, text):
        """Load the model definition from a CONF-formatted string."""
        cfg = config.Config.new_config_from_text(text)
        self._load_section(cfg, "r")
        self._load_section(cfg, "p")
        self._load_section(cfg, "e")
        self._load_section(cfg, "m")
        self._load_section(cfg, "g")

    def print_model(self):
        """Log every assertion.

        NOTE(review): ``self.logger`` is not defined in this class; it is
        presumably provided by the ``Policy`` base class -- confirm.
        """
        self.logger.info("Model:")
        for k, v in self.items():
            for i, j in v.items():
                self.logger.info("%s.%s: %s", k, i, j.value)

    def sort_policies_by_priority(self):
        """Sort each policy type by its `priority` column, if declared."""
        for ptype, assertion in self["p"].items():
            for index, token in enumerate(assertion.tokens):
                if token == f"{ptype}_priority":
                    assertion.priority_index = index
                    break
            if assertion.priority_index == -1:
                # No priority column for this policy type; leave order as-is.
                continue
            # Lower priority value sorts first; rebuild the rule -> row map.
            assertion.policy = sorted(assertion.policy, key=lambda x: x[assertion.priority_index])
            for i, policy in enumerate(assertion.policy):
                assertion.policy_map[",".join(policy)] = i
        return None

    def sort_policies_by_subject_hierarchy(self):
        """Sort policies so deeper subjects in the role tree come first.

        Only applies when the effect is exactly ``subjectPriority(p_eft) || deny``.
        """
        if self["e"]["e"].value != "subjectPriority(p_eft) || deny":
            return
        sub_index = 0
        domain_index = -1
        for ptype, assertion in self["p"].items():
            for index, token in enumerate(assertion.tokens):
                if token == "{}_dom".format(ptype):
                    domain_index = index
                    break
            subject_hierarchy_map = self.get_subject_hierarchy_map(self["g"]["g"].policy)

            def compare_policy(policy):
                domain = DEFAULT_DOMAIN
                if domain_index != -1:
                    domain = policy[domain_index]
                name = self.get_name_with_domain(domain, policy[sub_index])
                return subject_hierarchy_map[name]

            # reverse=True: higher hierarchy level (deeper subject) sorts first.
            assertion.policy = sorted(assertion.policy, key=compare_policy, reverse=True)
            for i, policy in enumerate(assertion.policy):
                assertion.policy_map[",".join(policy)] = i

    def get_subject_hierarchy_map(self, policies):
        """Map each domain-qualified subject to its depth in the role tree."""
        subject_hierarchy_map = {}
        # Tree structure of role
        policy_map = {}
        for policy in policies:
            if len(policy) < 2:
                raise RuntimeError("policy g expect 2 more params")
            domain = DEFAULT_DOMAIN
            if len(policy) != 2:
                domain = policy[2]
            child = self.get_name_with_domain(domain, policy[0])
            parent = self.get_name_with_domain(domain, policy[1])
            if parent not in policy_map.keys():
                policy_map[parent] = [child]
            else:
                policy_map[parent].append(child)
            if child not in subject_hierarchy_map.keys():
                subject_hierarchy_map[child] = 0
            if parent not in subject_hierarchy_map.keys():
                subject_hierarchy_map[parent] = 0
            # Mark children so only roots keep 0 before the BFS below.
            subject_hierarchy_map[child] = 1
        # Use queues for levelOrder
        queue = []
        for k, v in subject_hierarchy_map.items():
            root = k
            if v != 0:
                continue
            lv = 0
            queue.append(root)
            # Breadth-first walk assigning each node its level from this root.
            while len(queue) != 0:
                sz = len(queue)
                for _ in range(sz):
                    node = queue.pop(0)
                    subject_hierarchy_map[node] = lv
                    if node in policy_map.keys():
                        for child in policy_map[node]:
                            queue.append(child)
                lv += 1
        return subject_hierarchy_map

    def get_name_with_domain(self, domain, name):
        # e.g. ("domain1", "alice") -> "domain1::alice"
        return "{}{}{}".format(domain, DEFAULT_SEPARATOR, name)

    def to_text(self):
        """Serialise the model back to CONF text."""
        s = []

        def write_string(sec):
            # NOTE(review): uses `sec` (not `p_type`) as the key name, so a
            # secondary definition like "p2" serialises as "p = ..." -- looks
            # suspicious; confirm against upstream casbin behaviour.
            for p_type in self[sec]:
                value = self[sec][p_type].value
                s.append("{} = {}\n".format(sec, value.replace("p_", "p.").replace("r_", "r.")))

        s.append("[request_definition]\n")
        write_string("r")
        s.append("[policy_definition]\n")
        write_string("p")
        if "g" in self.keys():
            s.append("[role_definition]\n")
            for p_type in self["g"]:
                s.append("{} = {}\n".format(p_type, self["g"][p_type].value))
        s.append("[policy_effect]\n")
        write_string("e")
        s.append("[matchers]\n")
        write_string("m")
        # remove last \n
        s[-1] = s[-1].strip()
        # NOTE(review): the trailing "| /sa_casbin..." text below is dataset-dump
        # residue fused onto this line; it is not part of the original source.
        return "".join(s) | /sa_casbin-1.1.0-py3-none-any.whl/casbin/model/model.py | 0.479504 | 0.276862 | model.py | pypi |
from functools import wraps
from sa_decor import globals as G
class with_session:
    """Decorator that injects a SQLAlchemy session into the wrapped function."""

    def __init__(
        self, *, commit: bool = None, force_commit: bool = None, session_maker=None
    ):
        """Decorate a function and declare it with a named only parameter `session`
        `def foo(a, b, c=False, *, session):`
        Then call the function either with an existing session to pass it to the function
        `foo(1, 2, session=my_session)`
        Or let the decorator create one for you
        `foo(1, 2)`

        :param commit: Commit the session after the function
        :param force_commit: Commit even when an exception occurs
        :param session_maker: Pass a session maker if the global is not set (e.g. when using multiple session makers in the application)
        :raises ValueError: If the combination of arguments is invalid
        """
        if commit is None and force_commit is None:
            raise ValueError("commit or force_commit must be specified")
        if commit is False and force_commit:
            raise ValueError("cannot force commit and not commit")
        self.commit = commit
        self.force_commit = force_commit
        # Resolved lazily (and cached) by the `session_maker` property.
        self._session_maker = None
        self._supplied_session_maker = session_maker

    def __call__(self, func):
        @wraps(func)
        def session_manager(*args, **kwargs):
            # Caller supplied its own session: just delegate, no commit handling.
            if kwargs.get("session", None):
                return func(*args, **kwargs)
            with self.session_maker() as sess:
                error = False
                try:
                    result = func(*args, **kwargs, session=sess)
                    # BUG FIX: force_commit implies committing on success too.
                    # Previously `with_session(force_commit=True)` (with commit
                    # unset) never committed unless the function raised, which
                    # contradicts the documented semantics.
                    if self.commit or self.force_commit:
                        sess.commit()
                    return result
                except Exception:
                    error = True
                    raise
                finally:
                    # Commit even on failure when force_commit is set.
                    if self.force_commit and error:
                        sess.commit()

        return session_manager

    @property
    def session_maker(self):
        """Resolve and cache the session maker; a supplied one wins over the global."""
        if self._session_maker is None:
            if self._supplied_session_maker is None and G._session_mkr is None:
                raise ValueError("specify session_maker or set_global_session_maker")
            self._session_maker = (
                self._supplied_session_maker
                if self._supplied_session_maker is not None
                else G._session_mkr
            )
        return self._session_maker  # | /sa_decor-1.0.1.tar.gz/sa_decor-1.0.1/sa_decor/session.py | 0.775732 | 0.253887 | session.py | pypi |
import math
import matplotlib.pyplot as plt
from .Generaldistribution import Distribution
class Gaussian(Distribution):
    """ Gaussian distribution class for calculating and
    visualizing a Gaussian distribution.

    Attributes:
        mean (float) representing the mean value of the distribution
        stdev (float) representing the standard deviation of the distribution
        data_list (list of floats) a list of floats extracted from the data file
    """

    def __init__(self, mu=0, sigma=1):
        Distribution.__init__(self, mu, sigma)

    def calculate_mean(self):
        """Function to calculate the mean of the data set.

        Args:
            None

        Returns:
            float: mean of the data set
        """
        # NOTE: raises ZeroDivisionError if no data has been loaded yet.
        avg = 1.0 * sum(self.data) / len(self.data)
        self.mean = avg
        return self.mean

    def calculate_stdev(self, sample=True):
        """Function to calculate the standard deviation of the data set.

        Args:
            sample (bool): whether the data represents a sample or population

        Returns:
            float: standard deviation of the data set
        """
        # Bessel's correction (n - 1) for a sample standard deviation.
        if sample:
            n = len(self.data) - 1
        else:
            n = len(self.data)
        mean = self.calculate_mean()  # also refreshes self.mean as a side effect
        sigma = sum((d - mean) ** 2 for d in self.data)
        self.stdev = math.sqrt(sigma / n)
        return self.stdev

    def plot_histogram(self):
        """Function to output a histogram of the instance variable data using
        matplotlib pyplot library.

        Args:
            None

        Returns:
            None
        """
        plt.hist(self.data)
        plt.title('Histogram of Data')
        plt.xlabel('data')
        plt.ylabel('count')

    def pdf(self, x):
        """Probability density function calculator for the gaussian distribution.

        Args:
            x (float): point for calculating the probability density function

        Returns:
            float: probability density function output
        """
        return (1.0 / (self.stdev * math.sqrt(2*math.pi))) * math.exp(-0.5*((x - self.mean) / self.stdev) ** 2)

    def plot_histogram_pdf(self, n_spaces=50):
        """Function to plot the normalized histogram of the data and a plot of the
        probability density function along the same range

        Args:
            n_spaces (int): number of data points

        Returns:
            list: x values for the pdf plot
            list: y values for the pdf plot
        """
        min_range = min(self.data)
        max_range = max(self.data)

        # calculates the interval between x values
        interval = 1.0 * (max_range - min_range) / n_spaces

        x = []
        y = []

        # calculate the x values to visualize
        for i in range(n_spaces):
            tmp = min_range + interval * i
            x.append(tmp)
            y.append(self.pdf(tmp))

        # make the plots
        fig, axes = plt.subplots(2, sharex=True)
        fig.subplots_adjust(hspace=.5)
        axes[0].hist(self.data, density=True)
        axes[0].set_title('Normed Histogram of Data')
        axes[0].set_ylabel('Density')

        axes[1].plot(x, y)
        axes[1].set_title('Normal Distribution for \n Sample Mean and Sample Standard Deviation')
        # BUG FIX: previously re-labelled axes[0] a second time; the PDF
        # subplot is axes[1].
        axes[1].set_ylabel('Density')
        plt.show()

        return x, y

    def __add__(self, other):
        """Function to add together two Gaussian distributions

        Args:
            other (Gaussian): Gaussian instance

        Returns:
            Gaussian: Gaussian distribution
        """
        result = Gaussian()
        result.mean = self.mean + other.mean
        # Variances (not stdevs) add for independent Gaussians.
        result.stdev = math.sqrt(self.stdev ** 2 + other.stdev ** 2)
        return result

    def __repr__(self):
        """Function to output the characteristics of the Gaussian instance

        Args:
            None

        Returns:
            string: characteristics of the Gaussian
        """
        return "mean {}, standard deviation {}".format(self.mean, self.stdev)  # | /sa_distributions-0.1.tar.gz/sa_distributions-0.1/sa_distributions/Gaussiandistribution.py | 0.688364 | 0.853058 | Gaussiandistribution.py | pypi |
from collections import namedtuple
from collections.abc import Iterable
from inspect import signature
from itertools import chain
from sqlalchemy import and_, or_, not_, func
from .exceptions import BadFilterFormat, BadSpec
from .models import Field, auto_join, get_model_from_spec, get_default_model, \
get_class_by_tablename
# (key, sqlalchemy_fn, only_one_arg) -- one boolean combinator usable in a
# filter spec; only_one_arg marks unary functions.
BooleanFunction = namedtuple(
    'BooleanFunction', ('key', 'sqlalchemy_fn', 'only_one_arg')
)

# `not` is unary; `and`/`or` accept one or more arguments.
BOOLEAN_FUNCTIONS = [
    BooleanFunction('or', or_, False),
    BooleanFunction('and', and_, False),
    BooleanFunction('not', not_, True),
]
"""
Sqlalchemy boolean functions that can be parsed from the filter definition.
"""
class Operator(object):
    """Resolves an operator name from a filter spec to a comparison callable."""

    # Maps every accepted operator spelling to a lambda building the
    # corresponding SQLAlchemy clause. Unary operators take the field only;
    # binary ones take the field and a value.
    OPERATORS = {
        'is_null': lambda f: f.is_(None),
        'is_not_null': lambda f: f.isnot(None),
        '==': lambda f, a: f == a,
        'eq': lambda f, a: f == a,
        '!=': lambda f, a: f != a,
        'ne': lambda f, a: f != a,
        '>': lambda f, a: f > a,
        'gt': lambda f, a: f > a,
        '<': lambda f, a: f < a,
        'lt': lambda f, a: f < a,
        '>=': lambda f, a: f >= a,
        'ge': lambda f, a: f >= a,
        '<=': lambda f, a: f <= a,
        'le': lambda f, a: f <= a,
        'like': lambda f, a: f.like(a),
        'ilike': lambda f, a: f.ilike(a),
        'not_ilike': lambda f, a: ~f.ilike(a),
        'in': lambda f, a: f.in_(a),
        'not_in': lambda f, a: ~f.in_(a),
        'any': lambda f, a: f.any(a),
        'not_any': lambda f, a: func.not_(f.any(a)),
    }

    def __init__(self, operator=None):
        """Look up `operator` (defaulting to equality) and record its arity."""
        chosen = operator if operator else '=='
        if chosen not in self.OPERATORS:
            raise BadFilterFormat('Operator `{}` not valid.'.format(chosen))
        self.operator = chosen
        self.function = self.OPERATORS[chosen]
        # 1 for unary operators (is_null/is_not_null), 2 for binary ones.
        self.arity = len(signature(self.function).parameters)
class Filter(object):
    """A single filter spec, compiled lazily into a SQLAlchemy expression."""

    def __init__(self, filter_spec):
        self.filter_spec = filter_spec

        try:
            filter_spec['field']
        except KeyError:
            raise BadFilterFormat('`field` is a mandatory filter attribute.')
        except TypeError:
            # Not a mapping at all (e.g. a bare string or number).
            raise BadFilterFormat(
                'Filter spec `{}` should be a dictionary.'.format(filter_spec)
            )

        self.operator = Operator(filter_spec.get('op'))
        self.value = filter_spec.get('value')
        value_present = True if 'value' in filter_spec else False
        # Binary operators require an explicit value key (an explicit None is
        # allowed; absence is not).
        if not value_present and self.operator.arity == 2:
            raise BadFilterFormat('`value` must be provided.')

    def get_named_models(self):
        """Return the (possibly empty) set of model names this filter targets."""
        # `model` and `table` are mutually exclusive ways to name the target.
        if all(k in self.filter_spec for k in ('model', 'table')):
            raise BadFilterFormat(
                'Only one field `model` or `table` must be provided.'
            )
        elif "model" in self.filter_spec:
            return {self.filter_spec['model']}
        elif "table" in self.filter_spec:
            model = get_class_by_tablename(self.filter_spec['table'])
            if model is None:
                raise BadSpec(
                    'The query does not contain table `{}`.'.format(
                        self.filter_spec['table']
                    )
                )
            return {model.__name__}
        else:
            return set()

    def format_for_sqlalchemy(self, query, default_model):
        """Build the SQLAlchemy clause for this filter against `query`."""
        filter_spec = self.filter_spec
        operator = self.operator
        value = self.value

        model = get_model_from_spec(filter_spec, query, default_model)

        function = operator.function
        arity = operator.arity

        field_name = self.filter_spec['field']
        field = Field(model, field_name)
        sqlalchemy_field = field.get_sqlalchemy_field()

        if arity == 1:
            return function(sqlalchemy_field)

        if arity == 2:
            return function(sqlalchemy_field, value)
class BooleanFilter(object):
    """Combines child filters with a SQLAlchemy boolean function (and_/or_/not_)."""

    def __init__(self, function, *filters):
        self.function = function
        self.filters = filters

    def get_named_models(self):
        """Union of the model names referenced by all child filters."""
        names = set()
        for child in self.filters:
            names.update(child.get_named_models())
        return names

    def format_for_sqlalchemy(self, query, default_model):
        """Apply the boolean function to the compiled child clauses."""
        clauses = [
            child.format_for_sqlalchemy(query, default_model)
            for child in self.filters
        ]
        return self.function(*clauses)
def _is_iterable_filter(filter_spec):
""" `filter_spec` may be a list of nested filter specs, or a dict.
"""
return (
isinstance(filter_spec, Iterable) and
not isinstance(filter_spec, (str, dict))
)
def build_filters(filter_spec):
    """ Recursively process `filter_spec` """
    # A list/tuple of specs flattens into the concatenation of its parts.
    if _is_iterable_filter(filter_spec):
        return list(chain.from_iterable(
            build_filters(item) for item in filter_spec
        ))

    if isinstance(filter_spec, dict):
        # Check if filter spec defines a boolean function.
        for boolean_function in BOOLEAN_FUNCTIONS:
            if boolean_function.key in filter_spec:
                # The filter spec is for a boolean-function
                # Get the function argument definitions and validate
                fn_args = filter_spec[boolean_function.key]

                if not _is_iterable_filter(fn_args):
                    raise BadFilterFormat(
                        '`{}` value must be an iterable across the function '
                        'arguments'.format(boolean_function.key)
                    )
                if boolean_function.only_one_arg and len(fn_args) != 1:
                    raise BadFilterFormat(
                        '`{}` must have one argument'.format(
                            boolean_function.key
                        )
                    )
                if not boolean_function.only_one_arg and len(fn_args) < 1:
                    raise BadFilterFormat(
                        '`{}` must have one or more arguments'.format(
                            boolean_function.key
                        )
                    )
                # Wrap the recursively-built children in a single BooleanFilter.
                return [
                    BooleanFilter(
                        boolean_function.sqlalchemy_fn, *build_filters(fn_args)
                    )
                ]

    # Plain leaf spec.
    return [Filter(filter_spec)]
def get_named_models(filters):
    """Union of the model names referenced by the given filters."""
    # set().union() with no arguments yields an empty set for empty input.
    return set().union(*(f.get_named_models() for f in filters))
def apply_filters(query, filter_spec, do_auto_join=True):
    """Apply filters to a SQLAlchemy query or Select object.

    :param query:
        The statement to be processed. May be one of:
        a :class:`sqlalchemy.orm.Query` instance or
        a :class:`sqlalchemy.sql.expression.Select` instance.

    :param filter_spec:
        A dict or an iterable of dicts, where each one includes
        the necessary information to create a filter to be applied to the
        query.

        Example::

            filter_spec = [
                {'model': 'Foo', 'field': 'name', 'op': '==', 'value': 'foo'},
            ]

        If the query being modified refers to a single model, the `model` key
        may be omitted from the filter spec.

        Filters may be combined using boolean functions.

        Example:

            filter_spec = {
                'or': [
                    {'model': 'Foo', 'field': 'id', 'op': '==', 'value': '1'},
                    {'model': 'Bar', 'field': 'id', 'op': '==', 'value': '2'},
                ]
            }

    :param do_auto_join:
        When True (the default), models named by the filters but missing from
        the query are joined automatically where possible.

    :returns:
        The :class:`sqlalchemy.orm.Query` or
        the :class:`sqlalchemy.sql.expression.Select`
        instance after all the filters have been applied.
    """
    filters = build_filters(filter_spec)

    default_model = get_default_model(query)

    filter_models = get_named_models(filters)
    if do_auto_join:
        query = auto_join(query, *filter_models)

    sqlalchemy_filters = [
        filter.format_for_sqlalchemy(query, default_model)
        for filter in filters
    ]

    if sqlalchemy_filters:
        query = query.filter(*sqlalchemy_filters)

    # NOTE(review): the trailing "| /sa-filters..." text below is dataset-dump
    # residue fused onto this line; it is not part of the original source.
    return query | /sa-filters-1.3.0.tar.gz/sa-filters-1.3.0/sa_filters/filters.py | 0.838151 | 0.283 | filters.py | pypi |
from sqlalchemy.orm import Load
from .exceptions import BadLoadFormat
from .models import Field, auto_join, get_model_from_spec, get_default_model
class LoadOnly(object):
    """One load-restriction spec: the set of columns to load for one model."""

    def __init__(self, load_spec):
        self.load_spec = load_spec

        try:
            field_names = load_spec['fields']
        except KeyError:
            raise BadLoadFormat('`fields` is a mandatory attribute.')
        except TypeError:
            # Not a mapping at all (e.g. a bare string).
            raise BadLoadFormat(
                'Load spec `{}` should be a dictionary.'.format(load_spec)
            )

        self.field_names = field_names

    def get_named_models(self):
        """Return the model name targeted by this spec, if it names one."""
        if "model" in self.load_spec:
            return {self.load_spec['model']}
        return set()

    def format_for_sqlalchemy(self, query, default_model):
        """Build the ``Load(model).load_only(...)`` option for this spec."""
        load_spec = self.load_spec
        field_names = self.field_names

        model = get_model_from_spec(load_spec, query, default_model)
        fields = [Field(model, field_name) for field_name in field_names]

        return Load(model).load_only(
            *[field.get_sqlalchemy_field() for field in fields]
        )
def get_named_models(loads):
    """Union of the model names referenced by the given load specs."""
    # set().union() with no arguments yields an empty set for empty input.
    return set().union(*(load.get_named_models() for load in loads))
def apply_loads(query, load_spec):
    """Apply load restrictions to a :class:`sqlalchemy.orm.Query` instance
    or a :class:`sqlalchemy.sql.expression.Select` instance.

    :param load_spec:
        A list of dictionaries, where each item contains the fields to load
        for each model.

        Example::

            load_spec = [
                {'model': 'Foo', fields': ['id', 'name']},
                {'model': 'Bar', 'fields': ['name']},
            ]

        If the query being modified refers to a single model, the `model` key
        may be omitted from the load spec. The following shorthand form is
        also accepted when the model can be inferred::

            load_spec = ['id', 'name']

    :returns:
        The :class:`sqlalchemy.orm.Query` instance or
        a :class:`sqlalchemy.sql.expression.Select` instance
        after the load restrictions have been applied.
    """
    # Shorthand: a bare list of field names becomes a single anonymous spec.
    if (
        isinstance(load_spec, list) and
        all(map(lambda item: isinstance(item, str), load_spec))
    ):
        load_spec = {'fields': load_spec}

    # A single dict is promoted to a one-element list.
    if isinstance(load_spec, dict):
        load_spec = [load_spec]

    loads = [LoadOnly(item) for item in load_spec]

    default_model = get_default_model(query)

    load_models = get_named_models(loads)
    query = auto_join(query, *load_models)

    sqlalchemy_loads = [
        load.format_for_sqlalchemy(query, default_model) for load in loads
    ]
    if sqlalchemy_loads:
        query = query.options(*sqlalchemy_loads)

    # NOTE(review): the trailing "| /sa-filters..." text below is dataset-dump
    # residue fused onto this line; it is not part of the original source.
    return query | /sa-filters-1.3.0.tar.gz/sa-filters-1.3.0/sa_filters/loads.py | 0.857619 | 0.284073 | loads.py | pypi |
import math
from collections import namedtuple
from sqlalchemy import select, func
from sqlalchemy.orm import Query
from .exceptions import InvalidPage
def apply_pagination(query, page_number=None, page_size=None, session=None):
    """Apply pagination to a SQLAlchemy query or Select object.

    :param page_number:
        Page to be returned (starts and defaults to 1).

    :param page_size:
        Maximum number of results to be returned in the page (defaults
        to the total results).

    :param session:
        Required when `query` is a Core Select (used to execute the COUNT
        statement); ignored for ORM Query objects.

    :returns:
        A 2-tuple with the paginated SQLAlchemy query or Select object
        and a pagination namedtuple.

        The pagination object contains information about the results
        and pages: ``page_size`` (defaults to ``total_results``),
        ``page_number`` (defaults to 1), ``num_pages`` and
        ``total_results``.

        Basic usage::

            query, pagination = apply_pagination(query, 1, 10)
            >>> len(query)
            10
            >>> pagination.page_size
            10
            >>> pagination.page_number
            1
            >>> pagination.num_pages
            3
            >>> pagination.total_results
            22
            >>> page_size, page_number, num_pages, total_results = pagination
    """
    total_results = _calculate_total_results(query, session)
    query = _limit(query, page_size)

    # Page size defaults to total results
    if page_size is None or (page_size > total_results and total_results > 0):
        page_size = total_results

    query = _offset(query, page_number, page_size)

    # Page number defaults to 1
    if page_number is None:
        page_number = 1

    num_pages = _calculate_num_pages(page_number, page_size, total_results)

    Pagination = namedtuple(
        'Pagination',
        ['page_number', 'page_size', 'num_pages', 'total_results']
    )
    return query, Pagination(page_number, page_size, num_pages, total_results)
def _limit(query, page_size):
if page_size is not None:
if page_size < 0:
raise InvalidPage(
'Page size should not be negative: {}'.format(page_size)
)
query = query.limit(page_size)
return query
def _offset(query, page_number, page_size):
if page_number is not None:
if page_number < 1:
raise InvalidPage(
'Page number should be positive: {}'.format(page_number)
)
query = query.offset((page_number - 1) * page_size)
return query
def _calculate_num_pages(page_number, page_size, total_results):
if page_size == 0:
return 0
return math.ceil(float(total_results) / float(page_size))
def _calculate_total_results(query, session):
    # ORM Query objects can count themselves; Core selects need a session to
    # run SELECT COUNT(*) over the statement wrapped as a subquery.
    if isinstance(query, Query):
        return query.count()
    # NOTE(review): the trailing "| /sa-filters..." text below is dataset-dump
    # residue fused onto this line; it is not part of the original source.
    return session.execute(
        select(func.count()).select_from(query.subquery())
    ).scalar_one() | /sa-filters-1.3.0.tar.gz/sa-filters-1.3.0/sa_filters/pagination.py | 0.804521 | 0.425128 | pagination.py | pypi |
from sqlalchemy.exc import InvalidRequestError
from sqlalchemy.inspection import inspect
from sqlalchemy.orm import mapperlib, Query
from sqlalchemy.sql.util import find_tables
from sqlalchemy import Table
from sqlalchemy.ext.hybrid import hybrid_property, hybrid_method
import types
from .exceptions import BadQuery, FieldNotFound, BadSpec
class Field(object):
    """A (model, field-name) pair resolvable to a SQLAlchemy mapped attribute."""

    def __init__(self, model, field_name):
        self.model = model
        self.field_name = field_name

    def get_sqlalchemy_field(self):
        """Return the mapped attribute; hybrid methods are called to obtain
        their SQL expression."""
        if self.field_name not in self._get_valid_field_names():
            raise FieldNotFound(
                'Model {} has no column `{}`.'.format(
                    self.model, self.field_name
                )
            )
        sqlalchemy_field = getattr(self.model, self.field_name)

        # If it's a hybrid method, then we call it so that we can work with
        # the result of the execution and not with the method object itself
        if isinstance(sqlalchemy_field, types.MethodType):
            sqlalchemy_field = sqlalchemy_field()

        return sqlalchemy_field

    def _get_valid_field_names(self):
        """Set of plain column names plus hybrid property/method names."""
        inspect_mapper = inspect(self.model)
        columns = inspect_mapper.columns
        orm_descriptors = inspect_mapper.all_orm_descriptors

        column_names = columns.keys()
        # Exact-type match (not isinstance) mirrors how hybrids are registered.
        hybrid_names = [
            key for key, item in orm_descriptors.items()
            if type(item) in [hybrid_property, hybrid_method]
        ]

        return set(column_names) | set(hybrid_names)
def get_model_from_table(table):  # pragma: nocover
    """Resolve model class from table object"""
    # Scan every mapper registry for a mapper that maps this table.
    matches = (
        mapper.class_
        for registry in mapperlib._all_registries()
        for mapper in registry.mappers
        if table in mapper.tables
    )
    return next(matches, None)
def get_class_by_tablename(tablename):
    """Return class reference mapped to table.

    :param tablename: String with name of table.
    :return: Class reference or None.
    """
    # Scan every mapper registry for a mapped class with this __tablename__.
    matches = (
        mapper.class_
        for registry in mapperlib._all_registries()
        for mapper in registry.mappers
        if mapper.class_.__tablename__ == tablename
    )
    return next(matches, None)
def get_query_models(query):
    """Get models from query.

    :param query:
        A :class:`sqlalchemy.orm.Query` instance or a Core Select.

    :returns:
        A dictionary with all the models included in the query.
    """
    if isinstance(query, Query):
        stmt = query.statement
    else:
        stmt = query
    tables = find_tables(stmt, check_columns=True, include_joins=True)
    models = []
    for t in tables:
        if isinstance(t, Table):
            model = get_model_from_table(t)
            # Preserve first-seen order while de-duplicating.
            if model not in models:
                models.append(model)
    # Unmapped tables resolve to None and are dropped here.
    return {model.__name__: model for model in models if model}
def get_model_from_spec(spec, query, default_model=None):
    """ Determine the model to which a spec applies on a given query.

    A spec that does not specify a model may be applied to a query that
    contains a single model. Otherwise the spec must specify the model to
    which it applies, and that model must be present in the query.

    :param query:
        A :class:`sqlalchemy.orm.Query` instance.

    :param spec:
        A dictionary that may or may not contain a model name (or table
        name) to resolve against the query.

    :returns:
        The model class.

    :raise BadSpec:
        If the spec is ambiguous or refers to a model not in the query.

    :raise BadQuery:
        If the query contains no models.
    """
    models = get_query_models(query)
    if not models:
        raise BadQuery('The query does not contain any models.')

    model_name = spec.get('model')
    if "table" in spec:
        # A `table` key resolves to a mapped class and supplies the model name.
        # NOTE(review): get_class_by_tablename can return None; an unknown
        # table name would raise AttributeError here -- confirm intended.
        model = get_class_by_tablename(spec.get('table'))
        model_name = model.__name__
    if model_name is not None:
        models = [v for (k, v) in models.items() if k == model_name]
        if not models:
            raise BadSpec(
                'The query does not contain model `{}`.'.format(model_name)
            )
        model = models[0]
    else:
        if len(models) == 1:
            model = list(models.values())[0]
        elif default_model is not None:
            return default_model
        else:
            raise BadSpec("Ambiguous spec. Please specify a model.")

    return model
def get_model_class_by_name(registry, name):
    """ Return the model class matching `name` in the given `registry`.
    """
    # Entries without a __name__ (e.g. registry bookkeeping objects) are skipped.
    return next(
        (
            cls
            for cls in registry.values()
            if getattr(cls, '__name__', None) == name
        ),
        None,
    )
def get_default_model(query):
    """ Return the singular model from `query`, or `None` if `query` contains
    multiple models.
    """
    models = get_query_models(query).values()
    if len(models) != 1:
        return None
    return next(iter(models))
def auto_join(query, *model_names):
    """ Automatically join models to `query` if they're not already present
    and the join can be done implicitly.
    """
    # every model has access to the registry, so we can use any from the query
    query_models = get_query_models(query).values()
    model_registry = list(query_models)[-1].registry._class_registry

    for name in model_names:
        model = get_model_class_by_name(model_registry, name)
        if model and (model not in get_query_models(query).values()):
            try:
                # https://docs.sqlalchemy.org/en/14/changelog/migration_14.html
                # Many Core and ORM statement objects now perform much of
                # their construction and validation in the compile phase
                tmp = query.join(model)
                # Force compilation so an invalid implicit join surfaces here
                # rather than at execution time.
                if isinstance(query, Query):
                    tmp.statement.compile()
                else:
                    tmp.compile()
                query = tmp
            except InvalidRequestError:
                pass  # can't be autojoined
    # NOTE(review): the trailing "| /sa-filters..." text below is dataset-dump
    # residue fused onto this line; it is not part of the original source.
    return query | /sa-filters-1.3.0.tar.gz/sa-filters-1.3.0/sa_filters/models.py | 0.771672 | 0.367554 | models.py | pypi |
from .exceptions import BadSortFormat
from .models import Field, auto_join, get_model_from_spec, get_default_model
# Accepted values for the sort spec's `direction` key.
SORT_ASCENDING = 'asc'
SORT_DESCENDING = 'desc'
class Sort(object):
    """One sort spec: field, direction, and optional NULL placement flags."""

    def __init__(self, sort_spec):
        self.sort_spec = sort_spec

        try:
            field_name = sort_spec['field']
            direction = sort_spec['direction']
        except KeyError:
            raise BadSortFormat(
                '`field` and `direction` are mandatory attributes.'
            )
        except TypeError:
            # Not a mapping at all (e.g. a bare string).
            raise BadSortFormat(
                'Sort spec `{}` should be a dictionary.'.format(sort_spec)
            )

        if direction not in [SORT_ASCENDING, SORT_DESCENDING]:
            raise BadSortFormat('Direction `{}` not valid.'.format(direction))

        self.field_name = field_name
        self.direction = direction
        # Optional flags; absent keys default to None (database default placement).
        self.nullsfirst = sort_spec.get('nullsfirst')
        self.nullslast = sort_spec.get('nullslast')

    def get_named_models(self):
        """Return the model name targeted by this spec, if it names one."""
        if "model" in self.sort_spec:
            return {self.sort_spec['model']}
        return set()

    def format_for_sqlalchemy(self, query, default_model):
        """Build the ORDER BY clause element for this spec."""
        sort_spec = self.sort_spec
        direction = self.direction
        field_name = self.field_name

        model = get_model_from_spec(sort_spec, query, default_model)

        field = Field(model, field_name)
        sqlalchemy_field = field.get_sqlalchemy_field()

        if direction == SORT_ASCENDING:
            sort_fnc = sqlalchemy_field.asc
        elif direction == SORT_DESCENDING:
            sort_fnc = sqlalchemy_field.desc

        # nullsfirst wins when both flags are truthy.
        if self.nullsfirst:
            return sort_fnc().nullsfirst()
        elif self.nullslast:
            return sort_fnc().nullslast()
        else:
            return sort_fnc()
def get_named_models(sorts):
    """Union of the model names referenced by the given sort specs."""
    # set().union() with no arguments yields an empty set for empty input.
    return set().union(*(sort.get_named_models() for sort in sorts))
def apply_sort(query, sort_spec):
    """Apply sorting to a :class:`sqlalchemy.orm.Query` instance or
    a :class:`sqlalchemy.sql.expression.Select` instance.

    :param sort_spec:
        A list of dictionaries, where each one of them includes
        the necessary information to order the elements of the query.

        Example::

            sort_spec = [
                {'model': 'Foo', 'field': 'name', 'direction': 'asc'},
                {'model': 'Bar', 'field': 'id', 'direction': 'desc'},
                {
                    'model': 'Qux',
                    'field': 'surname',
                    'direction': 'desc',
                    'nullslast': True,
                },
                {
                    'model': 'Baz',
                    'field': 'count',
                    'direction': 'asc',
                    'nullsfirst': True,
                },
            ]

        If the query being modified refers to a single model, the `model` key
        may be omitted from the sort spec.

    :returns:
        The :class:`sqlalchemy.orm.Query` instance or
        the :class:`sqlalchemy.sql.expression.Select` after the provided
        sorting has been applied.
    """
    # A single dict is promoted to a one-element list.
    if isinstance(sort_spec, dict):
        sort_spec = [sort_spec]

    sorts = [Sort(item) for item in sort_spec]

    default_model = get_default_model(query)

    sort_models = get_named_models(sorts)
    query = auto_join(query, *sort_models)

    sqlalchemy_sorts = [
        sort.format_for_sqlalchemy(query, default_model) for sort in sorts
    ]

    if sqlalchemy_sorts:
        query = query.order_by(*sqlalchemy_sorts)

    # NOTE(review): the trailing "| /sa-filters..." text below is dataset-dump
    # residue fused onto this line; it is not part of the original source.
    return query | /sa-filters-1.3.0.tar.gz/sa-filters-1.3.0/sa_filters/sorting.py | 0.796015 | 0.210259 | sorting.py | pypi |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.