repository_name
stringlengths
5
67
func_path_in_repository
stringlengths
4
234
func_name
stringlengths
0
314
whole_func_string
stringlengths
52
3.87M
language
stringclasses
6 values
func_code_string
stringlengths
52
3.87M
func_code_tokens
listlengths
15
672k
func_documentation_string
stringlengths
1
47.2k
func_documentation_tokens
listlengths
1
3.92k
split_name
stringclasses
1 value
func_code_url
stringlengths
85
339
yourcelf/escapejson
escapejson/escapejson.py
escapejson
def escapejson(string): ''' Escape `string`, which should be syntactically valid JSON (this is not verified), so that it is safe for inclusion in HTML <script> environments and as literal javascript. ''' for fro, to in replacements: string = string.replace(fro, to) return string
python
def escapejson(string): ''' Escape `string`, which should be syntactically valid JSON (this is not verified), so that it is safe for inclusion in HTML <script> environments and as literal javascript. ''' for fro, to in replacements: string = string.replace(fro, to) return string
[ "def", "escapejson", "(", "string", ")", ":", "for", "fro", ",", "to", "in", "replacements", ":", "string", "=", "string", ".", "replace", "(", "fro", ",", "to", ")", "return", "string" ]
Escape `string`, which should be syntactically valid JSON (this is not verified), so that it is safe for inclusion in HTML <script> environments and as literal javascript.
[ "Escape", "string", "which", "should", "be", "syntactically", "valid", "JSON", "(", "this", "is", "not", "verified", ")", "so", "that", "it", "is", "safe", "for", "inclusion", "in", "HTML", "<script", ">", "environments", "and", "as", "literal", "javascript"...
train
https://github.com/yourcelf/escapejson/blob/5b39160ae619542cc16db7a443b752d64ff5c416/escapejson/escapejson.py#L10-L18
tkf/rash
rash/interactive_search.py
strip_glob
def strip_glob(string, split_str=' '): """ Strip glob portion in `string`. >>> strip_glob('*glob*like') 'glob like' >>> strip_glob('glob?') 'glo' >>> strip_glob('glob[seq]') 'glob' >>> strip_glob('glob[!seq]') 'glob' :type string: str :rtype: str """ string = _GLOB_PORTION_RE.sub(split_str, string) return string.strip()
python
def strip_glob(string, split_str=' '): """ Strip glob portion in `string`. >>> strip_glob('*glob*like') 'glob like' >>> strip_glob('glob?') 'glo' >>> strip_glob('glob[seq]') 'glob' >>> strip_glob('glob[!seq]') 'glob' :type string: str :rtype: str """ string = _GLOB_PORTION_RE.sub(split_str, string) return string.strip()
[ "def", "strip_glob", "(", "string", ",", "split_str", "=", "' '", ")", ":", "string", "=", "_GLOB_PORTION_RE", ".", "sub", "(", "split_str", ",", "string", ")", "return", "string", ".", "strip", "(", ")" ]
Strip glob portion in `string`. >>> strip_glob('*glob*like') 'glob like' >>> strip_glob('glob?') 'glo' >>> strip_glob('glob[seq]') 'glob' >>> strip_glob('glob[!seq]') 'glob' :type string: str :rtype: str
[ "Strip", "glob", "portion", "in", "string", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/interactive_search.py#L31-L49
intiocean/pyinter
pyinter/examples/daterange.py
daterange
def daterange(start, end, delta=timedelta(days=1), lower=Interval.CLOSED, upper=Interval.OPEN): """Returns a generator which creates the next value in the range on demand""" date_interval = Interval(lower=lower, lower_value=start, upper_value=end, upper=upper) current = start if start in date_interval else start + delta while current in date_interval: yield current current = current + delta
python
def daterange(start, end, delta=timedelta(days=1), lower=Interval.CLOSED, upper=Interval.OPEN): """Returns a generator which creates the next value in the range on demand""" date_interval = Interval(lower=lower, lower_value=start, upper_value=end, upper=upper) current = start if start in date_interval else start + delta while current in date_interval: yield current current = current + delta
[ "def", "daterange", "(", "start", ",", "end", ",", "delta", "=", "timedelta", "(", "days", "=", "1", ")", ",", "lower", "=", "Interval", ".", "CLOSED", ",", "upper", "=", "Interval", ".", "OPEN", ")", ":", "date_interval", "=", "Interval", "(", "lowe...
Returns a generator which creates the next value in the range on demand
[ "Returns", "a", "generator", "which", "creates", "the", "next", "value", "in", "the", "range", "on", "demand" ]
train
https://github.com/intiocean/pyinter/blob/fb6e904307477fa43123cc9ab326680aa1a8cd62/pyinter/examples/daterange.py#L5-L11
pebble/libpebble2
libpebble2/services/appmessage.py
AppMessageService.send_message
def send_message(self, target_app, dictionary): """ Send a message to the given app, which should be currently running on the Pebble (unless using a non-standard AppMessage endpoint, in which case its rules apply). AppMessage can only represent flat dictionaries with integer keys; as such, ``dictionary`` must be flat and have integer keys. Because the AppMessage dictionary type is more expressive than Python's native types allow, all entries in the dictionary provided must be wrapped in one of the value types: ======================= ============= ============ AppMessageService type C type Python type ======================= ============= ============ :class:`Uint8` ``uint8_t`` :any:`int` :class:`Uint16` ``uint16_t`` :any:`int` :class:`Uint32` ``uint32_t`` :any:`int` :class:`Int8` ``int8_t`` :any:`int` :class:`Int16` ``int16_t`` :any:`int` :class:`Int32` ``int32_t`` :any:`int` :class:`CString` ``char *`` :any:`str` :class:`ByteArray` ``uint8_t *`` :any:`bytes` ======================= ============= ============ For instance: :: appmessage.send_message(UUID("6FEAF2DE-24FA-4ED3-AF66-C853FA6E9C3C"), { 16: Uint8(62), 6428356: CString("friendship"), }) :param target_app: The UUID of the app to which to send a message. :type target_app: ~uuid.UUID :param dictionary: The dictionary to send. :type dictionary: dict :return: The transaction ID sent message, as used in the ``ack`` and ``nack`` events. 
:rtype: int """ tid = self._get_txid() message = self._message_type(transaction_id=tid) tuples = [] for k, v in iteritems(dictionary): if isinstance(v, AppMessageNumber): tuples.append(AppMessageTuple(key=k, type=v.type, data=struct.pack(self._type_mapping[v.type, v.length], v.value))) elif v.type == AppMessageTuple.Type.CString: tuples.append(AppMessageTuple(key=k, type=v.type, data=v.value.encode('utf-8') + b'\x00')) elif v.type == AppMessageTuple.Type.ByteArray: tuples.append(AppMessageTuple(key=k, type=v.type, data=v.value)) message.data = AppMessagePush(uuid=target_app, dictionary=tuples) self._pending_messages[tid] = target_app self._pebble.send_packet(message) return tid
python
def send_message(self, target_app, dictionary): """ Send a message to the given app, which should be currently running on the Pebble (unless using a non-standard AppMessage endpoint, in which case its rules apply). AppMessage can only represent flat dictionaries with integer keys; as such, ``dictionary`` must be flat and have integer keys. Because the AppMessage dictionary type is more expressive than Python's native types allow, all entries in the dictionary provided must be wrapped in one of the value types: ======================= ============= ============ AppMessageService type C type Python type ======================= ============= ============ :class:`Uint8` ``uint8_t`` :any:`int` :class:`Uint16` ``uint16_t`` :any:`int` :class:`Uint32` ``uint32_t`` :any:`int` :class:`Int8` ``int8_t`` :any:`int` :class:`Int16` ``int16_t`` :any:`int` :class:`Int32` ``int32_t`` :any:`int` :class:`CString` ``char *`` :any:`str` :class:`ByteArray` ``uint8_t *`` :any:`bytes` ======================= ============= ============ For instance: :: appmessage.send_message(UUID("6FEAF2DE-24FA-4ED3-AF66-C853FA6E9C3C"), { 16: Uint8(62), 6428356: CString("friendship"), }) :param target_app: The UUID of the app to which to send a message. :type target_app: ~uuid.UUID :param dictionary: The dictionary to send. :type dictionary: dict :return: The transaction ID sent message, as used in the ``ack`` and ``nack`` events. 
:rtype: int """ tid = self._get_txid() message = self._message_type(transaction_id=tid) tuples = [] for k, v in iteritems(dictionary): if isinstance(v, AppMessageNumber): tuples.append(AppMessageTuple(key=k, type=v.type, data=struct.pack(self._type_mapping[v.type, v.length], v.value))) elif v.type == AppMessageTuple.Type.CString: tuples.append(AppMessageTuple(key=k, type=v.type, data=v.value.encode('utf-8') + b'\x00')) elif v.type == AppMessageTuple.Type.ByteArray: tuples.append(AppMessageTuple(key=k, type=v.type, data=v.value)) message.data = AppMessagePush(uuid=target_app, dictionary=tuples) self._pending_messages[tid] = target_app self._pebble.send_packet(message) return tid
[ "def", "send_message", "(", "self", ",", "target_app", ",", "dictionary", ")", ":", "tid", "=", "self", ".", "_get_txid", "(", ")", "message", "=", "self", ".", "_message_type", "(", "transaction_id", "=", "tid", ")", "tuples", "=", "[", "]", "for", "k...
Send a message to the given app, which should be currently running on the Pebble (unless using a non-standard AppMessage endpoint, in which case its rules apply). AppMessage can only represent flat dictionaries with integer keys; as such, ``dictionary`` must be flat and have integer keys. Because the AppMessage dictionary type is more expressive than Python's native types allow, all entries in the dictionary provided must be wrapped in one of the value types: ======================= ============= ============ AppMessageService type C type Python type ======================= ============= ============ :class:`Uint8` ``uint8_t`` :any:`int` :class:`Uint16` ``uint16_t`` :any:`int` :class:`Uint32` ``uint32_t`` :any:`int` :class:`Int8` ``int8_t`` :any:`int` :class:`Int16` ``int16_t`` :any:`int` :class:`Int32` ``int32_t`` :any:`int` :class:`CString` ``char *`` :any:`str` :class:`ByteArray` ``uint8_t *`` :any:`bytes` ======================= ============= ============ For instance: :: appmessage.send_message(UUID("6FEAF2DE-24FA-4ED3-AF66-C853FA6E9C3C"), { 16: Uint8(62), 6428356: CString("friendship"), }) :param target_app: The UUID of the app to which to send a message. :type target_app: ~uuid.UUID :param dictionary: The dictionary to send. :type dictionary: dict :return: The transaction ID sent message, as used in the ``ack`` and ``nack`` events. :rtype: int
[ "Send", "a", "message", "to", "the", "given", "app", "which", "should", "be", "currently", "running", "on", "the", "Pebble", "(", "unless", "using", "a", "non", "-", "standard", "AppMessage", "endpoint", "in", "which", "case", "its", "rules", "apply", ")",...
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/services/appmessage.py#L73-L125
INM-6/hybridLFPy
examples/brunel_alpha_nest.py
simulate
def simulate(): '''instantiate and execute network simulation''' #separate model execution from parameters for safe import from other files nest.ResetKernel() ''' Configuration of the simulation kernel by the previously defined time resolution used in the simulation. Setting "print_time" to True prints the already processed simulation time as well as its percentage of the total simulation time. ''' nest.SetKernelStatus({"resolution": dt, "print_time": True, "overwrite_files": True}) print("Building network") ''' Configuration of the model `iaf_psc_alpha` and `poisson_generator` using SetDefaults(). This function expects the model to be the inserted as a string and the parameter to be specified in a dictionary. All instances of theses models created after this point will have the properties specified in the dictionary by default. ''' nest.SetDefaults("iaf_psc_alpha", neuron_params) nest.SetDefaults("poisson_generator",{"rate": p_rate}) ''' Creation of the nodes using `Create`. We store the returned handles in variables for later reference. Here the excitatory and inhibitory, as well as the poisson generator and two spike detectors. The spike detectors will later be used to record excitatory and inhibitory spikes. ''' nodes_ex = nest.Create("iaf_psc_alpha",NE) nodes_in = nest.Create("iaf_psc_alpha",NI) noise = nest.Create("poisson_generator") espikes = nest.Create("spike_detector") ispikes = nest.Create("spike_detector") print("first exc node: {}".format(nodes_ex[0])) print("first inh node: {}".format(nodes_in[0])) ''' distribute membrane potentials ''' nest.SetStatus(nodes_ex, "V_m", random.rand(len(nodes_ex))*neuron_params["V_th"]) nest.SetStatus(nodes_in, "V_m", random.rand(len(nodes_in))*neuron_params["V_th"]) ''' Configuration of the spike detectors recording excitatory and inhibitory spikes using `SetStatus`, which expects a list of node handles and a list of parameter dictionaries. 
Setting the variable "to_file" to True ensures that the spikes will be recorded in a .gdf file starting with the string assigned to label. Setting "withtime" and "withgid" to True ensures that each spike is saved to file by stating the gid of the spiking neuron and the spike time in one line. ''' nest.SetStatus(espikes,[{ "label": os.path.join(spike_output_path, label + "-EX"), "withtime": True, "withgid": True, "to_file": True, }]) nest.SetStatus(ispikes,[{ "label": os.path.join(spike_output_path, label + "-IN"), "withtime": True, "withgid": True, "to_file": True,}]) print("Connecting devices") ''' Definition of a synapse using `CopyModel`, which expects the model name of a pre-defined synapse, the name of the customary synapse and an optional parameter dictionary. The parameters defined in the dictionary will be the default parameter for the customary synapse. Here we define one synapse for the excitatory and one for the inhibitory connections giving the previously defined weights and equal delays. ''' nest.CopyModel("static_synapse","excitatory",{"weight":J_ex, "delay":delay}) nest.CopyModel("static_synapse","inhibitory",{"weight":J_in, "delay":delay}) ''' Connecting the previously defined poisson generator to the excitatory and inhibitory neurons using the excitatory synapse. Since the poisson generator is connected to all neurons in the population the default rule ('all_to_all') of Connect() is used. The synaptic properties are inserted via syn_spec which expects a dictionary when defining multiple variables or a string when simply using a pre-defined synapse. ''' if Poisson: nest.Connect(noise,nodes_ex, 'all_to_all', "excitatory") nest.Connect(noise,nodes_in,'all_to_all', "excitatory") ''' Connecting the first N_neurons nodes of the excitatory and inhibitory population to the associated spike detectors using excitatory synapses. Here the same shortcut for the specification of the synapse as defined above is used. 
''' nest.Connect(nodes_ex,espikes, 'all_to_all', "excitatory") nest.Connect(nodes_in,ispikes, 'all_to_all', "excitatory") print("Connecting network") print("Excitatory connections") ''' Connecting the excitatory population to all neurons using the pre-defined excitatory synapse. Beforehand, the connection parameter are defined in a dictionary. Here we use the connection rule 'fixed_indegree', which requires the definition of the indegree. Since the synapse specification is reduced to assigning the pre-defined excitatory synapse it suffices to insert a string. ''' conn_params_ex = {'rule': 'fixed_indegree', 'indegree': CE} nest.Connect(nodes_ex, nodes_ex+nodes_in, conn_params_ex, "excitatory") print("Inhibitory connections") ''' Connecting the inhibitory population to all neurons using the pre-defined inhibitory synapse. The connection parameter as well as the synapse paramtere are defined analogously to the connection from the excitatory population defined above. ''' conn_params_in = {'rule': 'fixed_indegree', 'indegree': CI} nest.Connect(nodes_in, nodes_ex+nodes_in, conn_params_in, "inhibitory") ''' Storage of the time point after the buildup of the network in a variable. ''' endbuild=time.time() ''' Simulation of the network. ''' print("Simulating") nest.Simulate(simtime) ''' Storage of the time point after the simulation of the network in a variable. ''' endsimulate= time.time() ''' Reading out the total number of spikes received from the spike detector connected to the excitatory population and the inhibitory population. ''' events_ex = nest.GetStatus(espikes,"n_events")[0] events_in = nest.GetStatus(ispikes,"n_events")[0] ''' Calculation of the average firing rate of the excitatory and the inhibitory neurons by dividing the total number of recorded spikes by the number of neurons recorded from and the simulation time. The multiplication by 1000.0 converts the unit 1/ms to 1/s=Hz. 
''' rate_ex = events_ex/simtime*1000.0/N_neurons rate_in = events_in/simtime*1000.0/N_neurons ''' Reading out the number of connections established using the excitatory and inhibitory synapse model. The numbers are summed up resulting in the total number of synapses. ''' num_synapses = nest.GetDefaults("excitatory")["num_connections"]+\ nest.GetDefaults("inhibitory")["num_connections"] ''' Establishing the time it took to build and simulate the network by taking the difference of the pre-defined time variables. ''' build_time = endbuild-startbuild sim_time = endsimulate-endbuild ''' Printing the network properties, firing rates and building times. ''' print("Brunel network simulation (Python)") print("Number of neurons : {0}".format(N_neurons)) print("Number of synapses: {0}".format(num_synapses)) print(" Exitatory : {0}".format(int(CE * N_neurons) + N_neurons)) print(" Inhibitory : {0}".format(int(CI * N_neurons))) print("Excitatory rate : %.2f Hz" % rate_ex) print("Inhibitory rate : %.2f Hz" % rate_in) print("Building time : %.2f s" % build_time) print("Simulation time : %.2f s" % sim_time) ''' Plot a raster of the excitatory neurons and a histogram. ''' if False: nest.raster_plot.from_device(espikes, hist=True) nest.raster_plot.from_device(ispikes, hist=True) nest.raster_plot.show()
python
def simulate(): '''instantiate and execute network simulation''' #separate model execution from parameters for safe import from other files nest.ResetKernel() ''' Configuration of the simulation kernel by the previously defined time resolution used in the simulation. Setting "print_time" to True prints the already processed simulation time as well as its percentage of the total simulation time. ''' nest.SetKernelStatus({"resolution": dt, "print_time": True, "overwrite_files": True}) print("Building network") ''' Configuration of the model `iaf_psc_alpha` and `poisson_generator` using SetDefaults(). This function expects the model to be the inserted as a string and the parameter to be specified in a dictionary. All instances of theses models created after this point will have the properties specified in the dictionary by default. ''' nest.SetDefaults("iaf_psc_alpha", neuron_params) nest.SetDefaults("poisson_generator",{"rate": p_rate}) ''' Creation of the nodes using `Create`. We store the returned handles in variables for later reference. Here the excitatory and inhibitory, as well as the poisson generator and two spike detectors. The spike detectors will later be used to record excitatory and inhibitory spikes. ''' nodes_ex = nest.Create("iaf_psc_alpha",NE) nodes_in = nest.Create("iaf_psc_alpha",NI) noise = nest.Create("poisson_generator") espikes = nest.Create("spike_detector") ispikes = nest.Create("spike_detector") print("first exc node: {}".format(nodes_ex[0])) print("first inh node: {}".format(nodes_in[0])) ''' distribute membrane potentials ''' nest.SetStatus(nodes_ex, "V_m", random.rand(len(nodes_ex))*neuron_params["V_th"]) nest.SetStatus(nodes_in, "V_m", random.rand(len(nodes_in))*neuron_params["V_th"]) ''' Configuration of the spike detectors recording excitatory and inhibitory spikes using `SetStatus`, which expects a list of node handles and a list of parameter dictionaries. 
Setting the variable "to_file" to True ensures that the spikes will be recorded in a .gdf file starting with the string assigned to label. Setting "withtime" and "withgid" to True ensures that each spike is saved to file by stating the gid of the spiking neuron and the spike time in one line. ''' nest.SetStatus(espikes,[{ "label": os.path.join(spike_output_path, label + "-EX"), "withtime": True, "withgid": True, "to_file": True, }]) nest.SetStatus(ispikes,[{ "label": os.path.join(spike_output_path, label + "-IN"), "withtime": True, "withgid": True, "to_file": True,}]) print("Connecting devices") ''' Definition of a synapse using `CopyModel`, which expects the model name of a pre-defined synapse, the name of the customary synapse and an optional parameter dictionary. The parameters defined in the dictionary will be the default parameter for the customary synapse. Here we define one synapse for the excitatory and one for the inhibitory connections giving the previously defined weights and equal delays. ''' nest.CopyModel("static_synapse","excitatory",{"weight":J_ex, "delay":delay}) nest.CopyModel("static_synapse","inhibitory",{"weight":J_in, "delay":delay}) ''' Connecting the previously defined poisson generator to the excitatory and inhibitory neurons using the excitatory synapse. Since the poisson generator is connected to all neurons in the population the default rule ('all_to_all') of Connect() is used. The synaptic properties are inserted via syn_spec which expects a dictionary when defining multiple variables or a string when simply using a pre-defined synapse. ''' if Poisson: nest.Connect(noise,nodes_ex, 'all_to_all', "excitatory") nest.Connect(noise,nodes_in,'all_to_all', "excitatory") ''' Connecting the first N_neurons nodes of the excitatory and inhibitory population to the associated spike detectors using excitatory synapses. Here the same shortcut for the specification of the synapse as defined above is used. 
''' nest.Connect(nodes_ex,espikes, 'all_to_all', "excitatory") nest.Connect(nodes_in,ispikes, 'all_to_all', "excitatory") print("Connecting network") print("Excitatory connections") ''' Connecting the excitatory population to all neurons using the pre-defined excitatory synapse. Beforehand, the connection parameter are defined in a dictionary. Here we use the connection rule 'fixed_indegree', which requires the definition of the indegree. Since the synapse specification is reduced to assigning the pre-defined excitatory synapse it suffices to insert a string. ''' conn_params_ex = {'rule': 'fixed_indegree', 'indegree': CE} nest.Connect(nodes_ex, nodes_ex+nodes_in, conn_params_ex, "excitatory") print("Inhibitory connections") ''' Connecting the inhibitory population to all neurons using the pre-defined inhibitory synapse. The connection parameter as well as the synapse paramtere are defined analogously to the connection from the excitatory population defined above. ''' conn_params_in = {'rule': 'fixed_indegree', 'indegree': CI} nest.Connect(nodes_in, nodes_ex+nodes_in, conn_params_in, "inhibitory") ''' Storage of the time point after the buildup of the network in a variable. ''' endbuild=time.time() ''' Simulation of the network. ''' print("Simulating") nest.Simulate(simtime) ''' Storage of the time point after the simulation of the network in a variable. ''' endsimulate= time.time() ''' Reading out the total number of spikes received from the spike detector connected to the excitatory population and the inhibitory population. ''' events_ex = nest.GetStatus(espikes,"n_events")[0] events_in = nest.GetStatus(ispikes,"n_events")[0] ''' Calculation of the average firing rate of the excitatory and the inhibitory neurons by dividing the total number of recorded spikes by the number of neurons recorded from and the simulation time. The multiplication by 1000.0 converts the unit 1/ms to 1/s=Hz. 
''' rate_ex = events_ex/simtime*1000.0/N_neurons rate_in = events_in/simtime*1000.0/N_neurons ''' Reading out the number of connections established using the excitatory and inhibitory synapse model. The numbers are summed up resulting in the total number of synapses. ''' num_synapses = nest.GetDefaults("excitatory")["num_connections"]+\ nest.GetDefaults("inhibitory")["num_connections"] ''' Establishing the time it took to build and simulate the network by taking the difference of the pre-defined time variables. ''' build_time = endbuild-startbuild sim_time = endsimulate-endbuild ''' Printing the network properties, firing rates and building times. ''' print("Brunel network simulation (Python)") print("Number of neurons : {0}".format(N_neurons)) print("Number of synapses: {0}".format(num_synapses)) print(" Exitatory : {0}".format(int(CE * N_neurons) + N_neurons)) print(" Inhibitory : {0}".format(int(CI * N_neurons))) print("Excitatory rate : %.2f Hz" % rate_ex) print("Inhibitory rate : %.2f Hz" % rate_in) print("Building time : %.2f s" % build_time) print("Simulation time : %.2f s" % sim_time) ''' Plot a raster of the excitatory neurons and a histogram. ''' if False: nest.raster_plot.from_device(espikes, hist=True) nest.raster_plot.from_device(ispikes, hist=True) nest.raster_plot.show()
[ "def", "simulate", "(", ")", ":", "#separate model execution from parameters for safe import from other files", "nest", ".", "ResetKernel", "(", ")", "'''\n Configuration of the simulation kernel by the previously defined time\n resolution used in the simulation. Setting \"print_time\" t...
instantiate and execute network simulation
[ "instantiate", "and", "execute", "network", "simulation" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/brunel_alpha_nest.py#L203-L427
avalente/appmetrics
appmetrics/metrics.py
new_metric
def new_metric(name, class_, *args, **kwargs): """Create a new metric of the given class. Raise DuplicateMetricError if the given name has been already registered before Internal function - use "new_<type> instead" """ with LOCK: try: item = REGISTRY[name] except KeyError: item = REGISTRY[name] = class_(*args, **kwargs) return item raise DuplicateMetricError("Metric {} already exists of type {}".format(name, type(item).__name__))
python
def new_metric(name, class_, *args, **kwargs): """Create a new metric of the given class. Raise DuplicateMetricError if the given name has been already registered before Internal function - use "new_<type> instead" """ with LOCK: try: item = REGISTRY[name] except KeyError: item = REGISTRY[name] = class_(*args, **kwargs) return item raise DuplicateMetricError("Metric {} already exists of type {}".format(name, type(item).__name__))
[ "def", "new_metric", "(", "name", ",", "class_", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "with", "LOCK", ":", "try", ":", "item", "=", "REGISTRY", "[", "name", "]", "except", "KeyError", ":", "item", "=", "REGISTRY", "[", "name", "]", ...
Create a new metric of the given class. Raise DuplicateMetricError if the given name has been already registered before Internal function - use "new_<type> instead"
[ "Create", "a", "new", "metric", "of", "the", "given", "class", "." ]
train
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/metrics.py#L35-L50
avalente/appmetrics
appmetrics/metrics.py
delete_metric
def delete_metric(name): """Remove the named metric""" with LOCK: old_metric = REGISTRY.pop(name, None) # look for the metric name in the tags and remove it for _, tags in py3comp.iteritems(TAGS): if name in tags: tags.remove(name) return old_metric
python
def delete_metric(name): """Remove the named metric""" with LOCK: old_metric = REGISTRY.pop(name, None) # look for the metric name in the tags and remove it for _, tags in py3comp.iteritems(TAGS): if name in tags: tags.remove(name) return old_metric
[ "def", "delete_metric", "(", "name", ")", ":", "with", "LOCK", ":", "old_metric", "=", "REGISTRY", ".", "pop", "(", "name", ",", "None", ")", "# look for the metric name in the tags and remove it", "for", "_", ",", "tags", "in", "py3comp", ".", "iteritems", "(...
Remove the named metric
[ "Remove", "the", "named", "metric" ]
train
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/metrics.py#L53-L64
avalente/appmetrics
appmetrics/metrics.py
new_histogram
def new_histogram(name, reservoir=None): """ Build a new histogram metric with a given reservoir object If the reservoir is not provided, a uniform reservoir with the default size is used """ if reservoir is None: reservoir = histogram.UniformReservoir(histogram.DEFAULT_UNIFORM_RESERVOIR_SIZE) return new_metric(name, histogram.Histogram, reservoir)
python
def new_histogram(name, reservoir=None): """ Build a new histogram metric with a given reservoir object If the reservoir is not provided, a uniform reservoir with the default size is used """ if reservoir is None: reservoir = histogram.UniformReservoir(histogram.DEFAULT_UNIFORM_RESERVOIR_SIZE) return new_metric(name, histogram.Histogram, reservoir)
[ "def", "new_histogram", "(", "name", ",", "reservoir", "=", "None", ")", ":", "if", "reservoir", "is", "None", ":", "reservoir", "=", "histogram", ".", "UniformReservoir", "(", "histogram", ".", "DEFAULT_UNIFORM_RESERVOIR_SIZE", ")", "return", "new_metric", "(",...
Build a new histogram metric with a given reservoir object If the reservoir is not provided, a uniform reservoir with the default size is used
[ "Build", "a", "new", "histogram", "metric", "with", "a", "given", "reservoir", "object", "If", "the", "reservoir", "is", "not", "provided", "a", "uniform", "reservoir", "with", "the", "default", "size", "is", "used" ]
train
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/metrics.py#L106-L115
avalente/appmetrics
appmetrics/metrics.py
new_histogram_with_implicit_reservoir
def new_histogram_with_implicit_reservoir(name, reservoir_type='uniform', *reservoir_args, **reservoir_kwargs): """ Build a new histogram metric and a reservoir from the given parameters """ reservoir = new_reservoir(reservoir_type, *reservoir_args, **reservoir_kwargs) return new_histogram(name, reservoir)
python
def new_histogram_with_implicit_reservoir(name, reservoir_type='uniform', *reservoir_args, **reservoir_kwargs): """ Build a new histogram metric and a reservoir from the given parameters """ reservoir = new_reservoir(reservoir_type, *reservoir_args, **reservoir_kwargs) return new_histogram(name, reservoir)
[ "def", "new_histogram_with_implicit_reservoir", "(", "name", ",", "reservoir_type", "=", "'uniform'", ",", "*", "reservoir_args", ",", "*", "*", "reservoir_kwargs", ")", ":", "reservoir", "=", "new_reservoir", "(", "reservoir_type", ",", "*", "reservoir_args", ",", ...
Build a new histogram metric and a reservoir from the given parameters
[ "Build", "a", "new", "histogram", "metric", "and", "a", "reservoir", "from", "the", "given", "parameters" ]
train
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/metrics.py#L142-L148
avalente/appmetrics
appmetrics/metrics.py
new_reservoir
def new_reservoir(reservoir_type='uniform', *reservoir_args, **reservoir_kwargs): """ Build a new reservoir """ try: reservoir_cls = RESERVOIR_TYPES[reservoir_type] except KeyError: raise InvalidMetricError("Unknown reservoir type: {}".format(reservoir_type)) return reservoir_cls(*reservoir_args, **reservoir_kwargs)
python
def new_reservoir(reservoir_type='uniform', *reservoir_args, **reservoir_kwargs): """ Build a new reservoir """ try: reservoir_cls = RESERVOIR_TYPES[reservoir_type] except KeyError: raise InvalidMetricError("Unknown reservoir type: {}".format(reservoir_type)) return reservoir_cls(*reservoir_args, **reservoir_kwargs)
[ "def", "new_reservoir", "(", "reservoir_type", "=", "'uniform'", ",", "*", "reservoir_args", ",", "*", "*", "reservoir_kwargs", ")", ":", "try", ":", "reservoir_cls", "=", "RESERVOIR_TYPES", "[", "reservoir_type", "]", "except", "KeyError", ":", "raise", "Invali...
Build a new reservoir
[ "Build", "a", "new", "reservoir" ]
train
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/metrics.py#L151-L161
avalente/appmetrics
appmetrics/metrics.py
get_or_create_histogram
def get_or_create_histogram(name, reservoir_type, *reservoir_args, **reservoir_kwargs): """ Will return a histogram matching the given parameters or raise DuplicateMetricError if it can't be created due to a name collision with another histogram with different parameters. """ reservoir = new_reservoir(reservoir_type, *reservoir_args, **reservoir_kwargs) try: hmetric = new_histogram(name, reservoir) except DuplicateMetricError: hmetric = metric(name) if not isinstance(hmetric, histogram.Histogram): raise DuplicateMetricError( "Metric {!r} already exists of type {!r}".format(name, type(hmetric).__name__)) if not hmetric.reservoir.same_kind(reservoir): raise DuplicateMetricError( "Metric {!r} already exists with a different reservoir: {}".format(name, hmetric.reservoir)) return hmetric
python
def get_or_create_histogram(name, reservoir_type, *reservoir_args, **reservoir_kwargs): """ Will return a histogram matching the given parameters or raise DuplicateMetricError if it can't be created due to a name collision with another histogram with different parameters. """ reservoir = new_reservoir(reservoir_type, *reservoir_args, **reservoir_kwargs) try: hmetric = new_histogram(name, reservoir) except DuplicateMetricError: hmetric = metric(name) if not isinstance(hmetric, histogram.Histogram): raise DuplicateMetricError( "Metric {!r} already exists of type {!r}".format(name, type(hmetric).__name__)) if not hmetric.reservoir.same_kind(reservoir): raise DuplicateMetricError( "Metric {!r} already exists with a different reservoir: {}".format(name, hmetric.reservoir)) return hmetric
[ "def", "get_or_create_histogram", "(", "name", ",", "reservoir_type", ",", "*", "reservoir_args", ",", "*", "*", "reservoir_kwargs", ")", ":", "reservoir", "=", "new_reservoir", "(", "reservoir_type", ",", "*", "reservoir_args", ",", "*", "*", "reservoir_kwargs", ...
Will return a histogram matching the given parameters or raise DuplicateMetricError if it can't be created due to a name collision with another histogram with different parameters.
[ "Will", "return", "a", "histogram", "matching", "the", "given", "parameters", "or", "raise", "DuplicateMetricError", "if", "it", "can", "t", "be", "created", "due", "to", "a", "name", "collision", "with", "another", "histogram", "with", "different", "parameters"...
train
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/metrics.py#L164-L184
avalente/appmetrics
appmetrics/metrics.py
with_histogram
def with_histogram(name, reservoir_type="uniform", *reservoir_args, **reservoir_kwargs): """ Time-measuring decorator: the time spent in the wrapped function is measured and added to the named metric. metric_args and metric_kwargs are passed to new_histogram() """ hmetric = get_or_create_histogram(name, reservoir_type, *reservoir_args, **reservoir_kwargs) def wrapper(f): @functools.wraps(f) def fun(*args, **kwargs): t1 = time.time() res = f(*args, **kwargs) t2 = time.time() hmetric.notify(t2-t1) return res return fun return wrapper
python
def with_histogram(name, reservoir_type="uniform", *reservoir_args, **reservoir_kwargs): """ Time-measuring decorator: the time spent in the wrapped function is measured and added to the named metric. metric_args and metric_kwargs are passed to new_histogram() """ hmetric = get_or_create_histogram(name, reservoir_type, *reservoir_args, **reservoir_kwargs) def wrapper(f): @functools.wraps(f) def fun(*args, **kwargs): t1 = time.time() res = f(*args, **kwargs) t2 = time.time() hmetric.notify(t2-t1) return res return fun return wrapper
[ "def", "with_histogram", "(", "name", ",", "reservoir_type", "=", "\"uniform\"", ",", "*", "reservoir_args", ",", "*", "*", "reservoir_kwargs", ")", ":", "hmetric", "=", "get_or_create_histogram", "(", "name", ",", "reservoir_type", ",", "*", "reservoir_args", "...
Time-measuring decorator: the time spent in the wrapped function is measured and added to the named metric. metric_args and metric_kwargs are passed to new_histogram()
[ "Time", "-", "measuring", "decorator", ":", "the", "time", "spent", "in", "the", "wrapped", "function", "is", "measured", "and", "added", "to", "the", "named", "metric", ".", "metric_args", "and", "metric_kwargs", "are", "passed", "to", "new_histogram", "()" ]
train
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/metrics.py#L187-L209
avalente/appmetrics
appmetrics/metrics.py
with_meter
def with_meter(name, tick_interval=meter.DEFAULT_TICK_INTERVAL): """ Call-counting decorator: each time the wrapped function is called the named meter is incremented by one. metric_args and metric_kwargs are passed to new_meter() """ try: mmetric = new_meter(name, tick_interval) except DuplicateMetricError as e: mmetric = metric(name) if not isinstance(mmetric, meter.Meter): raise DuplicateMetricError("Metric {!r} already exists of type {}".format(name, type(mmetric).__name__)) if tick_interval != mmetric.tick_interval: raise DuplicateMetricError("Metric {!r} already exists: {}".format(name, mmetric)) def wrapper(f): @functools.wraps(f) def fun(*args, **kwargs): res = f(*args, **kwargs) mmetric.notify(1) return res return fun return wrapper
python
def with_meter(name, tick_interval=meter.DEFAULT_TICK_INTERVAL): """ Call-counting decorator: each time the wrapped function is called the named meter is incremented by one. metric_args and metric_kwargs are passed to new_meter() """ try: mmetric = new_meter(name, tick_interval) except DuplicateMetricError as e: mmetric = metric(name) if not isinstance(mmetric, meter.Meter): raise DuplicateMetricError("Metric {!r} already exists of type {}".format(name, type(mmetric).__name__)) if tick_interval != mmetric.tick_interval: raise DuplicateMetricError("Metric {!r} already exists: {}".format(name, mmetric)) def wrapper(f): @functools.wraps(f) def fun(*args, **kwargs): res = f(*args, **kwargs) mmetric.notify(1) return res return fun return wrapper
[ "def", "with_meter", "(", "name", ",", "tick_interval", "=", "meter", ".", "DEFAULT_TICK_INTERVAL", ")", ":", "try", ":", "mmetric", "=", "new_meter", "(", "name", ",", "tick_interval", ")", "except", "DuplicateMetricError", "as", "e", ":", "mmetric", "=", "...
Call-counting decorator: each time the wrapped function is called the named meter is incremented by one. metric_args and metric_kwargs are passed to new_meter()
[ "Call", "-", "counting", "decorator", ":", "each", "time", "the", "wrapped", "function", "is", "called", "the", "named", "meter", "is", "incremented", "by", "one", ".", "metric_args", "and", "metric_kwargs", "are", "passed", "to", "new_meter", "()" ]
train
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/metrics.py#L212-L241
avalente/appmetrics
appmetrics/metrics.py
timer
def timer(name, reservoir_type="uniform", *reservoir_args, **reservoir_kwargs): """ Time-measuring context manager: the time spent in the wrapped block if measured and added to the named metric. """ hmetric = get_or_create_histogram(name, reservoir_type, *reservoir_args, **reservoir_kwargs) t1 = time.time() yield t2 = time.time() hmetric.notify(t2 - t1)
python
def timer(name, reservoir_type="uniform", *reservoir_args, **reservoir_kwargs): """ Time-measuring context manager: the time spent in the wrapped block if measured and added to the named metric. """ hmetric = get_or_create_histogram(name, reservoir_type, *reservoir_args, **reservoir_kwargs) t1 = time.time() yield t2 = time.time() hmetric.notify(t2 - t1)
[ "def", "timer", "(", "name", ",", "reservoir_type", "=", "\"uniform\"", ",", "*", "reservoir_args", ",", "*", "*", "reservoir_kwargs", ")", ":", "hmetric", "=", "get_or_create_histogram", "(", "name", ",", "reservoir_type", ",", "*", "reservoir_args", ",", "*"...
Time-measuring context manager: the time spent in the wrapped block if measured and added to the named metric.
[ "Time", "-", "measuring", "context", "manager", ":", "the", "time", "spent", "in", "the", "wrapped", "block", "if", "measured", "and", "added", "to", "the", "named", "metric", "." ]
train
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/metrics.py#L245-L256
avalente/appmetrics
appmetrics/metrics.py
tag
def tag(name, tag_name): """ Tag the named metric with the given tag. """ with LOCK: # just to check if <name> exists metric(name) TAGS.setdefault(tag_name, set()).add(name)
python
def tag(name, tag_name): """ Tag the named metric with the given tag. """ with LOCK: # just to check if <name> exists metric(name) TAGS.setdefault(tag_name, set()).add(name)
[ "def", "tag", "(", "name", ",", "tag_name", ")", ":", "with", "LOCK", ":", "# just to check if <name> exists", "metric", "(", "name", ")", "TAGS", ".", "setdefault", "(", "tag_name", ",", "set", "(", ")", ")", ".", "add", "(", "name", ")" ]
Tag the named metric with the given tag.
[ "Tag", "the", "named", "metric", "with", "the", "given", "tag", "." ]
train
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/metrics.py#L259-L268
avalente/appmetrics
appmetrics/metrics.py
untag
def untag(name, tag_name): """ Remove the given tag from the given metric. Return True if the metric was tagged, False otherwise """ with LOCK: by_tag = TAGS.get(tag_name, None) if not by_tag: return False try: by_tag.remove(name) # remove the tag if no associations left if not by_tag: TAGS.pop(tag_name) return True except KeyError: return False
python
def untag(name, tag_name): """ Remove the given tag from the given metric. Return True if the metric was tagged, False otherwise """ with LOCK: by_tag = TAGS.get(tag_name, None) if not by_tag: return False try: by_tag.remove(name) # remove the tag if no associations left if not by_tag: TAGS.pop(tag_name) return True except KeyError: return False
[ "def", "untag", "(", "name", ",", "tag_name", ")", ":", "with", "LOCK", ":", "by_tag", "=", "TAGS", ".", "get", "(", "tag_name", ",", "None", ")", "if", "not", "by_tag", ":", "return", "False", "try", ":", "by_tag", ".", "remove", "(", "name", ")",...
Remove the given tag from the given metric. Return True if the metric was tagged, False otherwise
[ "Remove", "the", "given", "tag", "from", "the", "given", "metric", ".", "Return", "True", "if", "the", "metric", "was", "tagged", "False", "otherwise" ]
train
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/metrics.py#L294-L313
avalente/appmetrics
appmetrics/metrics.py
metrics_by_name_list
def metrics_by_name_list(names): """ Return a dictionary with {metric name: metric value} for all the metrics with the given names. """ results = {} for name in names: # no lock - a metric could have been removed in the meanwhile try: results[name] = get(name) except InvalidMetricError: continue return results
python
def metrics_by_name_list(names): """ Return a dictionary with {metric name: metric value} for all the metrics with the given names. """ results = {} for name in names: # no lock - a metric could have been removed in the meanwhile try: results[name] = get(name) except InvalidMetricError: continue return results
[ "def", "metrics_by_name_list", "(", "names", ")", ":", "results", "=", "{", "}", "for", "name", "in", "names", ":", "# no lock - a metric could have been removed in the meanwhile", "try", ":", "results", "[", "name", "]", "=", "get", "(", "name", ")", "except", ...
Return a dictionary with {metric name: metric value} for all the metrics with the given names.
[ "Return", "a", "dictionary", "with", "{", "metric", "name", ":", "metric", "value", "}", "for", "all", "the", "metrics", "with", "the", "given", "names", "." ]
train
https://github.com/avalente/appmetrics/blob/366fc7e1ca897e49a2227cbfa43bfa02a47f1acc/appmetrics/metrics.py#L316-L329
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/figure_05.py
connectivity
def connectivity(ax): '''make an imshow of the intranetwork connectivity''' masked_array = np.ma.array(params.C_YX, mask=params.C_YX==0) # if analysis_params.bw: # cmap = plt.get_cmap(gray, 20) # cmap.set_bad('k', 1.) # else: # cmap = plt.get_cmap('hot', 20) # cmap.set_bad('k', 0.5) # im = ax.imshow(masked_array, cmap=cmap, vmin=0, interpolation='nearest') im = ax.pcolormesh(masked_array, cmap=cmap, vmin=0, ) #interpolation='nearest') ax.axis(ax.axis('tight')) ax.invert_yaxis() ax.xaxis.set_ticks_position('top') ax.set_xticks(np.arange(9)+0.5) ax.set_yticks(np.arange(8)+0.5) ax.set_xticklabels(params.X, rotation=270) ax.set_yticklabels(params.Y, ) ax.xaxis.set_label_position('top') ax.set_xlabel(r'$X$', labelpad=-1,fontsize=8) ax.set_ylabel(r'$Y$', labelpad=0, rotation=0,fontsize=8) rect = np.array(ax.get_position().bounds) rect[0] += rect[2] + 0.01 rect[2] = 0.01 fig = plt.gcf() cax = fig.add_axes(rect) cbar = plt.colorbar(im, cax=cax) #cbar.set_label(r'$C_{YX}$', ha='center') cbar.set_label(r'$C_{YX}$', labelpad=0)
python
def connectivity(ax): '''make an imshow of the intranetwork connectivity''' masked_array = np.ma.array(params.C_YX, mask=params.C_YX==0) # if analysis_params.bw: # cmap = plt.get_cmap(gray, 20) # cmap.set_bad('k', 1.) # else: # cmap = plt.get_cmap('hot', 20) # cmap.set_bad('k', 0.5) # im = ax.imshow(masked_array, cmap=cmap, vmin=0, interpolation='nearest') im = ax.pcolormesh(masked_array, cmap=cmap, vmin=0, ) #interpolation='nearest') ax.axis(ax.axis('tight')) ax.invert_yaxis() ax.xaxis.set_ticks_position('top') ax.set_xticks(np.arange(9)+0.5) ax.set_yticks(np.arange(8)+0.5) ax.set_xticklabels(params.X, rotation=270) ax.set_yticklabels(params.Y, ) ax.xaxis.set_label_position('top') ax.set_xlabel(r'$X$', labelpad=-1,fontsize=8) ax.set_ylabel(r'$Y$', labelpad=0, rotation=0,fontsize=8) rect = np.array(ax.get_position().bounds) rect[0] += rect[2] + 0.01 rect[2] = 0.01 fig = plt.gcf() cax = fig.add_axes(rect) cbar = plt.colorbar(im, cax=cax) #cbar.set_label(r'$C_{YX}$', ha='center') cbar.set_label(r'$C_{YX}$', labelpad=0)
[ "def", "connectivity", "(", "ax", ")", ":", "masked_array", "=", "np", ".", "ma", ".", "array", "(", "params", ".", "C_YX", ",", "mask", "=", "params", ".", "C_YX", "==", "0", ")", "# if analysis_params.bw:", "# cmap = plt.get_cmap(gray, 20)", "# cmap....
make an imshow of the intranetwork connectivity
[ "make", "an", "imshow", "of", "the", "intranetwork", "connectivity" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/figure_05.py#L26-L57
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/figure_05.py
cell_type_specificity
def cell_type_specificity(ax): '''make an imshow of the intranetwork connectivity''' masked_array = np.ma.array(params.T_yX, mask=params.T_yX==0) # cmap = plt.get_cmap('hot', 20) # cmap.set_bad('k', 0.5) # im = ax.imshow(masked_array, cmap=cmap, vmin=0, interpolation='nearest') im = ax.pcolormesh(masked_array, cmap=cmap, vmin=0, ) #interpolation='nearest') ax.axis(ax.axis('tight')) ax.invert_yaxis() ax.xaxis.set_ticks_position('top') ax.set_xticks(np.arange(9)+0.5) ax.set_yticks(np.arange(16)+0.5) ax.set_xticklabels(params.X, rotation=270) ax.set_yticklabels(params.y, ) ax.xaxis.set_label_position('top') ax.set_xlabel(r'$X$', labelpad=-1,fontsize=8) ax.set_ylabel(r'$y$', labelpad=0, rotation=0,fontsize=8) rect = np.array(ax.get_position().bounds) rect[0] += rect[2] + 0.01 rect[2] = 0.01 fig = plt.gcf() cax = fig.add_axes(rect) cbar = plt.colorbar(im, cax=cax) #cbar.set_label(r'$\mathcal{T}_{yX}$', ha='center') cbar.set_label(r'$\mathcal{T}_{yX}$', labelpad=0)
python
def cell_type_specificity(ax): '''make an imshow of the intranetwork connectivity''' masked_array = np.ma.array(params.T_yX, mask=params.T_yX==0) # cmap = plt.get_cmap('hot', 20) # cmap.set_bad('k', 0.5) # im = ax.imshow(masked_array, cmap=cmap, vmin=0, interpolation='nearest') im = ax.pcolormesh(masked_array, cmap=cmap, vmin=0, ) #interpolation='nearest') ax.axis(ax.axis('tight')) ax.invert_yaxis() ax.xaxis.set_ticks_position('top') ax.set_xticks(np.arange(9)+0.5) ax.set_yticks(np.arange(16)+0.5) ax.set_xticklabels(params.X, rotation=270) ax.set_yticklabels(params.y, ) ax.xaxis.set_label_position('top') ax.set_xlabel(r'$X$', labelpad=-1,fontsize=8) ax.set_ylabel(r'$y$', labelpad=0, rotation=0,fontsize=8) rect = np.array(ax.get_position().bounds) rect[0] += rect[2] + 0.01 rect[2] = 0.01 fig = plt.gcf() cax = fig.add_axes(rect) cbar = plt.colorbar(im, cax=cax) #cbar.set_label(r'$\mathcal{T}_{yX}$', ha='center') cbar.set_label(r'$\mathcal{T}_{yX}$', labelpad=0)
[ "def", "cell_type_specificity", "(", "ax", ")", ":", "masked_array", "=", "np", ".", "ma", ".", "array", "(", "params", ".", "T_yX", ",", "mask", "=", "params", ".", "T_yX", "==", "0", ")", "# cmap = plt.get_cmap('hot', 20)", "# cmap.set_bad('k', 0.5)", "# im ...
make an imshow of the intranetwork connectivity
[ "make", "an", "imshow", "of", "the", "intranetwork", "connectivity" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/figure_05.py#L60-L86
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/figure_05.py
quantity_yXL
def quantity_yXL(fig, left, bottom, top, quantity=params.L_yXL, label=r'$\mathcal{L}_{yXL}$'): '''make a bunch of image plots, each showing the spatial normalized connectivity of synapses''' layers = ['L1', 'L2/3', 'L4', 'L5', 'L6'] ncols = len(params.y) / 4 #assess vlims vmin = 0 vmax = 0 for y in params.y: if quantity[y].max() > vmax: vmax = quantity[y].max() gs = gridspec.GridSpec(4, 4, left=left, bottom=bottom, top=top) for i, y in enumerate(params.y): ax = fig.add_subplot(gs[i/4, i%4]) masked_array = np.ma.array(quantity[y], mask=quantity[y]==0) # cmap = plt.get_cmap('hot', 20) # cmap.set_bad('k', 0.5) # im = ax.imshow(masked_array, im = ax.pcolormesh(masked_array, vmin=vmin, vmax=vmax, cmap=cmap, #interpolation='nearest', ) ax.invert_yaxis() ax.axis(ax.axis('tight')) ax.xaxis.set_ticks_position('top') ax.set_xticks(np.arange(9)+0.5) ax.set_yticks(np.arange(5)+0.5) #if divmod(i, 4)[1] == 0: if i % 4 == 0: ax.set_yticklabels(layers, ) ax.set_ylabel('$L$', labelpad=0.) else: ax.set_yticklabels([]) if i < 4: ax.set_xlabel(r'$X$', labelpad=-1,fontsize=8) ax.set_xticklabels(params.X, rotation=270) else: ax.set_xticklabels([]) ax.xaxis.set_label_position('top') ax.text(0.5, -0.13, r'$y=$'+y, horizontalalignment='center', verticalalignment='center', # transform=ax.transAxes,fontsize=5.5) #colorbar rect = np.array(ax.get_position().bounds) rect[0] += rect[2] + 0.01 rect[1] = bottom rect[2] = 0.01 rect[3] = top-bottom cax = fig.add_axes(rect) cbar = plt.colorbar(im, cax=cax) #cbar.set_label(label, ha='center') cbar.set_label(label, labelpad=0)
python
def quantity_yXL(fig, left, bottom, top, quantity=params.L_yXL, label=r'$\mathcal{L}_{yXL}$'): '''make a bunch of image plots, each showing the spatial normalized connectivity of synapses''' layers = ['L1', 'L2/3', 'L4', 'L5', 'L6'] ncols = len(params.y) / 4 #assess vlims vmin = 0 vmax = 0 for y in params.y: if quantity[y].max() > vmax: vmax = quantity[y].max() gs = gridspec.GridSpec(4, 4, left=left, bottom=bottom, top=top) for i, y in enumerate(params.y): ax = fig.add_subplot(gs[i/4, i%4]) masked_array = np.ma.array(quantity[y], mask=quantity[y]==0) # cmap = plt.get_cmap('hot', 20) # cmap.set_bad('k', 0.5) # im = ax.imshow(masked_array, im = ax.pcolormesh(masked_array, vmin=vmin, vmax=vmax, cmap=cmap, #interpolation='nearest', ) ax.invert_yaxis() ax.axis(ax.axis('tight')) ax.xaxis.set_ticks_position('top') ax.set_xticks(np.arange(9)+0.5) ax.set_yticks(np.arange(5)+0.5) #if divmod(i, 4)[1] == 0: if i % 4 == 0: ax.set_yticklabels(layers, ) ax.set_ylabel('$L$', labelpad=0.) else: ax.set_yticklabels([]) if i < 4: ax.set_xlabel(r'$X$', labelpad=-1,fontsize=8) ax.set_xticklabels(params.X, rotation=270) else: ax.set_xticklabels([]) ax.xaxis.set_label_position('top') ax.text(0.5, -0.13, r'$y=$'+y, horizontalalignment='center', verticalalignment='center', # transform=ax.transAxes,fontsize=5.5) #colorbar rect = np.array(ax.get_position().bounds) rect[0] += rect[2] + 0.01 rect[1] = bottom rect[2] = 0.01 rect[3] = top-bottom cax = fig.add_axes(rect) cbar = plt.colorbar(im, cax=cax) #cbar.set_label(label, ha='center') cbar.set_label(label, labelpad=0)
[ "def", "quantity_yXL", "(", "fig", ",", "left", ",", "bottom", ",", "top", ",", "quantity", "=", "params", ".", "L_yXL", ",", "label", "=", "r'$\\mathcal{L}_{yXL}$'", ")", ":", "layers", "=", "[", "'L1'", ",", "'L2/3'", ",", "'L4'", ",", "'L5'", ",", ...
make a bunch of image plots, each showing the spatial normalized connectivity of synapses
[ "make", "a", "bunch", "of", "image", "plots", "each", "showing", "the", "spatial", "normalized", "connectivity", "of", "synapses" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/figure_05.py#L89-L155
gersolar/noaaclass
noaaclass/noaaclass.py
Connection.get
def get(self, url, proto='http'): """ Load an url using the GET method. Keyword arguments: url -- the Universal Resource Location proto -- the protocol (default 'http') """ self.last_response = self.session.get(proto + self.base_uri + url, headers=self.headers, cookies=self.cookies, allow_redirects=True, verify=self.verify) return self.last_response_soup
python
def get(self, url, proto='http'): """ Load an url using the GET method. Keyword arguments: url -- the Universal Resource Location proto -- the protocol (default 'http') """ self.last_response = self.session.get(proto + self.base_uri + url, headers=self.headers, cookies=self.cookies, allow_redirects=True, verify=self.verify) return self.last_response_soup
[ "def", "get", "(", "self", ",", "url", ",", "proto", "=", "'http'", ")", ":", "self", ".", "last_response", "=", "self", ".", "session", ".", "get", "(", "proto", "+", "self", ".", "base_uri", "+", "url", ",", "headers", "=", "self", ".", "headers"...
Load an url using the GET method. Keyword arguments: url -- the Universal Resource Location proto -- the protocol (default 'http')
[ "Load", "an", "url", "using", "the", "GET", "method", "." ]
train
https://github.com/gersolar/noaaclass/blob/a60601bda78620ff5ae11abdac92de747acfbfbe/noaaclass/noaaclass.py#L191-L204
gersolar/noaaclass
noaaclass/noaaclass.py
Connection.post
def post(self, url, data, proto='http', form_name=None): """ Load an url using the POST method. Keyword arguments: url -- the Universal Resource Location data -- the form to be sent proto -- the protocol (default 'http') form_name -- the form name to search the default values """ form = self.translator.fill_form(self.last_response_soup, form_name if form_name else url, data) self.last_response = self.session.post(proto + self.base_uri + url, headers=self.headers, cookies=self.cookies, data=form, allow_redirects=True, verify=self.verify) return self.last_response_soup
python
def post(self, url, data, proto='http', form_name=None): """ Load an url using the POST method. Keyword arguments: url -- the Universal Resource Location data -- the form to be sent proto -- the protocol (default 'http') form_name -- the form name to search the default values """ form = self.translator.fill_form(self.last_response_soup, form_name if form_name else url, data) self.last_response = self.session.post(proto + self.base_uri + url, headers=self.headers, cookies=self.cookies, data=form, allow_redirects=True, verify=self.verify) return self.last_response_soup
[ "def", "post", "(", "self", ",", "url", ",", "data", ",", "proto", "=", "'http'", ",", "form_name", "=", "None", ")", ":", "form", "=", "self", ".", "translator", ".", "fill_form", "(", "self", ".", "last_response_soup", ",", "form_name", "if", "form_n...
Load an url using the POST method. Keyword arguments: url -- the Universal Resource Location data -- the form to be sent proto -- the protocol (default 'http') form_name -- the form name to search the default values
[ "Load", "an", "url", "using", "the", "POST", "method", "." ]
train
https://github.com/gersolar/noaaclass/blob/a60601bda78620ff5ae11abdac92de747acfbfbe/noaaclass/noaaclass.py#L238-L256
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/figure_13.py
fig_kernel_lfp
def fig_kernel_lfp(savefolders, params, transient=200, T=[800., 1000.], X='L5E', lags=[20, 20], channels=[0,3,7,11,13]): ''' This function calculates the STA of LFP, extracts kernels and recontructs the LFP from kernels. Arguments :: transient : the time in milliseconds, after which the analysis should begin so as to avoid any starting transients X : id of presynaptic trigger population ''' # Electrode geometry zvec = np.r_[params.electrodeParams['z']] alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' ana_params.set_PLOS_2column_fig_style(ratio=1) # Start the figure fig = plt.figure() fig.subplots_adjust(left=0.06, right=0.95, bottom=0.05, top=0.95, hspace=0.23, wspace=0.55) # create grid_spec gs = gridspec.GridSpec(2*len(channels)+1, 7) ########################################################################### # spikegen "network" activity ############################################################################ # path to simulation files params.savefolder = os.path.join(os.path.split(params.savefolder)[0], 'simulation_output_spikegen') params.figures_path = os.path.join(params.savefolder, 'figures') params.spike_output_path = os.path.join(params.savefolder, 'processed_nest_output') params.networkSimParams['spike_output_path'] = params.spike_output_path # Get the spikegen LFP: f = h5py.File(os.path.join(params.savefolder, 'LFPsum.h5')) srate = f['srate'].value tvec = np.arange(f['data'].shape[1]) * 1000. 
/ srate # slice inds = (tvec < params.tstop) & (tvec >= transient) data_sg_raw = f['data'].value.astype(float) data_sg = data_sg_raw[:, inds] f.close() # kernel width kwidth = 20 # create some dummy spike times activationtimes = np.array([x*100 for x in range(3,11)] + [200]) networkSimSpikegen = CachedNetwork(**params.networkSimParams) x, y = networkSimSpikegen.get_xy([transient, params.tstop]) ########################################################################### # Part A: spatiotemporal kernels, all presynaptic populations ############################################################################ titles = ['TC', 'L23E/I', 'LFP kernels \n L4E/I', 'L5E/I', 'L6E/I', ] COUNTER = 0 for i, X__ in enumerate(([['TC']]) + zip(params.X[1::2], params.X[2::2])): ax = fig.add_subplot(gs[:len(channels), i]) if i == 0: phlp.annotate_subplot(ax, ncols=7, nrows=4, letter=alphabet[0], linear_offset=0.02) for j, X_ in enumerate(X__): # create spikegen histogram for population Y cinds = np.arange(activationtimes[np.arange(-1, 8)][COUNTER]-kwidth, activationtimes[np.arange(-1, 8)][COUNTER]+kwidth+2) x0_sg = np.histogram(x[X_], bins=cinds)[0].astype(float) if X_ == ('TC'): color='k' if analysis_params.bw else analysis_params.colorE # lw = plt.rcParams['lines.linewidth'] # zorder=1 else: color=('k' if analysis_params.bw else analysis_params.colorE, 'gray' if analysis_params.bw else analysis_params.colorI)[j] lw = 0.75 if color in ['gray', 'r', 'b'] else plt.rcParams['lines.linewidth'] zorder = 0 if 'I' in X_ else 1 # plot kernel as correlation of spikegen LFP signal with delta spike train xcorr, vlimround = plotting_correlation(params, x0_sg/x0_sg.sum()**2, data_sg_raw[:, cinds[:-1]]*1E3, ax, normalize=False, lag=kwidth, color=color, scalebar=False, lw=lw, zorder=zorder) if i > 0: ax.set_yticklabels([]) ## Create scale bar ax.plot([kwidth, kwidth], [-1500 + j*3*100, -1400 + j*3*100], lw=2, color=color, clip_on=False) ax.text(kwidth*1.08, -1450 + j*3*100, '%.1f $\mu$V' % 
vlimround, rotation='vertical', va='center') ax.set_xlim((-5, kwidth)) ax.set_xticks([-20, 0, 20]) ax.set_xticklabels([-20, 0, 20]) COUNTER += 1 ax.set_title(titles[i]) ################################################ # Iterate over savefolders ################################################ for i, (savefolder, lag) in enumerate(zip(savefolders, lags)): # path to simulation files params.savefolder = os.path.join(os.path.split(params.savefolder)[0], savefolder) params.figures_path = os.path.join(params.savefolder, 'figures') params.spike_output_path = os.path.join(params.savefolder, 'processed_nest_output') params.networkSimParams['spike_output_path'] = params.spike_output_path #load spike as database inside function to avoid buggy behaviour networkSim = CachedNetwork(**params.networkSimParams) # Get the Compound LFP: LFPsum : data[nchannels, timepoints ] f = h5py.File(os.path.join(params.savefolder, 'LFPsum.h5')) data_raw = f['data'].value srate = f['srate'].value tvec = np.arange(data_raw.shape[1]) * 1000. / srate # slice inds = (tvec < params.tstop) & (tvec >= transient) data = data_raw[:, inds] # subtract mean dataT = data.T - data.mean(axis=1) data = dataT.T f.close() # Get the spikegen LFP: f = h5py.File(os.path.join(os.path.split(params.savefolder)[0], 'simulation_output_spikegen', 'LFPsum.h5')) data_sg_raw = f['data'].value f.close() ######################################################################## # Part B: STA LFP ######################################################################## titles = ['stLFP(%s)\n(spont.)' % X, 'stLFP(%s)\n(AC. mod.)' % X] ax = fig.add_subplot(gs[:len(channels), 5 + i]) if i == 0: phlp.annotate_subplot(ax, ncols=15, nrows=4, letter=alphabet[i+1], linear_offset=0.02) #collect the spikes x is the times, y is the id of the cell. 
x, y = networkSim.get_xy([0,params.tstop]) # Get the spikes for the population of interest given as 'Y' bins = np.arange(0, params.tstop+2) + 0.5 x0_raw = np.histogram(x[X], bins=bins)[0] x0 = x0_raw[inds].astype(float) # correlation between firing rate and LFP deviation # from mean normalized by the number of spikes xcorr, vlimround = plotting_correlation(params, x0/x0.sum(), data*1E3, ax, normalize=False, #unit='%.3f mV', lag=lag, scalebar=False, color='k', title=titles[i], ) # Create scale bar ax.plot([lag, lag], [-1500, -1400], lw=2, color='k', clip_on=False) ax.text(lag*1.08, -1450, '%.1f $\mu$V' % vlimround, rotation='vertical', va='center') [Xind] = np.where(np.array(networkSim.X) == X)[0] # create spikegen histogram for population Y x0_sg = np.zeros(x0.shape, dtype=float) x0_sg[activationtimes[Xind]] += params.N_X[Xind] ax.set_yticklabels([]) ax.set_xticks([-lag, 0, lag]) ax.set_xticklabels([-lag, 0, lag]) ########################################################################### # Part C, F: LFP and reconstructed LFP ############################################################################ # create grid_spec gsb = gridspec.GridSpec(2*len(channels)+1, 8) ax = fig.add_subplot(gsb[1+len(channels):, (i*4):(i*4+2)]) phlp.annotate_subplot(ax, ncols=8/2., nrows=4, letter=alphabet[i*3+2], linear_offset=0.02) # extract kernels, force negative lags to be zero kernels = np.zeros((len(params.N_X), 16, kwidth*2)) for j in range(len(params.X)): kernels[j, :, kwidth:] = data_sg_raw[:, (j+2)*100:kwidth+(j+2)*100]/params.N_X[j] LFP_reconst_raw = np.zeros(data_raw.shape) for j, pop in enumerate(params.X): x0_raw = np.histogram(x[pop], bins=bins)[0].astype(float) for ch in range(kernels.shape[1]): LFP_reconst_raw[ch] += np.convolve(x0_raw, kernels[j, ch], 'same') # slice LFP_reconst = LFP_reconst_raw[:, inds] # subtract mean LFP_reconstT = LFP_reconst.T - LFP_reconst.mean(axis=1) LFP_reconst = LFP_reconstT.T vlimround = plot_signal_sum(ax, params, 
fname=os.path.join(params.savefolder, 'LFPsum.h5'), unit='mV', scalebar=True, T=T, ylim=[-1550, 50], color='k', label='$real$', rasterized=False, zorder=1) plot_signal_sum(ax, params, fname=LFP_reconst_raw, unit='mV', scaling_factor= 1., scalebar=False, vlimround=vlimround, T=T, ylim=[-1550, 50], color='gray' if analysis_params.bw else analysis_params.colorP, label='$reconstr$', rasterized=False, lw=1, zorder=0) ax.set_title('LFP & population \n rate predictor') if i > 0: ax.set_yticklabels([]) ########################################################################### # Part D,G: Correlation coefficient ############################################################################ ax = fig.add_subplot(gsb[1+len(channels):, i*4+2:i*4+3]) phlp.remove_axis_junk(ax) phlp.annotate_subplot(ax, ncols=8./1, nrows=4, letter=alphabet[i*3+3], linear_offset=0.02) cc = np.zeros(len(zvec)) for ch in np.arange(len(zvec)): cc[ch] = np.corrcoef(data[ch], LFP_reconst[ch])[1, 0] ax.barh(zvec, cc, height=80, align='center', color='0.5', linewidth=0.5) # superimpose the chance level, obtained by mixing one input vector n times # while keeping the other fixed. We show boxes drawn left to right where # these denote mean +/- two standard deviations. 
N = 1000 method = 'randphase' #or 'permute' chance = np.zeros((cc.size, N)) for ch in np.arange(len(zvec)): x1 = LFP_reconst[ch] x1 -= x1.mean() if method == 'randphase': x0 = data[ch] x0 -= x0.mean() X00 = np.fft.fft(x0) for n in range(N): if method == 'permute': x0 = np.random.permutation(datas[ch]) elif method == 'randphase': X0 = np.copy(X00) #random phase information such that spectra is preserved theta = np.random.uniform(0, 2*np.pi, size=X0.size // 2-1) #half-sided real and imaginary component real = abs(X0[1:X0.size // 2])*np.cos(theta) imag = abs(X0[1:X0.size // 2])*np.sin(theta) #account for the antisymmetric phase values X0.imag[1:imag.size+1] = imag X0.imag[imag.size+2:] = -imag[::-1] X0.real[1:real.size+1] = real X0.real[real.size+2:] = real[::-1] x0 = np.fft.ifft(X0).real chance[ch, n] = np.corrcoef(x0, x1)[1, 0] # p-values, compute the fraction of chance correlations > cc at each channel p = [] for h, x in enumerate(cc): p += [(chance[h, ] >= x).sum() / float(N)] print('p-values:', p) #compute the 99% percentile of the chance data right = np.percentile(chance, 99, axis=-1) ax.plot(right, zvec, ':', color='k', lw=1.) ax.set_ylim([-1550, 50]) ax.set_yticklabels([]) ax.set_yticks(zvec) ax.set_xlim([0, 1.]) ax.set_xticks([0.0, 0.5, 1]) ax.yaxis.tick_left() ax.set_xlabel('$cc$ (-)', labelpad=0.1) ax.set_title('corr. 
\n coef.') print 'correlation coefficients:' print cc ########################################################################### # Part E,H: Power spectra ############################################################################ #compute PSDs ratio between ground truth and estimate freqs, PSD_data = calc_signal_power(params, fname=data, transient=transient, Df=None, mlab=True, NFFT=256, noverlap=128, window=plt.mlab.window_hanning) freqs, PSD_LFP_reconst = calc_signal_power(params, fname=LFP_reconst, transient=transient, Df=None, mlab=True, NFFT=256, noverlap=128, window=plt.mlab.window_hanning) zv = np.r_[params.electrodeParams['z']] zv = np.r_[zv, zv[-1] + np.diff(zv)[-1]] inds = freqs >= 1 # frequencies greater than 1 Hz for j, ch in enumerate(channels): ax = fig.add_subplot(gsb[1+len(channels)+j, (i*4+3):(i*4+4)]) if j == 0: phlp.annotate_subplot(ax, ncols=8./1, nrows=4.5*len(channels), letter=alphabet[i*3+4], linear_offset=0.02) ax.set_title('PSD') phlp.remove_axis_junk(ax) ax.loglog(freqs[inds], PSD_data[ch, inds], 'k', label='LFP', clip_on=True, zorder=1) ax.loglog(freqs[inds], PSD_LFP_reconst[ch, inds], 'gray' if analysis_params.bw else analysis_params.colorP, label='predictor', clip_on=True, lw=1, zorder=0) ax.set_xlim([4E0,4E2]) ax.set_ylim([1E-8, 1E-4]) ax.tick_params(axis='y', which='major', pad=0) ax.set_yticks([1E-8,1E-6,1E-4]) ax.yaxis.set_minor_locator(plt.NullLocator()) ax.text(0.8, 0.9, 'ch. %i' % (ch+1), horizontalalignment='left', verticalalignment='center', fontsize=6, transform=ax.transAxes) if j == 0: ax.set_ylabel('(mV$^2$/Hz)', labelpad=0.) if j > 0: ax.set_yticklabels([]) if j == len(channels)-1: ax.set_xlabel(r'$f$ (Hz)', labelpad=0.) else: ax.set_xticklabels([]) return fig, PSD_LFP_reconst, PSD_data
python
def fig_kernel_lfp(savefolders, params, transient=200, T=[800., 1000.], X='L5E', lags=[20, 20], channels=[0,3,7,11,13]): ''' This function calculates the STA of LFP, extracts kernels and recontructs the LFP from kernels. Arguments :: transient : the time in milliseconds, after which the analysis should begin so as to avoid any starting transients X : id of presynaptic trigger population ''' # Electrode geometry zvec = np.r_[params.electrodeParams['z']] alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' ana_params.set_PLOS_2column_fig_style(ratio=1) # Start the figure fig = plt.figure() fig.subplots_adjust(left=0.06, right=0.95, bottom=0.05, top=0.95, hspace=0.23, wspace=0.55) # create grid_spec gs = gridspec.GridSpec(2*len(channels)+1, 7) ########################################################################### # spikegen "network" activity ############################################################################ # path to simulation files params.savefolder = os.path.join(os.path.split(params.savefolder)[0], 'simulation_output_spikegen') params.figures_path = os.path.join(params.savefolder, 'figures') params.spike_output_path = os.path.join(params.savefolder, 'processed_nest_output') params.networkSimParams['spike_output_path'] = params.spike_output_path # Get the spikegen LFP: f = h5py.File(os.path.join(params.savefolder, 'LFPsum.h5')) srate = f['srate'].value tvec = np.arange(f['data'].shape[1]) * 1000. 
/ srate # slice inds = (tvec < params.tstop) & (tvec >= transient) data_sg_raw = f['data'].value.astype(float) data_sg = data_sg_raw[:, inds] f.close() # kernel width kwidth = 20 # create some dummy spike times activationtimes = np.array([x*100 for x in range(3,11)] + [200]) networkSimSpikegen = CachedNetwork(**params.networkSimParams) x, y = networkSimSpikegen.get_xy([transient, params.tstop]) ########################################################################### # Part A: spatiotemporal kernels, all presynaptic populations ############################################################################ titles = ['TC', 'L23E/I', 'LFP kernels \n L4E/I', 'L5E/I', 'L6E/I', ] COUNTER = 0 for i, X__ in enumerate(([['TC']]) + zip(params.X[1::2], params.X[2::2])): ax = fig.add_subplot(gs[:len(channels), i]) if i == 0: phlp.annotate_subplot(ax, ncols=7, nrows=4, letter=alphabet[0], linear_offset=0.02) for j, X_ in enumerate(X__): # create spikegen histogram for population Y cinds = np.arange(activationtimes[np.arange(-1, 8)][COUNTER]-kwidth, activationtimes[np.arange(-1, 8)][COUNTER]+kwidth+2) x0_sg = np.histogram(x[X_], bins=cinds)[0].astype(float) if X_ == ('TC'): color='k' if analysis_params.bw else analysis_params.colorE # lw = plt.rcParams['lines.linewidth'] # zorder=1 else: color=('k' if analysis_params.bw else analysis_params.colorE, 'gray' if analysis_params.bw else analysis_params.colorI)[j] lw = 0.75 if color in ['gray', 'r', 'b'] else plt.rcParams['lines.linewidth'] zorder = 0 if 'I' in X_ else 1 # plot kernel as correlation of spikegen LFP signal with delta spike train xcorr, vlimround = plotting_correlation(params, x0_sg/x0_sg.sum()**2, data_sg_raw[:, cinds[:-1]]*1E3, ax, normalize=False, lag=kwidth, color=color, scalebar=False, lw=lw, zorder=zorder) if i > 0: ax.set_yticklabels([]) ## Create scale bar ax.plot([kwidth, kwidth], [-1500 + j*3*100, -1400 + j*3*100], lw=2, color=color, clip_on=False) ax.text(kwidth*1.08, -1450 + j*3*100, '%.1f $\mu$V' % 
vlimround, rotation='vertical', va='center') ax.set_xlim((-5, kwidth)) ax.set_xticks([-20, 0, 20]) ax.set_xticklabels([-20, 0, 20]) COUNTER += 1 ax.set_title(titles[i]) ################################################ # Iterate over savefolders ################################################ for i, (savefolder, lag) in enumerate(zip(savefolders, lags)): # path to simulation files params.savefolder = os.path.join(os.path.split(params.savefolder)[0], savefolder) params.figures_path = os.path.join(params.savefolder, 'figures') params.spike_output_path = os.path.join(params.savefolder, 'processed_nest_output') params.networkSimParams['spike_output_path'] = params.spike_output_path #load spike as database inside function to avoid buggy behaviour networkSim = CachedNetwork(**params.networkSimParams) # Get the Compound LFP: LFPsum : data[nchannels, timepoints ] f = h5py.File(os.path.join(params.savefolder, 'LFPsum.h5')) data_raw = f['data'].value srate = f['srate'].value tvec = np.arange(data_raw.shape[1]) * 1000. / srate # slice inds = (tvec < params.tstop) & (tvec >= transient) data = data_raw[:, inds] # subtract mean dataT = data.T - data.mean(axis=1) data = dataT.T f.close() # Get the spikegen LFP: f = h5py.File(os.path.join(os.path.split(params.savefolder)[0], 'simulation_output_spikegen', 'LFPsum.h5')) data_sg_raw = f['data'].value f.close() ######################################################################## # Part B: STA LFP ######################################################################## titles = ['stLFP(%s)\n(spont.)' % X, 'stLFP(%s)\n(AC. mod.)' % X] ax = fig.add_subplot(gs[:len(channels), 5 + i]) if i == 0: phlp.annotate_subplot(ax, ncols=15, nrows=4, letter=alphabet[i+1], linear_offset=0.02) #collect the spikes x is the times, y is the id of the cell. 
x, y = networkSim.get_xy([0,params.tstop]) # Get the spikes for the population of interest given as 'Y' bins = np.arange(0, params.tstop+2) + 0.5 x0_raw = np.histogram(x[X], bins=bins)[0] x0 = x0_raw[inds].astype(float) # correlation between firing rate and LFP deviation # from mean normalized by the number of spikes xcorr, vlimround = plotting_correlation(params, x0/x0.sum(), data*1E3, ax, normalize=False, #unit='%.3f mV', lag=lag, scalebar=False, color='k', title=titles[i], ) # Create scale bar ax.plot([lag, lag], [-1500, -1400], lw=2, color='k', clip_on=False) ax.text(lag*1.08, -1450, '%.1f $\mu$V' % vlimround, rotation='vertical', va='center') [Xind] = np.where(np.array(networkSim.X) == X)[0] # create spikegen histogram for population Y x0_sg = np.zeros(x0.shape, dtype=float) x0_sg[activationtimes[Xind]] += params.N_X[Xind] ax.set_yticklabels([]) ax.set_xticks([-lag, 0, lag]) ax.set_xticklabels([-lag, 0, lag]) ########################################################################### # Part C, F: LFP and reconstructed LFP ############################################################################ # create grid_spec gsb = gridspec.GridSpec(2*len(channels)+1, 8) ax = fig.add_subplot(gsb[1+len(channels):, (i*4):(i*4+2)]) phlp.annotate_subplot(ax, ncols=8/2., nrows=4, letter=alphabet[i*3+2], linear_offset=0.02) # extract kernels, force negative lags to be zero kernels = np.zeros((len(params.N_X), 16, kwidth*2)) for j in range(len(params.X)): kernels[j, :, kwidth:] = data_sg_raw[:, (j+2)*100:kwidth+(j+2)*100]/params.N_X[j] LFP_reconst_raw = np.zeros(data_raw.shape) for j, pop in enumerate(params.X): x0_raw = np.histogram(x[pop], bins=bins)[0].astype(float) for ch in range(kernels.shape[1]): LFP_reconst_raw[ch] += np.convolve(x0_raw, kernels[j, ch], 'same') # slice LFP_reconst = LFP_reconst_raw[:, inds] # subtract mean LFP_reconstT = LFP_reconst.T - LFP_reconst.mean(axis=1) LFP_reconst = LFP_reconstT.T vlimround = plot_signal_sum(ax, params, 
fname=os.path.join(params.savefolder, 'LFPsum.h5'), unit='mV', scalebar=True, T=T, ylim=[-1550, 50], color='k', label='$real$', rasterized=False, zorder=1) plot_signal_sum(ax, params, fname=LFP_reconst_raw, unit='mV', scaling_factor= 1., scalebar=False, vlimround=vlimround, T=T, ylim=[-1550, 50], color='gray' if analysis_params.bw else analysis_params.colorP, label='$reconstr$', rasterized=False, lw=1, zorder=0) ax.set_title('LFP & population \n rate predictor') if i > 0: ax.set_yticklabels([]) ########################################################################### # Part D,G: Correlation coefficient ############################################################################ ax = fig.add_subplot(gsb[1+len(channels):, i*4+2:i*4+3]) phlp.remove_axis_junk(ax) phlp.annotate_subplot(ax, ncols=8./1, nrows=4, letter=alphabet[i*3+3], linear_offset=0.02) cc = np.zeros(len(zvec)) for ch in np.arange(len(zvec)): cc[ch] = np.corrcoef(data[ch], LFP_reconst[ch])[1, 0] ax.barh(zvec, cc, height=80, align='center', color='0.5', linewidth=0.5) # superimpose the chance level, obtained by mixing one input vector n times # while keeping the other fixed. We show boxes drawn left to right where # these denote mean +/- two standard deviations. 
N = 1000 method = 'randphase' #or 'permute' chance = np.zeros((cc.size, N)) for ch in np.arange(len(zvec)): x1 = LFP_reconst[ch] x1 -= x1.mean() if method == 'randphase': x0 = data[ch] x0 -= x0.mean() X00 = np.fft.fft(x0) for n in range(N): if method == 'permute': x0 = np.random.permutation(datas[ch]) elif method == 'randphase': X0 = np.copy(X00) #random phase information such that spectra is preserved theta = np.random.uniform(0, 2*np.pi, size=X0.size // 2-1) #half-sided real and imaginary component real = abs(X0[1:X0.size // 2])*np.cos(theta) imag = abs(X0[1:X0.size // 2])*np.sin(theta) #account for the antisymmetric phase values X0.imag[1:imag.size+1] = imag X0.imag[imag.size+2:] = -imag[::-1] X0.real[1:real.size+1] = real X0.real[real.size+2:] = real[::-1] x0 = np.fft.ifft(X0).real chance[ch, n] = np.corrcoef(x0, x1)[1, 0] # p-values, compute the fraction of chance correlations > cc at each channel p = [] for h, x in enumerate(cc): p += [(chance[h, ] >= x).sum() / float(N)] print('p-values:', p) #compute the 99% percentile of the chance data right = np.percentile(chance, 99, axis=-1) ax.plot(right, zvec, ':', color='k', lw=1.) ax.set_ylim([-1550, 50]) ax.set_yticklabels([]) ax.set_yticks(zvec) ax.set_xlim([0, 1.]) ax.set_xticks([0.0, 0.5, 1]) ax.yaxis.tick_left() ax.set_xlabel('$cc$ (-)', labelpad=0.1) ax.set_title('corr. 
\n coef.') print 'correlation coefficients:' print cc ########################################################################### # Part E,H: Power spectra ############################################################################ #compute PSDs ratio between ground truth and estimate freqs, PSD_data = calc_signal_power(params, fname=data, transient=transient, Df=None, mlab=True, NFFT=256, noverlap=128, window=plt.mlab.window_hanning) freqs, PSD_LFP_reconst = calc_signal_power(params, fname=LFP_reconst, transient=transient, Df=None, mlab=True, NFFT=256, noverlap=128, window=plt.mlab.window_hanning) zv = np.r_[params.electrodeParams['z']] zv = np.r_[zv, zv[-1] + np.diff(zv)[-1]] inds = freqs >= 1 # frequencies greater than 1 Hz for j, ch in enumerate(channels): ax = fig.add_subplot(gsb[1+len(channels)+j, (i*4+3):(i*4+4)]) if j == 0: phlp.annotate_subplot(ax, ncols=8./1, nrows=4.5*len(channels), letter=alphabet[i*3+4], linear_offset=0.02) ax.set_title('PSD') phlp.remove_axis_junk(ax) ax.loglog(freqs[inds], PSD_data[ch, inds], 'k', label='LFP', clip_on=True, zorder=1) ax.loglog(freqs[inds], PSD_LFP_reconst[ch, inds], 'gray' if analysis_params.bw else analysis_params.colorP, label='predictor', clip_on=True, lw=1, zorder=0) ax.set_xlim([4E0,4E2]) ax.set_ylim([1E-8, 1E-4]) ax.tick_params(axis='y', which='major', pad=0) ax.set_yticks([1E-8,1E-6,1E-4]) ax.yaxis.set_minor_locator(plt.NullLocator()) ax.text(0.8, 0.9, 'ch. %i' % (ch+1), horizontalalignment='left', verticalalignment='center', fontsize=6, transform=ax.transAxes) if j == 0: ax.set_ylabel('(mV$^2$/Hz)', labelpad=0.) if j > 0: ax.set_yticklabels([]) if j == len(channels)-1: ax.set_xlabel(r'$f$ (Hz)', labelpad=0.) else: ax.set_xticklabels([]) return fig, PSD_LFP_reconst, PSD_data
[ "def", "fig_kernel_lfp", "(", "savefolders", ",", "params", ",", "transient", "=", "200", ",", "T", "=", "[", "800.", ",", "1000.", "]", ",", "X", "=", "'L5E'", ",", "lags", "=", "[", "20", ",", "20", "]", ",", "channels", "=", "[", "0", ",", "...
This function calculates the STA of LFP, extracts kernels and recontructs the LFP from kernels. Arguments :: transient : the time in milliseconds, after which the analysis should begin so as to avoid any starting transients X : id of presynaptic trigger population
[ "This", "function", "calculates", "the", "STA", "of", "LFP", "extracts", "kernels", "and", "recontructs", "the", "LFP", "from", "kernels", ".", "Arguments", "::", "transient", ":", "the", "time", "in", "milliseconds", "after", "which", "the", "analysis", "shou...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/figure_13.py#L86-L490
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/figure_13.py
fig_kernel_lfp_CINPLA
def fig_kernel_lfp_CINPLA(savefolders, params, transient=200, X='L5E', lags=[20, 20]): ''' This function calculates the STA of LFP, extracts kernels and recontructs the LFP from kernels. kwargs: :: transient : the time in milliseconds, after which the analysis should begin so as to avoid any starting transients X : id of presynaptic trigger population ''' # Electrode geometry zvec = np.r_[params.electrodeParams['z']] alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' ana_params.set_PLOS_2column_fig_style(ratio=0.5) # Start the figure fig = plt.figure() fig.subplots_adjust(left=0.06, right=0.95, bottom=0.075, top=0.925, hspace=0.23, wspace=0.55) # create grid_spec gs = gridspec.GridSpec(1, 7) ########################################################################### # Part A: spikegen "network" activity ############################################################################ # path to simulation files params.savefolder = 'simulation_output_spikegen' params.figures_path = os.path.join(params.savefolder, 'figures') params.spike_output_path = os.path.join(params.savefolder, 'processed_nest_output') params.networkSimParams['spike_output_path'] = params.spike_output_path # Get the spikegen LFP: f = h5py.File(os.path.join('simulation_output_spikegen', 'LFPsum.h5')) srate = f['srate'].value tvec = np.arange(f['data'].shape[1]) * 1000. 
/ srate # slice inds = (tvec < params.tstop) & (tvec >= transient) data_sg_raw = f['data'].value.astype(float) f.close() # # kernel width kwidth = 20 # extract kernels kernels = np.zeros((len(params.N_X), 16, 100)) for j in range(len(params.X)): kernels[j] = data_sg_raw[:, 100+kwidth+j*100:100+kwidth+(j+1)*100] / params.N_X[j] # create some dummy spike times activationtimes = np.array([x*100 for x in range(3,11)] + [200]) networkSimSpikegen = CachedNetwork(**params.networkSimParams) x, y = networkSimSpikegen.get_xy([transient, params.tstop]) ########################################################################### # Part A: spatiotemporal kernels, all presynaptic populations ############################################################################ titles = ['TC', 'L23E/I', 'LFP kernels \n L4E/I', 'L5E/I', 'L6E/I', ] COUNTER = 0 for i, X__ in enumerate(([['TC']]) + zip(params.X[1::2], params.X[2::2])): ax = fig.add_subplot(gs[0, i]) if i == 0: phlp.annotate_subplot(ax, ncols=7, nrows=4, letter=alphabet[0], linear_offset=0.02) for j, X_ in enumerate(X__): # create spikegen histogram for population Y cinds = np.arange(activationtimes[np.arange(-1, 8)][COUNTER]-kwidth, activationtimes[np.arange(-1, 8)][COUNTER]+kwidth+2) x0_sg = np.histogram(x[X_], bins=tvec[cinds])[0].astype(float) if X_ == ('TC'): color='r' else: color=('r', 'b')[j] # plot kernel as correlation of spikegen LFP signal with delta spike train xcorr, vlimround = plotting_correlation(x0_sg/x0_sg.sum()**2, data_sg_raw[:, cinds[:-1]]*1E3, ax, normalize=False, lag=kwidth, color=color, scalebar=False) if i > 0: ax.set_yticklabels([]) ## Create scale bar ax.plot([kwidth, kwidth], [-1500 + j*3*100, -1400 + j*3*100], lw=2, color=color, clip_on=False) ax.text(kwidth*1.08, -1450 + j*3*100, '%.1f $\mu$V' % vlimround, rotation='vertical', va='center') ax.set_xlim((-5, kwidth)) ax.set_xticks([-20, 0, 20]) ax.set_xticklabels([-20, 0, 20]) COUNTER += 1 ax.set_title(titles[i]) for i, (savefolder, lag) in 
enumerate(zip(savefolders, lags)): # path to simulation files params.savefolder = os.path.join(os.path.split(params.savefolder)[0], savefolder) params.figures_path = os.path.join(params.savefolder, 'figures') params.spike_output_path = os.path.join(params.savefolder, 'processed_nest_output') params.networkSimParams['spike_output_path'] = params.spike_output_path #load spike as database inside function to avoid buggy behaviour networkSim = CachedNetwork(**params.networkSimParams) # Get the Compound LFP: LFPsum : data[nchannels, timepoints ] f = h5py.File(os.path.join(params.savefolder, 'LFPsum.h5')) data_raw = f['data'].value srate = f['srate'].value tvec = np.arange(data_raw.shape[1]) * 1000. / srate # slice inds = (tvec < params.tstop) & (tvec >= transient) data = data_raw[:,inds] # subtract mean dataT = data.T - data.mean(axis=1) data = dataT.T f.close() # Get the spikegen LFP: f = h5py.File(os.path.join('simulation_output_spikegen', 'LFPsum.h5')) data_sg_raw = f['data'].value # slice data_sg = data_sg_raw[:,inds[data_sg_raw.shape[1]]] f.close() ######################################################################## # Part B: STA LFP ######################################################################## ax = fig.add_subplot(gs[0, 5 + i]) phlp.annotate_subplot(ax, ncols=15, nrows=4, letter=alphabet[i+1], linear_offset=0.02) # collect the spikes x is the times, y is the id of the cell. 
x, y = networkSim.get_xy([0,params.tstop]) # Get the spikes for the population of interest given as 'Y' bins = np.arange(0, params.tstop+2) x0_raw = np.histogram(x[X], bins=bins)[0] x0 = x0_raw[inds].astype(float) # correlation between firing rate and LFP deviation # from mean normalized by the number of spikes xcorr, vlimround = plotting_correlation(x0/x0.sum(), data*1E3, ax, normalize=False, #unit='%.3f mV', lag=lag, scalebar=False, color='k', title='stLFP\n(trigger %s)' %X, ) # Create scale bar ax.plot([lag, lag], [-1500, -1400], lw=2, color='k', clip_on=False) ax.text(lag*1.04, -1450, '%.1f $\mu$V' % vlimround, rotation='vertical', va='center') [Xind] = np.where(np.array(networkSim.X) == X)[0] # create spikegen histogram for population Y x0_sg = np.zeros(x0.shape, dtype=float) x0_sg[activationtimes[Xind]] += params.N_X[Xind] ax.set_yticklabels([]) ax.set_xticks([-lag, 0, lag]) ax.set_xticklabels([-lag, 0, lag]) return fig
python
def fig_kernel_lfp_CINPLA(savefolders, params, transient=200, X='L5E', lags=[20, 20]): ''' This function calculates the STA of LFP, extracts kernels and recontructs the LFP from kernels. kwargs: :: transient : the time in milliseconds, after which the analysis should begin so as to avoid any starting transients X : id of presynaptic trigger population ''' # Electrode geometry zvec = np.r_[params.electrodeParams['z']] alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' ana_params.set_PLOS_2column_fig_style(ratio=0.5) # Start the figure fig = plt.figure() fig.subplots_adjust(left=0.06, right=0.95, bottom=0.075, top=0.925, hspace=0.23, wspace=0.55) # create grid_spec gs = gridspec.GridSpec(1, 7) ########################################################################### # Part A: spikegen "network" activity ############################################################################ # path to simulation files params.savefolder = 'simulation_output_spikegen' params.figures_path = os.path.join(params.savefolder, 'figures') params.spike_output_path = os.path.join(params.savefolder, 'processed_nest_output') params.networkSimParams['spike_output_path'] = params.spike_output_path # Get the spikegen LFP: f = h5py.File(os.path.join('simulation_output_spikegen', 'LFPsum.h5')) srate = f['srate'].value tvec = np.arange(f['data'].shape[1]) * 1000. 
/ srate # slice inds = (tvec < params.tstop) & (tvec >= transient) data_sg_raw = f['data'].value.astype(float) f.close() # # kernel width kwidth = 20 # extract kernels kernels = np.zeros((len(params.N_X), 16, 100)) for j in range(len(params.X)): kernels[j] = data_sg_raw[:, 100+kwidth+j*100:100+kwidth+(j+1)*100] / params.N_X[j] # create some dummy spike times activationtimes = np.array([x*100 for x in range(3,11)] + [200]) networkSimSpikegen = CachedNetwork(**params.networkSimParams) x, y = networkSimSpikegen.get_xy([transient, params.tstop]) ########################################################################### # Part A: spatiotemporal kernels, all presynaptic populations ############################################################################ titles = ['TC', 'L23E/I', 'LFP kernels \n L4E/I', 'L5E/I', 'L6E/I', ] COUNTER = 0 for i, X__ in enumerate(([['TC']]) + zip(params.X[1::2], params.X[2::2])): ax = fig.add_subplot(gs[0, i]) if i == 0: phlp.annotate_subplot(ax, ncols=7, nrows=4, letter=alphabet[0], linear_offset=0.02) for j, X_ in enumerate(X__): # create spikegen histogram for population Y cinds = np.arange(activationtimes[np.arange(-1, 8)][COUNTER]-kwidth, activationtimes[np.arange(-1, 8)][COUNTER]+kwidth+2) x0_sg = np.histogram(x[X_], bins=tvec[cinds])[0].astype(float) if X_ == ('TC'): color='r' else: color=('r', 'b')[j] # plot kernel as correlation of spikegen LFP signal with delta spike train xcorr, vlimround = plotting_correlation(x0_sg/x0_sg.sum()**2, data_sg_raw[:, cinds[:-1]]*1E3, ax, normalize=False, lag=kwidth, color=color, scalebar=False) if i > 0: ax.set_yticklabels([]) ## Create scale bar ax.plot([kwidth, kwidth], [-1500 + j*3*100, -1400 + j*3*100], lw=2, color=color, clip_on=False) ax.text(kwidth*1.08, -1450 + j*3*100, '%.1f $\mu$V' % vlimround, rotation='vertical', va='center') ax.set_xlim((-5, kwidth)) ax.set_xticks([-20, 0, 20]) ax.set_xticklabels([-20, 0, 20]) COUNTER += 1 ax.set_title(titles[i]) for i, (savefolder, lag) in 
enumerate(zip(savefolders, lags)): # path to simulation files params.savefolder = os.path.join(os.path.split(params.savefolder)[0], savefolder) params.figures_path = os.path.join(params.savefolder, 'figures') params.spike_output_path = os.path.join(params.savefolder, 'processed_nest_output') params.networkSimParams['spike_output_path'] = params.spike_output_path #load spike as database inside function to avoid buggy behaviour networkSim = CachedNetwork(**params.networkSimParams) # Get the Compound LFP: LFPsum : data[nchannels, timepoints ] f = h5py.File(os.path.join(params.savefolder, 'LFPsum.h5')) data_raw = f['data'].value srate = f['srate'].value tvec = np.arange(data_raw.shape[1]) * 1000. / srate # slice inds = (tvec < params.tstop) & (tvec >= transient) data = data_raw[:,inds] # subtract mean dataT = data.T - data.mean(axis=1) data = dataT.T f.close() # Get the spikegen LFP: f = h5py.File(os.path.join('simulation_output_spikegen', 'LFPsum.h5')) data_sg_raw = f['data'].value # slice data_sg = data_sg_raw[:,inds[data_sg_raw.shape[1]]] f.close() ######################################################################## # Part B: STA LFP ######################################################################## ax = fig.add_subplot(gs[0, 5 + i]) phlp.annotate_subplot(ax, ncols=15, nrows=4, letter=alphabet[i+1], linear_offset=0.02) # collect the spikes x is the times, y is the id of the cell. 
x, y = networkSim.get_xy([0,params.tstop]) # Get the spikes for the population of interest given as 'Y' bins = np.arange(0, params.tstop+2) x0_raw = np.histogram(x[X], bins=bins)[0] x0 = x0_raw[inds].astype(float) # correlation between firing rate and LFP deviation # from mean normalized by the number of spikes xcorr, vlimround = plotting_correlation(x0/x0.sum(), data*1E3, ax, normalize=False, #unit='%.3f mV', lag=lag, scalebar=False, color='k', title='stLFP\n(trigger %s)' %X, ) # Create scale bar ax.plot([lag, lag], [-1500, -1400], lw=2, color='k', clip_on=False) ax.text(lag*1.04, -1450, '%.1f $\mu$V' % vlimround, rotation='vertical', va='center') [Xind] = np.where(np.array(networkSim.X) == X)[0] # create spikegen histogram for population Y x0_sg = np.zeros(x0.shape, dtype=float) x0_sg[activationtimes[Xind]] += params.N_X[Xind] ax.set_yticklabels([]) ax.set_xticks([-lag, 0, lag]) ax.set_xticklabels([-lag, 0, lag]) return fig
[ "def", "fig_kernel_lfp_CINPLA", "(", "savefolders", ",", "params", ",", "transient", "=", "200", ",", "X", "=", "'L5E'", ",", "lags", "=", "[", "20", ",", "20", "]", ")", ":", "# Electrode geometry", "zvec", "=", "np", ".", "r_", "[", "params", ".", ...
This function calculates the STA of LFP, extracts kernels and recontructs the LFP from kernels. kwargs: :: transient : the time in milliseconds, after which the analysis should begin so as to avoid any starting transients X : id of presynaptic trigger population
[ "This", "function", "calculates", "the", "STA", "of", "LFP", "extracts", "kernels", "and", "recontructs", "the", "LFP", "from", "kernels", ".", "kwargs", ":", "::", "transient", ":", "the", "time", "in", "milliseconds", "after", "which", "the", "analysis", "...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/figure_13.py#L493-L700
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/figure_13.py
fig_kernel_lfp_EITN_II
def fig_kernel_lfp_EITN_II(savefolders, params, transient=200, T=[800., 1000.], X='L5E', lags=[20, 20], channels=[0,3,7,11,13]): ''' This function calculates the STA of LFP, extracts kernels and recontructs the LFP from kernels. Arguments :: transient : the time in milliseconds, after which the analysis should begin so as to avoid any starting transients X : id of presynaptic trigger population ''' # Electrode geometry zvec = np.r_[params.electrodeParams['z']] alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' ana_params.set_PLOS_2column_fig_style(ratio=0.5) # Start the figure fig = plt.figure() fig.subplots_adjust(left=0.06, right=0.95, bottom=0.08, top=0.9, hspace=0.23, wspace=0.55) # create grid_spec gs = gridspec.GridSpec(len(channels), 7) ########################################################################### # spikegen "network" activity ############################################################################ # path to simulation files savefolder = 'simulation_output_spikegen' params.savefolder = os.path.join(os.path.split(params.savefolder)[0], savefolder) params.figures_path = os.path.join(params.savefolder, 'figures') params.spike_output_path = os.path.join(params.savefolder, 'processed_nest_output') params.networkSimParams['spike_output_path'] = params.spike_output_path # Get the spikegen LFP: f = h5py.File(os.path.join('simulation_output_spikegen', 'LFPsum.h5')) srate = f['srate'].value tvec = np.arange(f['data'].shape[1]) * 1000. 
/ srate # slice inds = (tvec < params.tstop) & (tvec >= transient) data_sg_raw = f['data'].value.astype(float) data_sg = data_sg_raw[:, inds] f.close() # kernel width kwidth = 20 # create some dummy spike times activationtimes = np.array([x*100 for x in range(3,11)] + [200]) networkSimSpikegen = CachedNetwork(**params.networkSimParams) x, y = networkSimSpikegen.get_xy([transient, params.tstop]) ############################################################################ ## Part A: spatiotemporal kernels, all presynaptic populations ############################################################################# # #titles = ['TC', # 'L23E/I', # 'LFP kernels \n L4E/I', # 'L5E/I', # 'L6E/I', # ] # #COUNTER = 0 #for i, X__ in enumerate(([['TC']]) + zip(params.X[1::2], params.X[2::2])): # ax = fig.add_subplot(gs[:len(channels), i]) # if i == 0: # phlp.annotate_subplot(ax, ncols=7, nrows=4, letter=alphabet[0], linear_offset=0.02) # # for j, X_ in enumerate(X__): # # create spikegen histogram for population Y # cinds = np.arange(activationtimes[np.arange(-1, 8)][COUNTER]-kwidth, # activationtimes[np.arange(-1, 8)][COUNTER]+kwidth+2) # x0_sg = np.histogram(x[X_], bins=cinds)[0].astype(float) # # if X_ == ('TC'): # color='r' # else: # color=('r', 'b')[j] # # # # plot kernel as correlation of spikegen LFP signal with delta spike train # xcorr, vlimround = plotting_correlation(params, # x0_sg/x0_sg.sum()**2, # data_sg_raw[:, cinds[:-1]]*1E3, # ax, normalize=False, # lag=kwidth, # color=color, # scalebar=False) # if i > 0: # ax.set_yticklabels([]) # # ## Create scale bar # ax.plot([kwidth, kwidth], # [-1500 + j*3*100, -1400 + j*3*100], lw=2, color=color, # clip_on=False) # ax.text(kwidth*1.08, -1450 + j*3*100, '%.1f $\mu$V' % vlimround, # rotation='vertical', va='center') # # ax.set_xlim((-5, kwidth)) # ax.set_xticks([-20, 0, 20]) # ax.set_xticklabels([-20, 0, 20]) # # COUNTER += 1 # # ax.set_title(titles[i]) ################################################ # Iterate over 
savefolders ################################################ for i, (savefolder, lag) in enumerate(zip(savefolders, lags)): # path to simulation files params.savefolder = os.path.join(os.path.split(params.savefolder)[0], savefolder) params.figures_path = os.path.join(params.savefolder, 'figures') params.spike_output_path = os.path.join(params.savefolder, 'processed_nest_output') params.networkSimParams['spike_output_path'] = params.spike_output_path #load spike as database inside function to avoid buggy behaviour networkSim = CachedNetwork(**params.networkSimParams) # Get the Compound LFP: LFPsum : data[nchannels, timepoints ] f = h5py.File(os.path.join(params.savefolder, 'LFPsum.h5')) data_raw = f['data'].value srate = f['srate'].value tvec = np.arange(data_raw.shape[1]) * 1000. / srate # slice inds = (tvec < params.tstop) & (tvec >= transient) data = data_raw[:, inds] # subtract mean dataT = data.T - data.mean(axis=1) data = dataT.T f.close() # Get the spikegen LFP: f = h5py.File(os.path.join('simulation_output_spikegen', 'LFPsum.h5')) data_sg_raw = f['data'].value f.close() # # # # ######################################################################### ## Part B: STA LFP ######################################################################### # #titles = ['staLFP(%s)\n(spont.)' % X, 'staLFP(%s)\n(AC. mod.)' % X] #ax = fig.add_subplot(gs[:len(channels), 5 + i]) #if i == 0: # phlp.annotate_subplot(ax, ncols=15, nrows=4, letter=alphabet[i+1], # linear_offset=0.02) # #collect the spikes x is the times, y is the id of the cell. 
x, y = networkSim.get_xy([0,params.tstop]) # ## Get the spikes for the population of interest given as 'Y' bins = np.arange(0, params.tstop+2) + 0.5 x0_raw = np.histogram(x[X], bins=bins)[0] x0 = x0_raw[inds].astype(float) # ## correlation between firing rate and LFP deviation ## from mean normalized by the number of spikes #xcorr, vlimround = plotting_correlation(params, # x0/x0.sum(), # data*1E3, # ax, normalize=False, # #unit='%.3f mV', # lag=lag, # scalebar=False, # color='k', # title=titles[i], # ) # ## Create scale bar #ax.plot([lag, lag], # [-1500, -1400], lw=2, color='k', # clip_on=False) #ax.text(lag*1.08, -1450, '%.1f $\mu$V' % vlimround, # rotation='vertical', va='center') # # #[Xind] = np.where(np.array(networkSim.X) == X)[0] # ## create spikegen histogram for population Y #x0_sg = np.zeros(x0.shape, dtype=float) #x0_sg[activationtimes[Xind]] += params.N_X[Xind] # # #ax.set_yticklabels([]) #ax.set_xticks([-lag, 0, lag]) #ax.set_xticklabels([-lag, 0, lag]) ########################################################################### # Part C, F: LFP and reconstructed LFP ############################################################################ # create grid_spec gsb = gridspec.GridSpec(len(channels), 8) ax = fig.add_subplot(gsb[:, (i*4):(i*4+2)]) phlp.annotate_subplot(ax, ncols=8/2., nrows=4, letter=alphabet[i*3+2], linear_offset=0.02) # extract kernels, force negative lags to be zero kernels = np.zeros((len(params.N_X), 16, kwidth*2)) for j in range(len(params.X)): kernels[j, :, kwidth:] = data_sg_raw[:, (j+2)*100:kwidth+(j+2)*100]/params.N_X[j] LFP_reconst_raw = np.zeros(data_raw.shape) for j, pop in enumerate(params.X): x0_raw = np.histogram(x[pop], bins=bins)[0].astype(float) for ch in range(kernels.shape[1]): LFP_reconst_raw[ch] += np.convolve(x0_raw, kernels[j, ch], 'same') # slice LFP_reconst = LFP_reconst_raw[:, inds] # subtract mean LFP_reconstT = LFP_reconst.T - LFP_reconst.mean(axis=1) LFP_reconst = LFP_reconstT.T vlimround = 
plot_signal_sum(ax, params, fname=os.path.join(params.savefolder, 'LFPsum.h5'), unit='mV', scalebar=True, T=T, ylim=[-1550, 50], color='k', label='$real$', rasterized=False) plot_signal_sum(ax, params, fname=LFP_reconst_raw, unit='mV', scaling_factor= 1., scalebar=False, vlimround=vlimround, T=T, ylim=[-1550, 50], color='r', label='$reconstr$', rasterized=False) ax.set_title('LFP & population \n rate predictor') if i > 0: ax.set_yticklabels([]) ########################################################################### # Part D,G: Correlation coefficient ############################################################################ ax = fig.add_subplot(gsb[:, i*4+2:i*4+3]) phlp.remove_axis_junk(ax) phlp.annotate_subplot(ax, ncols=8./1, nrows=4, letter=alphabet[i*3+3], linear_offset=0.02) cc = np.zeros(len(zvec)) for ch in np.arange(len(zvec)): cc[ch] = np.corrcoef(data[ch], LFP_reconst[ch])[1, 0] ax.barh(zvec, cc, height=90, align='center', color='1', linewidth=0.5) ax.set_ylim([-1550, 50]) ax.set_yticklabels([]) ax.set_yticks(zvec) ax.set_xlim([0.0, 1.]) ax.set_xticks([0.0, 0.5, 1]) ax.yaxis.tick_left() ax.set_xlabel('$cc$ (-)', labelpad=0.1) ax.set_title('corr. 
\n coef.') print 'correlation coefficients:' print cc ########################################################################### # Part E,H: Power spectra ############################################################################ #compute PSDs ratio between ground truth and estimate freqs, PSD_data = calc_signal_power(params, fname=data, transient=transient, Df=None, mlab=True, NFFT=256, noverlap=128, window=plt.mlab.window_hanning) freqs, PSD_LFP_reconst = calc_signal_power(params, fname=LFP_reconst, transient=transient, Df=None, mlab=True, NFFT=256, noverlap=128, window=plt.mlab.window_hanning) zv = np.r_[params.electrodeParams['z']] zv = np.r_[zv, zv[-1] + np.diff(zv)[-1]] inds = freqs >= 1 # frequencies greater than 1 Hz for j, ch in enumerate(channels): ax = fig.add_subplot(gsb[j, (i*4+3):(i*4+4)]) if j == 0: phlp.annotate_subplot(ax, ncols=8./1, nrows=4.5*len(channels), letter=alphabet[i*3+4], linear_offset=0.02) ax.set_title('PSD') phlp.remove_axis_junk(ax) ax.loglog(freqs[inds], PSD_data[ch, inds], 'k', label='LFP', clip_on=True) ax.loglog(freqs[inds], PSD_LFP_reconst[ch, inds], 'r', label='predictor', clip_on=True) ax.set_xlim([4E0,4E2]) ax.set_ylim([1E-8, 1E-4]) ax.tick_params(axis='y', which='major', pad=0) ax.set_yticks([1E-8,1E-6,1E-4]) ax.yaxis.set_minor_locator(plt.NullLocator()) ax.text(0.8, 0.9, 'ch. %i' % (ch+1), horizontalalignment='left', verticalalignment='center', fontsize=6, transform=ax.transAxes) if j == 0: ax.set_ylabel('(mV$^2$/Hz)', labelpad=0.) if j > 0: ax.set_yticklabels([]) if j == len(channels)-1: ax.set_xlabel(r'$f$ (Hz)', labelpad=0.) else: ax.set_xticklabels([]) return fig, PSD_LFP_reconst, PSD_data
python
def fig_kernel_lfp_EITN_II(savefolders, params, transient=200, T=[800., 1000.], X='L5E', lags=[20, 20], channels=[0,3,7,11,13]): ''' This function calculates the STA of LFP, extracts kernels and recontructs the LFP from kernels. Arguments :: transient : the time in milliseconds, after which the analysis should begin so as to avoid any starting transients X : id of presynaptic trigger population ''' # Electrode geometry zvec = np.r_[params.electrodeParams['z']] alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' ana_params.set_PLOS_2column_fig_style(ratio=0.5) # Start the figure fig = plt.figure() fig.subplots_adjust(left=0.06, right=0.95, bottom=0.08, top=0.9, hspace=0.23, wspace=0.55) # create grid_spec gs = gridspec.GridSpec(len(channels), 7) ########################################################################### # spikegen "network" activity ############################################################################ # path to simulation files savefolder = 'simulation_output_spikegen' params.savefolder = os.path.join(os.path.split(params.savefolder)[0], savefolder) params.figures_path = os.path.join(params.savefolder, 'figures') params.spike_output_path = os.path.join(params.savefolder, 'processed_nest_output') params.networkSimParams['spike_output_path'] = params.spike_output_path # Get the spikegen LFP: f = h5py.File(os.path.join('simulation_output_spikegen', 'LFPsum.h5')) srate = f['srate'].value tvec = np.arange(f['data'].shape[1]) * 1000. 
/ srate # slice inds = (tvec < params.tstop) & (tvec >= transient) data_sg_raw = f['data'].value.astype(float) data_sg = data_sg_raw[:, inds] f.close() # kernel width kwidth = 20 # create some dummy spike times activationtimes = np.array([x*100 for x in range(3,11)] + [200]) networkSimSpikegen = CachedNetwork(**params.networkSimParams) x, y = networkSimSpikegen.get_xy([transient, params.tstop]) ############################################################################ ## Part A: spatiotemporal kernels, all presynaptic populations ############################################################################# # #titles = ['TC', # 'L23E/I', # 'LFP kernels \n L4E/I', # 'L5E/I', # 'L6E/I', # ] # #COUNTER = 0 #for i, X__ in enumerate(([['TC']]) + zip(params.X[1::2], params.X[2::2])): # ax = fig.add_subplot(gs[:len(channels), i]) # if i == 0: # phlp.annotate_subplot(ax, ncols=7, nrows=4, letter=alphabet[0], linear_offset=0.02) # # for j, X_ in enumerate(X__): # # create spikegen histogram for population Y # cinds = np.arange(activationtimes[np.arange(-1, 8)][COUNTER]-kwidth, # activationtimes[np.arange(-1, 8)][COUNTER]+kwidth+2) # x0_sg = np.histogram(x[X_], bins=cinds)[0].astype(float) # # if X_ == ('TC'): # color='r' # else: # color=('r', 'b')[j] # # # # plot kernel as correlation of spikegen LFP signal with delta spike train # xcorr, vlimround = plotting_correlation(params, # x0_sg/x0_sg.sum()**2, # data_sg_raw[:, cinds[:-1]]*1E3, # ax, normalize=False, # lag=kwidth, # color=color, # scalebar=False) # if i > 0: # ax.set_yticklabels([]) # # ## Create scale bar # ax.plot([kwidth, kwidth], # [-1500 + j*3*100, -1400 + j*3*100], lw=2, color=color, # clip_on=False) # ax.text(kwidth*1.08, -1450 + j*3*100, '%.1f $\mu$V' % vlimround, # rotation='vertical', va='center') # # ax.set_xlim((-5, kwidth)) # ax.set_xticks([-20, 0, 20]) # ax.set_xticklabels([-20, 0, 20]) # # COUNTER += 1 # # ax.set_title(titles[i]) ################################################ # Iterate over 
savefolders ################################################ for i, (savefolder, lag) in enumerate(zip(savefolders, lags)): # path to simulation files params.savefolder = os.path.join(os.path.split(params.savefolder)[0], savefolder) params.figures_path = os.path.join(params.savefolder, 'figures') params.spike_output_path = os.path.join(params.savefolder, 'processed_nest_output') params.networkSimParams['spike_output_path'] = params.spike_output_path #load spike as database inside function to avoid buggy behaviour networkSim = CachedNetwork(**params.networkSimParams) # Get the Compound LFP: LFPsum : data[nchannels, timepoints ] f = h5py.File(os.path.join(params.savefolder, 'LFPsum.h5')) data_raw = f['data'].value srate = f['srate'].value tvec = np.arange(data_raw.shape[1]) * 1000. / srate # slice inds = (tvec < params.tstop) & (tvec >= transient) data = data_raw[:, inds] # subtract mean dataT = data.T - data.mean(axis=1) data = dataT.T f.close() # Get the spikegen LFP: f = h5py.File(os.path.join('simulation_output_spikegen', 'LFPsum.h5')) data_sg_raw = f['data'].value f.close() # # # # ######################################################################### ## Part B: STA LFP ######################################################################### # #titles = ['staLFP(%s)\n(spont.)' % X, 'staLFP(%s)\n(AC. mod.)' % X] #ax = fig.add_subplot(gs[:len(channels), 5 + i]) #if i == 0: # phlp.annotate_subplot(ax, ncols=15, nrows=4, letter=alphabet[i+1], # linear_offset=0.02) # #collect the spikes x is the times, y is the id of the cell. 
x, y = networkSim.get_xy([0,params.tstop]) # ## Get the spikes for the population of interest given as 'Y' bins = np.arange(0, params.tstop+2) + 0.5 x0_raw = np.histogram(x[X], bins=bins)[0] x0 = x0_raw[inds].astype(float) # ## correlation between firing rate and LFP deviation ## from mean normalized by the number of spikes #xcorr, vlimround = plotting_correlation(params, # x0/x0.sum(), # data*1E3, # ax, normalize=False, # #unit='%.3f mV', # lag=lag, # scalebar=False, # color='k', # title=titles[i], # ) # ## Create scale bar #ax.plot([lag, lag], # [-1500, -1400], lw=2, color='k', # clip_on=False) #ax.text(lag*1.08, -1450, '%.1f $\mu$V' % vlimround, # rotation='vertical', va='center') # # #[Xind] = np.where(np.array(networkSim.X) == X)[0] # ## create spikegen histogram for population Y #x0_sg = np.zeros(x0.shape, dtype=float) #x0_sg[activationtimes[Xind]] += params.N_X[Xind] # # #ax.set_yticklabels([]) #ax.set_xticks([-lag, 0, lag]) #ax.set_xticklabels([-lag, 0, lag]) ########################################################################### # Part C, F: LFP and reconstructed LFP ############################################################################ # create grid_spec gsb = gridspec.GridSpec(len(channels), 8) ax = fig.add_subplot(gsb[:, (i*4):(i*4+2)]) phlp.annotate_subplot(ax, ncols=8/2., nrows=4, letter=alphabet[i*3+2], linear_offset=0.02) # extract kernels, force negative lags to be zero kernels = np.zeros((len(params.N_X), 16, kwidth*2)) for j in range(len(params.X)): kernels[j, :, kwidth:] = data_sg_raw[:, (j+2)*100:kwidth+(j+2)*100]/params.N_X[j] LFP_reconst_raw = np.zeros(data_raw.shape) for j, pop in enumerate(params.X): x0_raw = np.histogram(x[pop], bins=bins)[0].astype(float) for ch in range(kernels.shape[1]): LFP_reconst_raw[ch] += np.convolve(x0_raw, kernels[j, ch], 'same') # slice LFP_reconst = LFP_reconst_raw[:, inds] # subtract mean LFP_reconstT = LFP_reconst.T - LFP_reconst.mean(axis=1) LFP_reconst = LFP_reconstT.T vlimround = 
plot_signal_sum(ax, params, fname=os.path.join(params.savefolder, 'LFPsum.h5'), unit='mV', scalebar=True, T=T, ylim=[-1550, 50], color='k', label='$real$', rasterized=False) plot_signal_sum(ax, params, fname=LFP_reconst_raw, unit='mV', scaling_factor= 1., scalebar=False, vlimround=vlimround, T=T, ylim=[-1550, 50], color='r', label='$reconstr$', rasterized=False) ax.set_title('LFP & population \n rate predictor') if i > 0: ax.set_yticklabels([]) ########################################################################### # Part D,G: Correlation coefficient ############################################################################ ax = fig.add_subplot(gsb[:, i*4+2:i*4+3]) phlp.remove_axis_junk(ax) phlp.annotate_subplot(ax, ncols=8./1, nrows=4, letter=alphabet[i*3+3], linear_offset=0.02) cc = np.zeros(len(zvec)) for ch in np.arange(len(zvec)): cc[ch] = np.corrcoef(data[ch], LFP_reconst[ch])[1, 0] ax.barh(zvec, cc, height=90, align='center', color='1', linewidth=0.5) ax.set_ylim([-1550, 50]) ax.set_yticklabels([]) ax.set_yticks(zvec) ax.set_xlim([0.0, 1.]) ax.set_xticks([0.0, 0.5, 1]) ax.yaxis.tick_left() ax.set_xlabel('$cc$ (-)', labelpad=0.1) ax.set_title('corr. 
\n coef.') print 'correlation coefficients:' print cc ########################################################################### # Part E,H: Power spectra ############################################################################ #compute PSDs ratio between ground truth and estimate freqs, PSD_data = calc_signal_power(params, fname=data, transient=transient, Df=None, mlab=True, NFFT=256, noverlap=128, window=plt.mlab.window_hanning) freqs, PSD_LFP_reconst = calc_signal_power(params, fname=LFP_reconst, transient=transient, Df=None, mlab=True, NFFT=256, noverlap=128, window=plt.mlab.window_hanning) zv = np.r_[params.electrodeParams['z']] zv = np.r_[zv, zv[-1] + np.diff(zv)[-1]] inds = freqs >= 1 # frequencies greater than 1 Hz for j, ch in enumerate(channels): ax = fig.add_subplot(gsb[j, (i*4+3):(i*4+4)]) if j == 0: phlp.annotate_subplot(ax, ncols=8./1, nrows=4.5*len(channels), letter=alphabet[i*3+4], linear_offset=0.02) ax.set_title('PSD') phlp.remove_axis_junk(ax) ax.loglog(freqs[inds], PSD_data[ch, inds], 'k', label='LFP', clip_on=True) ax.loglog(freqs[inds], PSD_LFP_reconst[ch, inds], 'r', label='predictor', clip_on=True) ax.set_xlim([4E0,4E2]) ax.set_ylim([1E-8, 1E-4]) ax.tick_params(axis='y', which='major', pad=0) ax.set_yticks([1E-8,1E-6,1E-4]) ax.yaxis.set_minor_locator(plt.NullLocator()) ax.text(0.8, 0.9, 'ch. %i' % (ch+1), horizontalalignment='left', verticalalignment='center', fontsize=6, transform=ax.transAxes) if j == 0: ax.set_ylabel('(mV$^2$/Hz)', labelpad=0.) if j > 0: ax.set_yticklabels([]) if j == len(channels)-1: ax.set_xlabel(r'$f$ (Hz)', labelpad=0.) else: ax.set_xticklabels([]) return fig, PSD_LFP_reconst, PSD_data
[ "def", "fig_kernel_lfp_EITN_II", "(", "savefolders", ",", "params", ",", "transient", "=", "200", ",", "T", "=", "[", "800.", ",", "1000.", "]", ",", "X", "=", "'L5E'", ",", "lags", "=", "[", "20", ",", "20", "]", ",", "channels", "=", "[", "0", ...
This function calculates the STA of LFP, extracts kernels and recontructs the LFP from kernels. Arguments :: transient : the time in milliseconds, after which the analysis should begin so as to avoid any starting transients X : id of presynaptic trigger population
[ "This", "function", "calculates", "the", "STA", "of", "LFP", "extracts", "kernels", "and", "recontructs", "the", "LFP", "from", "kernels", ".", "Arguments", "::", "transient", ":", "the", "time", "in", "milliseconds", "after", "which", "the", "analysis", "shou...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/figure_13.py#L919-L1264
tkf/rash
rash/watchrecord.py
watch_record
def watch_record(indexer, use_polling=False): """ Start watching `cfstore.record_path`. :type indexer: rash.indexer.Indexer """ if use_polling: from watchdog.observers.polling import PollingObserver as Observer Observer # fool pyflakes else: from watchdog.observers import Observer event_handler = RecordHandler(indexer) observer = Observer() observer.schedule(event_handler, path=indexer.record_path, recursive=True) indexer.logger.debug('Start observer.') observer.start() try: while True: time.sleep(1) except KeyboardInterrupt: indexer.logger.debug('Got KeyboardInterrupt. Stopping observer.') observer.stop() indexer.logger.debug('Joining observer.') observer.join() indexer.logger.debug('Finish watching record.')
python
def watch_record(indexer, use_polling=False): """ Start watching `cfstore.record_path`. :type indexer: rash.indexer.Indexer """ if use_polling: from watchdog.observers.polling import PollingObserver as Observer Observer # fool pyflakes else: from watchdog.observers import Observer event_handler = RecordHandler(indexer) observer = Observer() observer.schedule(event_handler, path=indexer.record_path, recursive=True) indexer.logger.debug('Start observer.') observer.start() try: while True: time.sleep(1) except KeyboardInterrupt: indexer.logger.debug('Got KeyboardInterrupt. Stopping observer.') observer.stop() indexer.logger.debug('Joining observer.') observer.join() indexer.logger.debug('Finish watching record.')
[ "def", "watch_record", "(", "indexer", ",", "use_polling", "=", "False", ")", ":", "if", "use_polling", ":", "from", "watchdog", ".", "observers", ".", "polling", "import", "PollingObserver", "as", "Observer", "Observer", "# fool pyflakes", "else", ":", "from", ...
Start watching `cfstore.record_path`. :type indexer: rash.indexer.Indexer
[ "Start", "watching", "cfstore", ".", "record_path", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/watchrecord.py#L48-L74
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/Fig3/Fig3.py
run_sim
def run_sim(morphology='patdemo/cells/j4a.hoc', cell_rotation=dict(x=4.99, y=-4.33, z=3.14), closest_idx=dict(x=-200., y=0., z=800.)): '''set up simple cell simulation with LFPs in the plane''' # Create cell cell = LFPy.Cell(morphology=morphology, **cell_parameters) # Align cell cell.set_rotation(**cell_rotation) # Define synapse parameters synapse_parameters = { 'idx' : cell.get_closest_idx(**closest_idx), 'e' : 0., # reversal potential 'syntype' : 'ExpSynI', # synapse type 'tau' : 0.5, # synaptic time constant 'weight' : 0.0878, # synaptic weight 'record_current' : True, # record synapse current } # Create synapse and set time of synaptic input synapse = LFPy.Synapse(cell, **synapse_parameters) synapse.set_spike_times(np.array([1.])) # Create electrode object # Run simulation, electrode object argument in cell.simulate print "running simulation..." cell.simulate(rec_imem=True,rec_isyn=True) grid_electrode = LFPy.RecExtElectrode(cell,**grid_electrode_parameters) point_electrode = LFPy.RecExtElectrode(cell,**point_electrode_parameters) grid_electrode.calc_lfp() point_electrode.calc_lfp() print "done" return cell, synapse, grid_electrode, point_electrode
python
def run_sim(morphology='patdemo/cells/j4a.hoc', cell_rotation=dict(x=4.99, y=-4.33, z=3.14), closest_idx=dict(x=-200., y=0., z=800.)): '''set up simple cell simulation with LFPs in the plane''' # Create cell cell = LFPy.Cell(morphology=morphology, **cell_parameters) # Align cell cell.set_rotation(**cell_rotation) # Define synapse parameters synapse_parameters = { 'idx' : cell.get_closest_idx(**closest_idx), 'e' : 0., # reversal potential 'syntype' : 'ExpSynI', # synapse type 'tau' : 0.5, # synaptic time constant 'weight' : 0.0878, # synaptic weight 'record_current' : True, # record synapse current } # Create synapse and set time of synaptic input synapse = LFPy.Synapse(cell, **synapse_parameters) synapse.set_spike_times(np.array([1.])) # Create electrode object # Run simulation, electrode object argument in cell.simulate print "running simulation..." cell.simulate(rec_imem=True,rec_isyn=True) grid_electrode = LFPy.RecExtElectrode(cell,**grid_electrode_parameters) point_electrode = LFPy.RecExtElectrode(cell,**point_electrode_parameters) grid_electrode.calc_lfp() point_electrode.calc_lfp() print "done" return cell, synapse, grid_electrode, point_electrode
[ "def", "run_sim", "(", "morphology", "=", "'patdemo/cells/j4a.hoc'", ",", "cell_rotation", "=", "dict", "(", "x", "=", "4.99", ",", "y", "=", "-", "4.33", ",", "z", "=", "3.14", ")", ",", "closest_idx", "=", "dict", "(", "x", "=", "-", "200.", ",", ...
set up simple cell simulation with LFPs in the plane
[ "set", "up", "simple", "cell", "simulation", "with", "LFPs", "in", "the", "plane" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/Fig3/Fig3.py#L69-L108
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/Fig3/Fig3.py
plot_sim
def plot_sim(ax, cell, synapse, grid_electrode, point_electrode, letter='a'): '''create a plot''' fig = plt.figure(figsize = (3.27*2/3, 3.27*2/3)) ax = fig.add_axes([.1,.05,.9,.9], aspect='equal', frameon=False) phlp.annotate_subplot(ax, ncols=1, nrows=1, letter=letter, fontsize=16) cax = fig.add_axes([0.8, 0.2, 0.02, 0.2], frameon=False) LFP = np.max(np.abs(grid_electrode.LFP),1).reshape(X.shape) im = ax.contour(X, Z, np.log10(LFP), 50, cmap='RdBu', linewidths=1.5, zorder=-2) cbar = fig.colorbar(im, cax=cax) cbar.set_label('$|\phi(\mathbf{r}, t)|_\mathrm{max}$ (nV)') cbar.outline.set_visible(False) #get some log-linear tickmarks and ticklabels ticks = np.arange(np.ceil(np.log10(LFP.min())), np.ceil(np.log10(LFP.max()))) cbar.set_ticks(ticks) cbar.set_ticklabels(10.**ticks * 1E6) #mv -> nV zips = [] for x, z in cell.get_idx_polygons(): zips.append(zip(x, z)) polycol = PolyCollection(zips, edgecolors='k', linewidths=0.5, facecolors='k') ax.add_collection(polycol) ax.plot([100, 200], [-400, -400], 'k', lw=1, clip_on=False) ax.text(150, -470, r'100$\mu$m', va='center', ha='center') ax.axis('off') ax.plot(cell.xmid[cell.synidx],cell.zmid[cell.synidx], 'o', ms=5, markeredgecolor='k', markerfacecolor='r') color_vec = ['blue','green'] for i in xrange(2): ax.plot(point_electrode_parameters['x'][i], point_electrode_parameters['z'][i],'o',ms=6, markeredgecolor='none', markerfacecolor=color_vec[i]) plt.axes([.11, .075, .25, .2]) plt.plot(cell.tvec,point_electrode.LFP[0]*1e6,color=color_vec[0], clip_on=False) plt.plot(cell.tvec,point_electrode.LFP[1]*1e6,color=color_vec[1], clip_on=False) plt.axis('tight') ax = plt.gca() ax.set_ylabel(r'$\phi(\mathbf{r}, t)$ (nV)') #rotation='horizontal') ax.set_xlabel('$t$ (ms)', va='center') for loc, spine in ax.spines.iteritems(): if loc in ['right', 'top']: spine.set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') plt.axes([.11, 0.285, .25, .2]) plt.plot(cell.tvec,synapse.i*1E3, color='red', 
clip_on=False) plt.axis('tight') ax = plt.gca() ax.set_ylabel(r'$I_{i, j}(t)$ (pA)', ha='center', va='center') #, rotation='horizontal') for loc, spine in ax.spines.iteritems(): if loc in ['right', 'top']: spine.set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.set_xticklabels([]) return fig
python
def plot_sim(ax, cell, synapse, grid_electrode, point_electrode, letter='a'): '''create a plot''' fig = plt.figure(figsize = (3.27*2/3, 3.27*2/3)) ax = fig.add_axes([.1,.05,.9,.9], aspect='equal', frameon=False) phlp.annotate_subplot(ax, ncols=1, nrows=1, letter=letter, fontsize=16) cax = fig.add_axes([0.8, 0.2, 0.02, 0.2], frameon=False) LFP = np.max(np.abs(grid_electrode.LFP),1).reshape(X.shape) im = ax.contour(X, Z, np.log10(LFP), 50, cmap='RdBu', linewidths=1.5, zorder=-2) cbar = fig.colorbar(im, cax=cax) cbar.set_label('$|\phi(\mathbf{r}, t)|_\mathrm{max}$ (nV)') cbar.outline.set_visible(False) #get some log-linear tickmarks and ticklabels ticks = np.arange(np.ceil(np.log10(LFP.min())), np.ceil(np.log10(LFP.max()))) cbar.set_ticks(ticks) cbar.set_ticklabels(10.**ticks * 1E6) #mv -> nV zips = [] for x, z in cell.get_idx_polygons(): zips.append(zip(x, z)) polycol = PolyCollection(zips, edgecolors='k', linewidths=0.5, facecolors='k') ax.add_collection(polycol) ax.plot([100, 200], [-400, -400], 'k', lw=1, clip_on=False) ax.text(150, -470, r'100$\mu$m', va='center', ha='center') ax.axis('off') ax.plot(cell.xmid[cell.synidx],cell.zmid[cell.synidx], 'o', ms=5, markeredgecolor='k', markerfacecolor='r') color_vec = ['blue','green'] for i in xrange(2): ax.plot(point_electrode_parameters['x'][i], point_electrode_parameters['z'][i],'o',ms=6, markeredgecolor='none', markerfacecolor=color_vec[i]) plt.axes([.11, .075, .25, .2]) plt.plot(cell.tvec,point_electrode.LFP[0]*1e6,color=color_vec[0], clip_on=False) plt.plot(cell.tvec,point_electrode.LFP[1]*1e6,color=color_vec[1], clip_on=False) plt.axis('tight') ax = plt.gca() ax.set_ylabel(r'$\phi(\mathbf{r}, t)$ (nV)') #rotation='horizontal') ax.set_xlabel('$t$ (ms)', va='center') for loc, spine in ax.spines.iteritems(): if loc in ['right', 'top']: spine.set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') plt.axes([.11, 0.285, .25, .2]) plt.plot(cell.tvec,synapse.i*1E3, color='red', 
clip_on=False) plt.axis('tight') ax = plt.gca() ax.set_ylabel(r'$I_{i, j}(t)$ (pA)', ha='center', va='center') #, rotation='horizontal') for loc, spine in ax.spines.iteritems(): if loc in ['right', 'top']: spine.set_color('none') ax.xaxis.set_ticks_position('bottom') ax.yaxis.set_ticks_position('left') ax.set_xticklabels([]) return fig
[ "def", "plot_sim", "(", "ax", ",", "cell", ",", "synapse", ",", "grid_electrode", ",", "point_electrode", ",", "letter", "=", "'a'", ")", ":", "fig", "=", "plt", ".", "figure", "(", "figsize", "=", "(", "3.27", "*", "2", "/", "3", ",", "3.27", "*",...
create a plot
[ "create", "a", "plot" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/Fig3/Fig3.py#L111-L189
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/Fig3/Fig3.py
plot_sim_tstep
def plot_sim_tstep(fig, ax, cell, synapse, grid_electrode, point_electrode, tstep=0, letter='a',title='', cbar=True, show_legend=False): '''create a plot''' ax.set_title(title) if letter != None: phlp.annotate_subplot(ax, ncols=3, nrows=1, letter=letter, linear_offset=0.05, fontsize=16) LFP = grid_electrode.LFP[:, tstep].reshape(X.shape).copy() LFP *= 1E6 #mv -> nV vlim = 50 levels = np.linspace(-vlim*2, vlim*2, 401) cbarticks = np.mgrid[-50:51:20] #cbarticks = [-10**np.floor(np.log10(vlim)), # 0, # 10**np.floor(np.log10(vlim)),] #force dashed for negative values linestyles = [] for level in levels: if analysis_params.bw: if level > 0: linestyles.append('-') elif level == 0: linestyles.append((0, (5, 5))) else: linestyles.append((0, (1.0, 1.0))) else: # linestyles.append('-') if level > 0: linestyles.append('-') elif level == 0: linestyles.append((0, (5, 5))) else: linestyles.append('-') if np.any(LFP != np.zeros(LFP.shape)): im = ax.contour(X, Z, LFP, levels=levels, cmap='gray' if analysis_params.bw else 'RdBu', vmin=-vlim, vmax=vlim, linewidths=3, linestyles=linestyles, zorder=-2, rasterized=False) bbox = np.array(ax.get_position()).flatten() if cbar: cax = fig.add_axes((bbox[2]-0.01, 0.2, 0.01, 0.4), frameon=False) cbar = fig.colorbar(im, cax=cax, format=FormatStrFormatter('%i'), values=[-vlim, vlim]) cbar.set_ticks(cbarticks) cbar.set_label('$\phi(\mathbf{r}, t)$ (nV)', labelpad=0) cbar.outline.set_visible(False) if show_legend: proxy = [plt.Line2D((0,1),(0,1), color='gray' if analysis_params.bw else plt.get_cmap('RdBu', 3)(2), ls='-', lw=3), plt.Line2D((0,1),(0,1), color='gray' if analysis_params.bw else plt.get_cmap('RdBu', 3)(1), ls=(0, (5, 5)), lw=3), plt.Line2D((0,1),(0,1), color='gray' if analysis_params.bw else plt.get_cmap('RdBu', 3)(0), ls=(0, (1, 1)), lw=3), ] ax.legend(proxy, [r'$\phi(\mathbf{r}, t) > 0$ nV', r'$\phi(\mathbf{r}, t) = 0$ nV', r'$\phi(\mathbf{r}, t) < 0$ nV'], loc=1, bbox_to_anchor=(1.2, 1), fontsize=10, frameon=False) zips = [] for x, 
z in cell.get_idx_polygons(): zips.append(zip(x, z)) polycol = PolyCollection(zips, edgecolors='k', linewidths=0.5, facecolors='k') ax.add_collection(polycol) ax.plot([100, 200], [-400, -400], 'k', lw=2, clip_on=False) ax.text(150, -470, r'100$\mu$m', va='center', ha='center') ax.axis('off') ax.plot(cell.xmid[cell.synidx],cell.zmid[cell.synidx], 'o', ms=6, markeredgecolor='k', markerfacecolor='w' if analysis_params.bw else 'r') color_vec = ['k' if analysis_params.bw else 'b', 'gray' if analysis_params.bw else 'g'] for i in xrange(2): ax.plot(point_electrode_parameters['x'][i], point_electrode_parameters['z'][i],'o',ms=6, markeredgecolor='k', markerfacecolor=color_vec[i]) bbox = np.array(ax.get_position()).flatten() ax1 = fig.add_axes((bbox[0], bbox[1], 0.05, 0.2)) ax1.plot(cell.tvec,point_electrode.LFP[0]*1e6,color=color_vec[0], clip_on=False) ax1.plot(cell.tvec,point_electrode.LFP[1]*1e6,color=color_vec[1], clip_on=False) axis = ax1.axis(ax1.axis('tight')) ax1.yaxis.set_major_locator(MaxNLocator(4)) ax1.vlines(cell.tvec[tstep], axis[2], axis[3], lw=0.2) ax1.set_ylabel(r'$\phi(\mathbf{r}, t)$ (nV)', labelpad=0) #rotation='horizontal') ax1.set_xlabel('$t$ (ms)', labelpad=0) for loc, spine in ax1.spines.iteritems(): if loc in ['right', 'top']: spine.set_color('none') ax1.xaxis.set_ticks_position('bottom') ax1.yaxis.set_ticks_position('left') ax2 = fig.add_axes((bbox[0], bbox[1]+.6, 0.05, 0.2)) ax2.plot(cell.tvec,synapse.i*1E3, color='k' if analysis_params.bw else 'r', clip_on=False) axis = ax2.axis(ax2.axis('tight')) ax2.yaxis.set_major_locator(MaxNLocator(4)) ax2.vlines(cell.tvec[tstep], axis[2], axis[3]) ax2.set_ylabel(r'$I_{i, j}(t)$ (pA)', labelpad=0) #, rotation='horizontal') for loc, spine in ax2.spines.iteritems(): if loc in ['right', 'top']: spine.set_color('none') ax2.xaxis.set_ticks_position('bottom') ax2.yaxis.set_ticks_position('left') ax2.set_xticklabels([])
python
def plot_sim_tstep(fig, ax, cell, synapse, grid_electrode, point_electrode, tstep=0, letter='a',title='', cbar=True, show_legend=False): '''create a plot''' ax.set_title(title) if letter != None: phlp.annotate_subplot(ax, ncols=3, nrows=1, letter=letter, linear_offset=0.05, fontsize=16) LFP = grid_electrode.LFP[:, tstep].reshape(X.shape).copy() LFP *= 1E6 #mv -> nV vlim = 50 levels = np.linspace(-vlim*2, vlim*2, 401) cbarticks = np.mgrid[-50:51:20] #cbarticks = [-10**np.floor(np.log10(vlim)), # 0, # 10**np.floor(np.log10(vlim)),] #force dashed for negative values linestyles = [] for level in levels: if analysis_params.bw: if level > 0: linestyles.append('-') elif level == 0: linestyles.append((0, (5, 5))) else: linestyles.append((0, (1.0, 1.0))) else: # linestyles.append('-') if level > 0: linestyles.append('-') elif level == 0: linestyles.append((0, (5, 5))) else: linestyles.append('-') if np.any(LFP != np.zeros(LFP.shape)): im = ax.contour(X, Z, LFP, levels=levels, cmap='gray' if analysis_params.bw else 'RdBu', vmin=-vlim, vmax=vlim, linewidths=3, linestyles=linestyles, zorder=-2, rasterized=False) bbox = np.array(ax.get_position()).flatten() if cbar: cax = fig.add_axes((bbox[2]-0.01, 0.2, 0.01, 0.4), frameon=False) cbar = fig.colorbar(im, cax=cax, format=FormatStrFormatter('%i'), values=[-vlim, vlim]) cbar.set_ticks(cbarticks) cbar.set_label('$\phi(\mathbf{r}, t)$ (nV)', labelpad=0) cbar.outline.set_visible(False) if show_legend: proxy = [plt.Line2D((0,1),(0,1), color='gray' if analysis_params.bw else plt.get_cmap('RdBu', 3)(2), ls='-', lw=3), plt.Line2D((0,1),(0,1), color='gray' if analysis_params.bw else plt.get_cmap('RdBu', 3)(1), ls=(0, (5, 5)), lw=3), plt.Line2D((0,1),(0,1), color='gray' if analysis_params.bw else plt.get_cmap('RdBu', 3)(0), ls=(0, (1, 1)), lw=3), ] ax.legend(proxy, [r'$\phi(\mathbf{r}, t) > 0$ nV', r'$\phi(\mathbf{r}, t) = 0$ nV', r'$\phi(\mathbf{r}, t) < 0$ nV'], loc=1, bbox_to_anchor=(1.2, 1), fontsize=10, frameon=False) zips = [] for x, 
z in cell.get_idx_polygons(): zips.append(zip(x, z)) polycol = PolyCollection(zips, edgecolors='k', linewidths=0.5, facecolors='k') ax.add_collection(polycol) ax.plot([100, 200], [-400, -400], 'k', lw=2, clip_on=False) ax.text(150, -470, r'100$\mu$m', va='center', ha='center') ax.axis('off') ax.plot(cell.xmid[cell.synidx],cell.zmid[cell.synidx], 'o', ms=6, markeredgecolor='k', markerfacecolor='w' if analysis_params.bw else 'r') color_vec = ['k' if analysis_params.bw else 'b', 'gray' if analysis_params.bw else 'g'] for i in xrange(2): ax.plot(point_electrode_parameters['x'][i], point_electrode_parameters['z'][i],'o',ms=6, markeredgecolor='k', markerfacecolor=color_vec[i]) bbox = np.array(ax.get_position()).flatten() ax1 = fig.add_axes((bbox[0], bbox[1], 0.05, 0.2)) ax1.plot(cell.tvec,point_electrode.LFP[0]*1e6,color=color_vec[0], clip_on=False) ax1.plot(cell.tvec,point_electrode.LFP[1]*1e6,color=color_vec[1], clip_on=False) axis = ax1.axis(ax1.axis('tight')) ax1.yaxis.set_major_locator(MaxNLocator(4)) ax1.vlines(cell.tvec[tstep], axis[2], axis[3], lw=0.2) ax1.set_ylabel(r'$\phi(\mathbf{r}, t)$ (nV)', labelpad=0) #rotation='horizontal') ax1.set_xlabel('$t$ (ms)', labelpad=0) for loc, spine in ax1.spines.iteritems(): if loc in ['right', 'top']: spine.set_color('none') ax1.xaxis.set_ticks_position('bottom') ax1.yaxis.set_ticks_position('left') ax2 = fig.add_axes((bbox[0], bbox[1]+.6, 0.05, 0.2)) ax2.plot(cell.tvec,synapse.i*1E3, color='k' if analysis_params.bw else 'r', clip_on=False) axis = ax2.axis(ax2.axis('tight')) ax2.yaxis.set_major_locator(MaxNLocator(4)) ax2.vlines(cell.tvec[tstep], axis[2], axis[3]) ax2.set_ylabel(r'$I_{i, j}(t)$ (pA)', labelpad=0) #, rotation='horizontal') for loc, spine in ax2.spines.iteritems(): if loc in ['right', 'top']: spine.set_color('none') ax2.xaxis.set_ticks_position('bottom') ax2.yaxis.set_ticks_position('left') ax2.set_xticklabels([])
[ "def", "plot_sim_tstep", "(", "fig", ",", "ax", ",", "cell", ",", "synapse", ",", "grid_electrode", ",", "point_electrode", ",", "tstep", "=", "0", ",", "letter", "=", "'a'", ",", "title", "=", "''", ",", "cbar", "=", "True", ",", "show_legend", "=", ...
create a plot
[ "create", "a", "plot" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/Fig3/Fig3.py#L192-L315
INM-6/hybridLFPy
examples/example_microcircuit_params.py
get_F_y
def get_F_y(fname='binzegger_connectivity_table.json', y=['p23']): ''' Extract frequency of occurrences of those cell types that are modeled. The data set contains cell types that are not modeled (TCs etc.) The returned percentages are renormalized onto modeled cell-types, i.e. they sum up to 1 ''' # Load data from json dictionary f = open(fname,'r') data = json.load(f) f.close() occurr = [] for cell_type in y: occurr += [data['data'][cell_type]['occurrence']] return list(np.array(occurr)/np.sum(occurr))
python
def get_F_y(fname='binzegger_connectivity_table.json', y=['p23']): ''' Extract frequency of occurrences of those cell types that are modeled. The data set contains cell types that are not modeled (TCs etc.) The returned percentages are renormalized onto modeled cell-types, i.e. they sum up to 1 ''' # Load data from json dictionary f = open(fname,'r') data = json.load(f) f.close() occurr = [] for cell_type in y: occurr += [data['data'][cell_type]['occurrence']] return list(np.array(occurr)/np.sum(occurr))
[ "def", "get_F_y", "(", "fname", "=", "'binzegger_connectivity_table.json'", ",", "y", "=", "[", "'p23'", "]", ")", ":", "# Load data from json dictionary", "f", "=", "open", "(", "fname", ",", "'r'", ")", "data", "=", "json", ".", "load", "(", "f", ")", ...
Extract frequency of occurrences of those cell types that are modeled. The data set contains cell types that are not modeled (TCs etc.) The returned percentages are renormalized onto modeled cell-types, i.e. they sum up to 1
[ "Extract", "frequency", "of", "occurrences", "of", "those", "cell", "types", "that", "are", "modeled", ".", "The", "data", "set", "contains", "cell", "types", "that", "are", "not", "modeled", "(", "TCs", "etc", ".", ")", "The", "returned", "percentages", "...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/example_microcircuit_params.py#L44-L58
INM-6/hybridLFPy
examples/example_microcircuit_params.py
get_L_yXL
def get_L_yXL(fname, y, x_in_X, L): ''' compute the layer specificity, defined as: :: L_yXL = k_yXL / k_yX ''' def _get_L_yXL_per_yXL(fname, x_in_X, X_index, y, layer): # Load data from json dictionary f = open(fname, 'r') data = json.load(f) f.close() # Get number of synapses if layer in [str(key) for key in data['data'][y]['syn_dict'].keys()]: #init variables k_yXL = 0 k_yX = 0 for x in x_in_X[X_index]: p_yxL = data['data'][y]['syn_dict'][layer][x] / 100. k_yL = data['data'][y]['syn_dict'][layer]['number of synapses per neuron'] k_yXL += p_yxL * k_yL for l in [str(key) for key in data['data'][y]['syn_dict'].keys()]: for x in x_in_X[X_index]: p_yxL = data['data'][y]['syn_dict'][l][x] / 100. k_yL = data['data'][y]['syn_dict'][l]['number of synapses per neuron'] k_yX += p_yxL * k_yL if k_yXL != 0.: return k_yXL / k_yX else: return 0. else: return 0. #init dict L_yXL = {} #iterate over postsynaptic cell types for y_value in y: #container data = np.zeros((len(L), len(x_in_X))) #iterate over lamina for i, Li in enumerate(L): #iterate over presynapse population inds for j in range(len(x_in_X)): data[i][j]= _get_L_yXL_per_yXL(fname, x_in_X, X_index=j, y=y_value, layer=Li) L_yXL[y_value] = data return L_yXL
python
def get_L_yXL(fname, y, x_in_X, L): ''' compute the layer specificity, defined as: :: L_yXL = k_yXL / k_yX ''' def _get_L_yXL_per_yXL(fname, x_in_X, X_index, y, layer): # Load data from json dictionary f = open(fname, 'r') data = json.load(f) f.close() # Get number of synapses if layer in [str(key) for key in data['data'][y]['syn_dict'].keys()]: #init variables k_yXL = 0 k_yX = 0 for x in x_in_X[X_index]: p_yxL = data['data'][y]['syn_dict'][layer][x] / 100. k_yL = data['data'][y]['syn_dict'][layer]['number of synapses per neuron'] k_yXL += p_yxL * k_yL for l in [str(key) for key in data['data'][y]['syn_dict'].keys()]: for x in x_in_X[X_index]: p_yxL = data['data'][y]['syn_dict'][l][x] / 100. k_yL = data['data'][y]['syn_dict'][l]['number of synapses per neuron'] k_yX += p_yxL * k_yL if k_yXL != 0.: return k_yXL / k_yX else: return 0. else: return 0. #init dict L_yXL = {} #iterate over postsynaptic cell types for y_value in y: #container data = np.zeros((len(L), len(x_in_X))) #iterate over lamina for i, Li in enumerate(L): #iterate over presynapse population inds for j in range(len(x_in_X)): data[i][j]= _get_L_yXL_per_yXL(fname, x_in_X, X_index=j, y=y_value, layer=Li) L_yXL[y_value] = data return L_yXL
[ "def", "get_L_yXL", "(", "fname", ",", "y", ",", "x_in_X", ",", "L", ")", ":", "def", "_get_L_yXL_per_yXL", "(", "fname", ",", "x_in_X", ",", "X_index", ",", "y", ",", "layer", ")", ":", "# Load data from json dictionary", "f", "=", "open", "(", "fname",...
compute the layer specificity, defined as: :: L_yXL = k_yXL / k_yX
[ "compute", "the", "layer", "specificity", "defined", "as", ":", "::", "L_yXL", "=", "k_yXL", "/", "k_yX" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/example_microcircuit_params.py#L62-L119
INM-6/hybridLFPy
examples/example_microcircuit_params.py
get_T_yX
def get_T_yX(fname, y, y_in_Y, x_in_X, F_y): ''' compute the cell type specificity, defined as: :: T_yX = K_yX / K_YX = F_y * k_yX / sum_y(F_y*k_yX) ''' def _get_k_yX_mul_F_y(y, y_index, X_index): # Load data from json dictionary f = open(fname, 'r') data = json.load(f) f.close() #init variables k_yX = 0. for l in [str(key) for key in data['data'][y]['syn_dict'].keys()]: for x in x_in_X[X_index]: p_yxL = data['data'][y]['syn_dict'][l][x] / 100. k_yL = data['data'][y]['syn_dict'][l]['number of synapses per neuron'] k_yX += p_yxL * k_yL return k_yX * F_y[y_index] #container T_yX = np.zeros((len(y), len(x_in_X))) #iterate over postsynaptic cell types for i, y_value in enumerate(y): #iterate over presynapse population inds for j in range(len(x_in_X)): k_yX_mul_F_y = 0 for k, yy in enumerate(sum(y_in_Y, [])): if y_value in yy: for yy_value in yy: ii = np.where(np.array(y) == yy_value)[0][0] k_yX_mul_F_y += _get_k_yX_mul_F_y(yy_value, ii, j) if k_yX_mul_F_y != 0: T_yX[i, j] = _get_k_yX_mul_F_y(y_value, i, j) / k_yX_mul_F_y return T_yX
python
def get_T_yX(fname, y, y_in_Y, x_in_X, F_y): ''' compute the cell type specificity, defined as: :: T_yX = K_yX / K_YX = F_y * k_yX / sum_y(F_y*k_yX) ''' def _get_k_yX_mul_F_y(y, y_index, X_index): # Load data from json dictionary f = open(fname, 'r') data = json.load(f) f.close() #init variables k_yX = 0. for l in [str(key) for key in data['data'][y]['syn_dict'].keys()]: for x in x_in_X[X_index]: p_yxL = data['data'][y]['syn_dict'][l][x] / 100. k_yL = data['data'][y]['syn_dict'][l]['number of synapses per neuron'] k_yX += p_yxL * k_yL return k_yX * F_y[y_index] #container T_yX = np.zeros((len(y), len(x_in_X))) #iterate over postsynaptic cell types for i, y_value in enumerate(y): #iterate over presynapse population inds for j in range(len(x_in_X)): k_yX_mul_F_y = 0 for k, yy in enumerate(sum(y_in_Y, [])): if y_value in yy: for yy_value in yy: ii = np.where(np.array(y) == yy_value)[0][0] k_yX_mul_F_y += _get_k_yX_mul_F_y(yy_value, ii, j) if k_yX_mul_F_y != 0: T_yX[i, j] = _get_k_yX_mul_F_y(y_value, i, j) / k_yX_mul_F_y return T_yX
[ "def", "get_T_yX", "(", "fname", ",", "y", ",", "y_in_Y", ",", "x_in_X", ",", "F_y", ")", ":", "def", "_get_k_yX_mul_F_y", "(", "y", ",", "y_index", ",", "X_index", ")", ":", "# Load data from json dictionary", "f", "=", "open", "(", "fname", ",", "'r'",...
compute the cell type specificity, defined as: :: T_yX = K_yX / K_YX = F_y * k_yX / sum_y(F_y*k_yX)
[ "compute", "the", "cell", "type", "specificity", "defined", "as", ":", "::", "T_yX", "=", "K_yX", "/", "K_YX", "=", "F_y", "*", "k_yX", "/", "sum_y", "(", "F_y", "*", "k_yX", ")" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/example_microcircuit_params.py#L123-L168
INM-6/hybridLFPy
examples/example_microcircuit_params.py
point_neuron_network_params._compute_J
def _compute_J(self): ''' Compute the current amplitude corresponding to the exponential synapse model PSP amplitude Derivation using sympy: :: from sympy import * #define symbols t, tm, Cm, ts, Is, Vmax = symbols('t tm Cm ts Is Vmax') #assume zero delay, t >= 0 #using eq. 8.10 in Sterrat et al V = tm*ts*Is*(exp(-t/tm) - exp(-t/ts)) / (tm-ts) / Cm print 'V = %s' % V #find time of V == Vmax dVdt = diff(V, t) print 'dVdt = %s' % dVdt [t] = solve(dVdt, t) print 't(t@dVdT==Vmax) = %s' % t #solve for Is at time of maxima V = tm*ts*Is*(exp(-t/tm) - exp(-t/ts)) / (tm-ts) / Cm print 'V(%s) = %s' % (t, V) [Is] = solve(V-Vmax, Is) print 'Is = %s' % Is resulting in: :: Cm*Vmax*(-tm + ts)/(tm*ts*(exp(tm*log(ts/tm)/(tm - ts)) - exp(ts*log(ts/tm)/(tm - ts)))) ''' #LIF params tm = self.model_params['tau_m'] Cm = self.model_params['C_m'] #synapse ts = self.model_params['tau_syn_ex'] Vmax = self.PSP_e #max current amplitude J = Cm*Vmax*(-tm + ts)/(tm*ts*(np.exp(tm*np.log(ts/tm)/(tm - ts)) - np.exp(ts*np.log(ts/tm)/(tm - ts)))) #unit conversion pF*mV -> nA J *= 1E-3 return J
python
def _compute_J(self): ''' Compute the current amplitude corresponding to the exponential synapse model PSP amplitude Derivation using sympy: :: from sympy import * #define symbols t, tm, Cm, ts, Is, Vmax = symbols('t tm Cm ts Is Vmax') #assume zero delay, t >= 0 #using eq. 8.10 in Sterrat et al V = tm*ts*Is*(exp(-t/tm) - exp(-t/ts)) / (tm-ts) / Cm print 'V = %s' % V #find time of V == Vmax dVdt = diff(V, t) print 'dVdt = %s' % dVdt [t] = solve(dVdt, t) print 't(t@dVdT==Vmax) = %s' % t #solve for Is at time of maxima V = tm*ts*Is*(exp(-t/tm) - exp(-t/ts)) / (tm-ts) / Cm print 'V(%s) = %s' % (t, V) [Is] = solve(V-Vmax, Is) print 'Is = %s' % Is resulting in: :: Cm*Vmax*(-tm + ts)/(tm*ts*(exp(tm*log(ts/tm)/(tm - ts)) - exp(ts*log(ts/tm)/(tm - ts)))) ''' #LIF params tm = self.model_params['tau_m'] Cm = self.model_params['C_m'] #synapse ts = self.model_params['tau_syn_ex'] Vmax = self.PSP_e #max current amplitude J = Cm*Vmax*(-tm + ts)/(tm*ts*(np.exp(tm*np.log(ts/tm)/(tm - ts)) - np.exp(ts*np.log(ts/tm)/(tm - ts)))) #unit conversion pF*mV -> nA J *= 1E-3 return J
[ "def", "_compute_J", "(", "self", ")", ":", "#LIF params", "tm", "=", "self", ".", "model_params", "[", "'tau_m'", "]", "Cm", "=", "self", ".", "model_params", "[", "'C_m'", "]", "#synapse", "ts", "=", "self", ".", "model_params", "[", "'tau_syn_ex'", "]...
Compute the current amplitude corresponding to the exponential synapse model PSP amplitude Derivation using sympy: :: from sympy import * #define symbols t, tm, Cm, ts, Is, Vmax = symbols('t tm Cm ts Is Vmax') #assume zero delay, t >= 0 #using eq. 8.10 in Sterrat et al V = tm*ts*Is*(exp(-t/tm) - exp(-t/ts)) / (tm-ts) / Cm print 'V = %s' % V #find time of V == Vmax dVdt = diff(V, t) print 'dVdt = %s' % dVdt [t] = solve(dVdt, t) print 't(t@dVdT==Vmax) = %s' % t #solve for Is at time of maxima V = tm*ts*Is*(exp(-t/tm) - exp(-t/ts)) / (tm-ts) / Cm print 'V(%s) = %s' % (t, V) [Is] = solve(V-Vmax, Is) print 'Is = %s' % Is resulting in: :: Cm*Vmax*(-tm + ts)/(tm*ts*(exp(tm*log(ts/tm)/(tm - ts)) - exp(ts*log(ts/tm)/(tm - ts))))
[ "Compute", "the", "current", "amplitude", "corresponding", "to", "the", "exponential", "synapse", "model", "PSP", "amplitude", "Derivation", "using", "sympy", ":", "::", "from", "sympy", "import", "*", "#define", "symbols", "t", "tm", "Cm", "ts", "Is", "Vmax",...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/example_microcircuit_params.py#L670-L721
INM-6/hybridLFPy
examples/example_microcircuit_params.py
multicompartment_params._synDelayParams
def _synDelayParams(self): ''' set up the detailed synaptic delay parameters, loc is mean delay, scale is std with low bound cutoff, assumes numpy.random.normal is used later ''' delays = {} #mean delays loc = np.zeros((len(self.y), len(self.X))) loc[:, 0] = self.delays[0] loc[:, 1::2] = self.delays[0] loc[:, 2::2] = self.delays[1] #standard deviations scale = loc * self.delay_rel_sd #prepare output delay_loc = {} for i, y in enumerate(self.y): delay_loc.update({y : loc[i]}) delay_scale = {} for i, y in enumerate(self.y): delay_scale.update({y : scale[i]}) return delay_loc, delay_scale
python
def _synDelayParams(self): ''' set up the detailed synaptic delay parameters, loc is mean delay, scale is std with low bound cutoff, assumes numpy.random.normal is used later ''' delays = {} #mean delays loc = np.zeros((len(self.y), len(self.X))) loc[:, 0] = self.delays[0] loc[:, 1::2] = self.delays[0] loc[:, 2::2] = self.delays[1] #standard deviations scale = loc * self.delay_rel_sd #prepare output delay_loc = {} for i, y in enumerate(self.y): delay_loc.update({y : loc[i]}) delay_scale = {} for i, y in enumerate(self.y): delay_scale.update({y : scale[i]}) return delay_loc, delay_scale
[ "def", "_synDelayParams", "(", "self", ")", ":", "delays", "=", "{", "}", "#mean delays", "loc", "=", "np", ".", "zeros", "(", "(", "len", "(", "self", ".", "y", ")", ",", "len", "(", "self", ".", "X", ")", ")", ")", "loc", "[", ":", ",", "0"...
set up the detailed synaptic delay parameters, loc is mean delay, scale is std with low bound cutoff, assumes numpy.random.normal is used later
[ "set", "up", "the", "detailed", "synaptic", "delay", "parameters", "loc", "is", "mean", "delay", "scale", "is", "std", "with", "low", "bound", "cutoff", "assumes", "numpy", ".", "random", ".", "normal", "is", "used", "later" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/example_microcircuit_params.py#L1026-L1051
INM-6/hybridLFPy
examples/example_microcircuit_params.py
multicompartment_params._calcDepths
def _calcDepths(self): ''' return the cortical depth of each subpopulation ''' depths = self.layerBoundaries.mean(axis=1)[1:] depth_y = [] for y in self.y: if y in ['p23', 'b23', 'nb23']: depth_y = np.r_[depth_y, depths[0]] elif y in ['p4', 'ss4(L23)', 'ss4(L4)', 'b4', 'nb4']: depth_y = np.r_[depth_y, depths[1]] elif y in ['p5(L23)', 'p5(L56)', 'b5', 'nb5']: depth_y = np.r_[depth_y, depths[2]] elif y in ['p6(L4)', 'p6(L56)', 'b6', 'nb6']: depth_y = np.r_[depth_y, depths[3]] else: raise Exception, 'this aint right' return depth_y
python
def _calcDepths(self): ''' return the cortical depth of each subpopulation ''' depths = self.layerBoundaries.mean(axis=1)[1:] depth_y = [] for y in self.y: if y in ['p23', 'b23', 'nb23']: depth_y = np.r_[depth_y, depths[0]] elif y in ['p4', 'ss4(L23)', 'ss4(L4)', 'b4', 'nb4']: depth_y = np.r_[depth_y, depths[1]] elif y in ['p5(L23)', 'p5(L56)', 'b5', 'nb5']: depth_y = np.r_[depth_y, depths[2]] elif y in ['p6(L4)', 'p6(L56)', 'b6', 'nb6']: depth_y = np.r_[depth_y, depths[3]] else: raise Exception, 'this aint right' return depth_y
[ "def", "_calcDepths", "(", "self", ")", ":", "depths", "=", "self", ".", "layerBoundaries", ".", "mean", "(", "axis", "=", "1", ")", "[", "1", ":", "]", "depth_y", "=", "[", "]", "for", "y", "in", "self", ".", "y", ":", "if", "y", "in", "[", ...
return the cortical depth of each subpopulation
[ "return", "the", "cortical", "depth", "of", "each", "subpopulation" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/example_microcircuit_params.py#L1054-L1073
INM-6/hybridLFPy
examples/example_microcircuit_params.py
multicompartment_params._yCellParams
def _yCellParams(self): ''' Return dict with parameters for each population. The main operation is filling in cell type specific morphology ''' #cell type specific parameters going into LFPy.Cell yCellParams = {} for layer, morpho, _, _ in self.y_zip_list: yCellParams.update({layer : self.cellParams.copy()}) yCellParams[layer].update({ 'morphology' : os.path.join(self.PATH_m_y, morpho), }) return yCellParams
python
def _yCellParams(self): ''' Return dict with parameters for each population. The main operation is filling in cell type specific morphology ''' #cell type specific parameters going into LFPy.Cell yCellParams = {} for layer, morpho, _, _ in self.y_zip_list: yCellParams.update({layer : self.cellParams.copy()}) yCellParams[layer].update({ 'morphology' : os.path.join(self.PATH_m_y, morpho), }) return yCellParams
[ "def", "_yCellParams", "(", "self", ")", ":", "#cell type specific parameters going into LFPy.Cell ", "yCellParams", "=", "{", "}", "for", "layer", ",", "morpho", ",", "_", ",", "_", "in", "self", ".", "y_zip_list", ":", "yCellParams", ".", "update", "(",...
Return dict with parameters for each population. The main operation is filling in cell type specific morphology
[ "Return", "dict", "with", "parameters", "for", "each", "population", ".", "The", "main", "operation", "is", "filling", "in", "cell", "type", "specific", "morphology" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/example_microcircuit_params.py#L1076-L1088
INM-6/hybridLFPy
hybridLFPy/population.py
PopulationSuper._set_up_savefolder
def _set_up_savefolder(self): """ Create catalogs for different file output to clean up savefolder. Non-public method Parameters ---------- None Returns ------- None """ if self.savefolder == None: return self.cells_path = os.path.join(self.savefolder, 'cells') if RANK == 0: if not os.path.isdir(self.cells_path): os.mkdir(self.cells_path) self.figures_path = os.path.join(self.savefolder, 'figures') if RANK == 0: if not os.path.isdir(self.figures_path): os.mkdir(self.figures_path) self.populations_path = os.path.join(self.savefolder, 'populations') if RANK == 0: if not os.path.isdir(self.populations_path): os.mkdir(self.populations_path) COMM.Barrier()
python
def _set_up_savefolder(self): """ Create catalogs for different file output to clean up savefolder. Non-public method Parameters ---------- None Returns ------- None """ if self.savefolder == None: return self.cells_path = os.path.join(self.savefolder, 'cells') if RANK == 0: if not os.path.isdir(self.cells_path): os.mkdir(self.cells_path) self.figures_path = os.path.join(self.savefolder, 'figures') if RANK == 0: if not os.path.isdir(self.figures_path): os.mkdir(self.figures_path) self.populations_path = os.path.join(self.savefolder, 'populations') if RANK == 0: if not os.path.isdir(self.populations_path): os.mkdir(self.populations_path) COMM.Barrier()
[ "def", "_set_up_savefolder", "(", "self", ")", ":", "if", "self", ".", "savefolder", "==", "None", ":", "return", "self", ".", "cells_path", "=", "os", ".", "path", ".", "join", "(", "self", ".", "savefolder", ",", "'cells'", ")", "if", "RANK", "==", ...
Create catalogs for different file output to clean up savefolder. Non-public method Parameters ---------- None Returns ------- None
[ "Create", "catalogs", "for", "different", "file", "output", "to", "clean", "up", "savefolder", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L255-L290
INM-6/hybridLFPy
hybridLFPy/population.py
PopulationSuper.run
def run(self): """ Distribute individual cell simulations across ranks. This method takes no keyword arguments. Parameters ---------- None Returns ------- None """ for cellindex in self.RANK_CELLINDICES: self.cellsim(cellindex) COMM.Barrier()
python
def run(self): """ Distribute individual cell simulations across ranks. This method takes no keyword arguments. Parameters ---------- None Returns ------- None """ for cellindex in self.RANK_CELLINDICES: self.cellsim(cellindex) COMM.Barrier()
[ "def", "run", "(", "self", ")", ":", "for", "cellindex", "in", "self", ".", "RANK_CELLINDICES", ":", "self", ".", "cellsim", "(", "cellindex", ")", "COMM", ".", "Barrier", "(", ")" ]
Distribute individual cell simulations across ranks. This method takes no keyword arguments. Parameters ---------- None Returns ------- None
[ "Distribute", "individual", "cell", "simulations", "across", "ranks", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L293-L313
INM-6/hybridLFPy
hybridLFPy/population.py
PopulationSuper.set_pop_soma_pos
def set_pop_soma_pos(self): """ Set `pop_soma_pos` using draw_rand_pos(). This method takes no keyword arguments. Parameters ---------- None Returns ------- numpy.ndarray (x,y,z) coordinates of each neuron in the population See also -------- PopulationSuper.draw_rand_pos """ tic = time() if RANK == 0: pop_soma_pos = self.draw_rand_pos( min_r = self.electrodeParams['r_z'], **self.populationParams) else: pop_soma_pos = None if RANK == 0: print('found cell positions in %.2f s' % (time()-tic)) return COMM.bcast(pop_soma_pos, root=0)
python
def set_pop_soma_pos(self): """ Set `pop_soma_pos` using draw_rand_pos(). This method takes no keyword arguments. Parameters ---------- None Returns ------- numpy.ndarray (x,y,z) coordinates of each neuron in the population See also -------- PopulationSuper.draw_rand_pos """ tic = time() if RANK == 0: pop_soma_pos = self.draw_rand_pos( min_r = self.electrodeParams['r_z'], **self.populationParams) else: pop_soma_pos = None if RANK == 0: print('found cell positions in %.2f s' % (time()-tic)) return COMM.bcast(pop_soma_pos, root=0)
[ "def", "set_pop_soma_pos", "(", "self", ")", ":", "tic", "=", "time", "(", ")", "if", "RANK", "==", "0", ":", "pop_soma_pos", "=", "self", ".", "draw_rand_pos", "(", "min_r", "=", "self", ".", "electrodeParams", "[", "'r_z'", "]", ",", "*", "*", "sel...
Set `pop_soma_pos` using draw_rand_pos(). This method takes no keyword arguments. Parameters ---------- None Returns ------- numpy.ndarray (x,y,z) coordinates of each neuron in the population See also -------- PopulationSuper.draw_rand_pos
[ "Set", "pop_soma_pos", "using", "draw_rand_pos", "()", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L397-L431
INM-6/hybridLFPy
hybridLFPy/population.py
PopulationSuper.calc_min_cell_interdist
def calc_min_cell_interdist(self, x, y, z): """ Calculate cell interdistance from input coordinates. Parameters ---------- x, y, z : numpy.ndarray xyz-coordinates of each cell-body. Returns ------- min_cell_interdist : np.nparray For each cell-body center, the distance to nearest neighboring cell """ min_cell_interdist = np.zeros(self.POPULATION_SIZE) for i in range(self.POPULATION_SIZE): cell_interdist = np.sqrt((x[i] - x)**2 + (y[i] - y)**2 + (z[i] - z)**2) cell_interdist[i] = np.inf min_cell_interdist[i] = cell_interdist.min() return min_cell_interdist
python
def calc_min_cell_interdist(self, x, y, z): """ Calculate cell interdistance from input coordinates. Parameters ---------- x, y, z : numpy.ndarray xyz-coordinates of each cell-body. Returns ------- min_cell_interdist : np.nparray For each cell-body center, the distance to nearest neighboring cell """ min_cell_interdist = np.zeros(self.POPULATION_SIZE) for i in range(self.POPULATION_SIZE): cell_interdist = np.sqrt((x[i] - x)**2 + (y[i] - y)**2 + (z[i] - z)**2) cell_interdist[i] = np.inf min_cell_interdist[i] = cell_interdist.min() return min_cell_interdist
[ "def", "calc_min_cell_interdist", "(", "self", ",", "x", ",", "y", ",", "z", ")", ":", "min_cell_interdist", "=", "np", ".", "zeros", "(", "self", ".", "POPULATION_SIZE", ")", "for", "i", "in", "range", "(", "self", ".", "POPULATION_SIZE", ")", ":", "c...
Calculate cell interdistance from input coordinates. Parameters ---------- x, y, z : numpy.ndarray xyz-coordinates of each cell-body. Returns ------- min_cell_interdist : np.nparray For each cell-body center, the distance to nearest neighboring cell
[ "Calculate", "cell", "interdistance", "from", "input", "coordinates", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L473-L499
INM-6/hybridLFPy
hybridLFPy/population.py
PopulationSuper.draw_rand_pos
def draw_rand_pos(self, radius, z_min, z_max, min_r=np.array([0]), min_cell_interdist=10., **args): """ Draw some random location within radius, z_min, z_max, and constrained by min_r and the minimum cell interdistance. Returned argument is a list of dicts [{'xpos', 'ypos', 'zpos'},]. Parameters ---------- radius : float Radius of population. z_min : float Lower z-boundary of population. z_max : float Upper z-boundary of population. min_r : numpy.ndarray Minimum distance to center axis as function of z. min_cell_interdist : float Minimum cell to cell interdistance. **args : keyword arguments Additional inputs that is being ignored. Returns ------- soma_pos : list List of dicts of len population size where dict have keys xpos, ypos, zpos specifying xyz-coordinates of cell at list entry `i`. See also -------- PopulationSuper.calc_min_cell_interdist """ x = (np.random.rand(self.POPULATION_SIZE)-0.5)*radius*2 y = (np.random.rand(self.POPULATION_SIZE)-0.5)*radius*2 z = np.random.rand(self.POPULATION_SIZE)*(z_max - z_min) + z_min min_r_z = {} min_r = np.array(min_r) if min_r.size > 0: if type(min_r) == type(np.array([])): j = 0 for j in range(min_r.shape[0]): min_r_z[j] = np.interp(z, min_r[0,], min_r[1,]) if j > 0: [w] = np.where(min_r_z[j] < min_r_z[j-1]) min_r_z[j][w] = min_r_z[j-1][w] minrz = min_r_z[j] else: minrz = np.interp(z, min_r[0], min_r[1]) R_z = np.sqrt(x**2 + y**2) #want to make sure that no somas are in the same place. 
cell_interdist = self.calc_min_cell_interdist(x, y, z) [u] = np.where(np.logical_or((R_z < minrz) != (R_z > radius), cell_interdist < min_cell_interdist)) while len(u) > 0: for i in range(len(u)): x[u[i]] = (np.random.rand()-0.5)*radius*2 y[u[i]] = (np.random.rand()-0.5)*radius*2 z[u[i]] = np.random.rand()*(z_max - z_min) + z_min if type(min_r) == type(()): for j in range(np.shape(min_r)[0]): min_r_z[j][u[i]] = \ np.interp(z[u[i]], min_r[0,], min_r[1,]) if j > 0: [w] = np.where(min_r_z[j] < min_r_z[j-1]) min_r_z[j][w] = min_r_z[j-1][w] minrz = min_r_z[j] else: minrz[u[i]] = np.interp(z[u[i]], min_r[0,], min_r[1,]) R_z = np.sqrt(x**2 + y**2) #want to make sure that no somas are in the same place. cell_interdist = self.calc_min_cell_interdist(x, y, z) [u] = np.where(np.logical_or((R_z < minrz) != (R_z > radius), cell_interdist < min_cell_interdist)) soma_pos = [] for i in range(self.POPULATION_SIZE): soma_pos.append({'xpos' : x[i], 'ypos' : y[i], 'zpos' : z[i]}) return soma_pos
python
def draw_rand_pos(self, radius, z_min, z_max, min_r=np.array([0]), min_cell_interdist=10., **args): """ Draw some random location within radius, z_min, z_max, and constrained by min_r and the minimum cell interdistance. Returned argument is a list of dicts [{'xpos', 'ypos', 'zpos'},]. Parameters ---------- radius : float Radius of population. z_min : float Lower z-boundary of population. z_max : float Upper z-boundary of population. min_r : numpy.ndarray Minimum distance to center axis as function of z. min_cell_interdist : float Minimum cell to cell interdistance. **args : keyword arguments Additional inputs that is being ignored. Returns ------- soma_pos : list List of dicts of len population size where dict have keys xpos, ypos, zpos specifying xyz-coordinates of cell at list entry `i`. See also -------- PopulationSuper.calc_min_cell_interdist """ x = (np.random.rand(self.POPULATION_SIZE)-0.5)*radius*2 y = (np.random.rand(self.POPULATION_SIZE)-0.5)*radius*2 z = np.random.rand(self.POPULATION_SIZE)*(z_max - z_min) + z_min min_r_z = {} min_r = np.array(min_r) if min_r.size > 0: if type(min_r) == type(np.array([])): j = 0 for j in range(min_r.shape[0]): min_r_z[j] = np.interp(z, min_r[0,], min_r[1,]) if j > 0: [w] = np.where(min_r_z[j] < min_r_z[j-1]) min_r_z[j][w] = min_r_z[j-1][w] minrz = min_r_z[j] else: minrz = np.interp(z, min_r[0], min_r[1]) R_z = np.sqrt(x**2 + y**2) #want to make sure that no somas are in the same place. 
cell_interdist = self.calc_min_cell_interdist(x, y, z) [u] = np.where(np.logical_or((R_z < minrz) != (R_z > radius), cell_interdist < min_cell_interdist)) while len(u) > 0: for i in range(len(u)): x[u[i]] = (np.random.rand()-0.5)*radius*2 y[u[i]] = (np.random.rand()-0.5)*radius*2 z[u[i]] = np.random.rand()*(z_max - z_min) + z_min if type(min_r) == type(()): for j in range(np.shape(min_r)[0]): min_r_z[j][u[i]] = \ np.interp(z[u[i]], min_r[0,], min_r[1,]) if j > 0: [w] = np.where(min_r_z[j] < min_r_z[j-1]) min_r_z[j][w] = min_r_z[j-1][w] minrz = min_r_z[j] else: minrz[u[i]] = np.interp(z[u[i]], min_r[0,], min_r[1,]) R_z = np.sqrt(x**2 + y**2) #want to make sure that no somas are in the same place. cell_interdist = self.calc_min_cell_interdist(x, y, z) [u] = np.where(np.logical_or((R_z < minrz) != (R_z > radius), cell_interdist < min_cell_interdist)) soma_pos = [] for i in range(self.POPULATION_SIZE): soma_pos.append({'xpos' : x[i], 'ypos' : y[i], 'zpos' : z[i]}) return soma_pos
[ "def", "draw_rand_pos", "(", "self", ",", "radius", ",", "z_min", ",", "z_max", ",", "min_r", "=", "np", ".", "array", "(", "[", "0", "]", ")", ",", "min_cell_interdist", "=", "10.", ",", "*", "*", "args", ")", ":", "x", "=", "(", "np", ".", "r...
Draw some random location within radius, z_min, z_max, and constrained by min_r and the minimum cell interdistance. Returned argument is a list of dicts [{'xpos', 'ypos', 'zpos'},]. Parameters ---------- radius : float Radius of population. z_min : float Lower z-boundary of population. z_max : float Upper z-boundary of population. min_r : numpy.ndarray Minimum distance to center axis as function of z. min_cell_interdist : float Minimum cell to cell interdistance. **args : keyword arguments Additional inputs that is being ignored. Returns ------- soma_pos : list List of dicts of len population size where dict have keys xpos, ypos, zpos specifying xyz-coordinates of cell at list entry `i`. See also -------- PopulationSuper.calc_min_cell_interdist
[ "Draw", "some", "random", "location", "within", "radius", "z_min", "z_max", "and", "constrained", "by", "min_r", "and", "the", "minimum", "cell", "interdistance", ".", "Returned", "argument", "is", "a", "list", "of", "dicts", "[", "{", "xpos", "ypos", "zpos"...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L502-L592
INM-6/hybridLFPy
hybridLFPy/population.py
PopulationSuper.calc_signal_sum
def calc_signal_sum(self, measure='LFP'): """ Superimpose each cell's contribution to the compound population signal, i.e., the population CSD or LFP Parameters ---------- measure : str {'LFP', 'CSD'}: Either 'LFP' or 'CSD'. Returns ------- numpy.ndarray The populations-specific compound signal. """ #compute the total LFP of cells on this RANK if self.RANK_CELLINDICES.size > 0: for i, cellindex in enumerate(self.RANK_CELLINDICES): if i == 0: data = self.output[cellindex][measure] else: data += self.output[cellindex][measure] else: data = np.zeros((len(self.electrodeParams['x']), self.cellParams['tstopms']/self.dt_output + 1), dtype=np.float32) #container for full LFP on RANK 0 if RANK == 0: DATA = np.zeros_like(data, dtype=np.float32) else: DATA = None #sum to RANK 0 using automatic type discovery with MPI COMM.Reduce(data, DATA, op=MPI.SUM, root=0) return DATA
python
def calc_signal_sum(self, measure='LFP'): """ Superimpose each cell's contribution to the compound population signal, i.e., the population CSD or LFP Parameters ---------- measure : str {'LFP', 'CSD'}: Either 'LFP' or 'CSD'. Returns ------- numpy.ndarray The populations-specific compound signal. """ #compute the total LFP of cells on this RANK if self.RANK_CELLINDICES.size > 0: for i, cellindex in enumerate(self.RANK_CELLINDICES): if i == 0: data = self.output[cellindex][measure] else: data += self.output[cellindex][measure] else: data = np.zeros((len(self.electrodeParams['x']), self.cellParams['tstopms']/self.dt_output + 1), dtype=np.float32) #container for full LFP on RANK 0 if RANK == 0: DATA = np.zeros_like(data, dtype=np.float32) else: DATA = None #sum to RANK 0 using automatic type discovery with MPI COMM.Reduce(data, DATA, op=MPI.SUM, root=0) return DATA
[ "def", "calc_signal_sum", "(", "self", ",", "measure", "=", "'LFP'", ")", ":", "#compute the total LFP of cells on this RANK", "if", "self", ".", "RANK_CELLINDICES", ".", "size", ">", "0", ":", "for", "i", ",", "cellindex", "in", "enumerate", "(", "self", ".",...
Superimpose each cell's contribution to the compound population signal, i.e., the population CSD or LFP Parameters ---------- measure : str {'LFP', 'CSD'}: Either 'LFP' or 'CSD'. Returns ------- numpy.ndarray The populations-specific compound signal.
[ "Superimpose", "each", "cell", "s", "contribution", "to", "the", "compound", "population", "signal", "i", ".", "e", ".", "the", "population", "CSD", "or", "LFP" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L595-L634
INM-6/hybridLFPy
hybridLFPy/population.py
PopulationSuper.collectSingleContribs
def collectSingleContribs(self, measure='LFP'):
    """
    Collect single cell data and save them to HDF5 file.
    The function will also return signals generated by all cells

    Parameters
    ----------
    measure : str
        {'LFP', 'CSD'}: Either 'LFP' or 'CSD'.

    Returns
    -------
    numpy.ndarray or None
        output of all neurons in population, axis 0 correspond to neuron
        index; None when recordSingleContribFrac is 0 or on RANKs != 0.

    Raises
    ------
    AssertionError
        if `self.recordSingleContribFrac` is outside the interval [0, 1].
    """
    # `raise ae, 'msg'` (as used before) is Python 2-only raise syntax and a
    # SyntaxError under Python 3; validate the fraction explicitly instead,
    # keeping AssertionError as the raised type.
    if not 0 <= self.recordSingleContribFrac <= 1:
        raise AssertionError(
            'recordSingleContribFrac {} not in [0, 1]'.format(
                self.recordSingleContribFrac))

    if not self.recordSingleContribFrac:
        return

    # reconstruct RANK_CELLINDICES of all RANKs for controlling communication
    if self.recordSingleContribFrac == 1.:
        SAMPLESIZE = self.POPULATION_SIZE
        RANK_CELLINDICES = []
        for i in range(SIZE):
            RANK_CELLINDICES += [self.CELLINDICES[
                self.CELLINDICES % SIZE == i]]
    else:
        SAMPLESIZE = int(self.recordSingleContribFrac*self.POPULATION_SIZE)
        RANK_CELLINDICES = []
        for i in range(SIZE):
            ids = self.CELLINDICES[self.CELLINDICES % SIZE == i]
            RANK_CELLINDICES += [ids[ids < SAMPLESIZE]]

    # gather data on this RANK
    if RANK_CELLINDICES[RANK].size > 0:
        for i, cellindex in enumerate(RANK_CELLINDICES[RANK]):
            if i == 0:
                data_temp = np.zeros([RANK_CELLINDICES[RANK].size] +
                                     list(self.output[cellindex
                                                      ][measure].shape),
                                     dtype=np.float32)
            data_temp[i, ] = self.output[cellindex][measure]

    if RANK == 0:
        # container of all output
        # NOTE(review): `cellindex` is reused from the gather loop above;
        # this assumes RANK 0 owns at least one sampled cell — confirm.
        data = np.zeros([SAMPLESIZE] +
                        list(self.output[cellindex][measure].shape),
                        dtype=np.float32)

        # fill in values generated on this RANK
        if RANK_CELLINDICES[0].size > 0:
            for j, k in enumerate(RANK_CELLINDICES[0]):
                data[k, ] = data_temp[j, ]

        # receive from every other RANK owning sampled cells
        for i in range(1, len(RANK_CELLINDICES)):
            if RANK_CELLINDICES[i].size > 0:
                data_temp = np.zeros([RANK_CELLINDICES[i].size] +
                                     list(self.output[cellindex
                                                      ][measure].shape),
                                     dtype=np.float32)
                COMM.Recv([data_temp, MPI.FLOAT], source=i, tag=13)

                # fill in values
                for j, k in enumerate(RANK_CELLINDICES[i]):
                    data[k, ] = data_temp[j, ]
    else:
        data = None
        if RANK_CELLINDICES[RANK].size > 0:
            # send this RANK's contribution to RANK 0
            COMM.Send([data_temp, MPI.FLOAT], dest=0, tag=13)

    if RANK == 0:
        # save all single-cell data to file
        fname = os.path.join(self.populations_path,
                             '%s_%ss.h5' % (self.y, measure))
        f = h5py.File(fname, 'w')
        f.create_dataset('data', data=data, compression=4)
        f['srate'] = self.output[0]['srate']
        f.close()
        assert(os.path.isfile(fname))
        print('file %s_%ss.h5 ok' % (self.y, measure))
    COMM.Barrier()

    return data
python
def collectSingleContribs(self, measure='LFP'):
    """
    Collect single cell data and save them to HDF5 file.
    The function will also return signals generated by all cells

    Parameters
    ----------
    measure : str
        {'LFP', 'CSD'}: Either 'LFP' or 'CSD'.

    Returns
    -------
    numpy.ndarray or None
        output of all neurons in population, axis 0 correspond to neuron
        index; None when recordSingleContribFrac is 0 or on RANKs != 0.

    Raises
    ------
    AssertionError
        if `self.recordSingleContribFrac` is outside the interval [0, 1].
    """
    # `raise ae, 'msg'` (as used before) is Python 2-only raise syntax and a
    # SyntaxError under Python 3; validate the fraction explicitly instead,
    # keeping AssertionError as the raised type.
    if not 0 <= self.recordSingleContribFrac <= 1:
        raise AssertionError(
            'recordSingleContribFrac {} not in [0, 1]'.format(
                self.recordSingleContribFrac))

    if not self.recordSingleContribFrac:
        return

    # reconstruct RANK_CELLINDICES of all RANKs for controlling communication
    if self.recordSingleContribFrac == 1.:
        SAMPLESIZE = self.POPULATION_SIZE
        RANK_CELLINDICES = []
        for i in range(SIZE):
            RANK_CELLINDICES += [self.CELLINDICES[
                self.CELLINDICES % SIZE == i]]
    else:
        SAMPLESIZE = int(self.recordSingleContribFrac*self.POPULATION_SIZE)
        RANK_CELLINDICES = []
        for i in range(SIZE):
            ids = self.CELLINDICES[self.CELLINDICES % SIZE == i]
            RANK_CELLINDICES += [ids[ids < SAMPLESIZE]]

    # gather data on this RANK
    if RANK_CELLINDICES[RANK].size > 0:
        for i, cellindex in enumerate(RANK_CELLINDICES[RANK]):
            if i == 0:
                data_temp = np.zeros([RANK_CELLINDICES[RANK].size] +
                                     list(self.output[cellindex
                                                      ][measure].shape),
                                     dtype=np.float32)
            data_temp[i, ] = self.output[cellindex][measure]

    if RANK == 0:
        # container of all output
        # NOTE(review): `cellindex` is reused from the gather loop above;
        # this assumes RANK 0 owns at least one sampled cell — confirm.
        data = np.zeros([SAMPLESIZE] +
                        list(self.output[cellindex][measure].shape),
                        dtype=np.float32)

        # fill in values generated on this RANK
        if RANK_CELLINDICES[0].size > 0:
            for j, k in enumerate(RANK_CELLINDICES[0]):
                data[k, ] = data_temp[j, ]

        # receive from every other RANK owning sampled cells
        for i in range(1, len(RANK_CELLINDICES)):
            if RANK_CELLINDICES[i].size > 0:
                data_temp = np.zeros([RANK_CELLINDICES[i].size] +
                                     list(self.output[cellindex
                                                      ][measure].shape),
                                     dtype=np.float32)
                COMM.Recv([data_temp, MPI.FLOAT], source=i, tag=13)

                # fill in values
                for j, k in enumerate(RANK_CELLINDICES[i]):
                    data[k, ] = data_temp[j, ]
    else:
        data = None
        if RANK_CELLINDICES[RANK].size > 0:
            # send this RANK's contribution to RANK 0
            COMM.Send([data_temp, MPI.FLOAT], dest=0, tag=13)

    if RANK == 0:
        # save all single-cell data to file
        fname = os.path.join(self.populations_path,
                             '%s_%ss.h5' % (self.y, measure))
        f = h5py.File(fname, 'w')
        f.create_dataset('data', data=data, compression=4)
        f['srate'] = self.output[0]['srate']
        f.close()
        assert(os.path.isfile(fname))
        print('file %s_%ss.h5 ok' % (self.y, measure))
    COMM.Barrier()

    return data
[ "def", "collectSingleContribs", "(", "self", ",", "measure", "=", "'LFP'", ")", ":", "try", ":", "assert", "(", "self", ".", "recordSingleContribFrac", "<=", "1", "and", "self", ".", "recordSingleContribFrac", ">=", "0", ")", "except", "AssertionError", "as", ...
Collect single cell data and save them to HDF5 file. The function will also return signals generated by all cells Parameters ---------- measure : str {'LFP', 'CSD'}: Either 'LFP' or 'CSD'. Returns ------- numpy.ndarray output of all neurons in population, axis 0 correspond to neuron ind
[ "Collect", "single", "cell", "data", "and", "save", "them", "to", "HDF5", "file", ".", "The", "function", "will", "also", "return", "signals", "generated", "by", "all", "cells" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L637-L735
INM-6/hybridLFPy
hybridLFPy/population.py
PopulationSuper.collect_data
def collect_data(self):
    """
    Collect LFPs, CSDs and soma traces from each simulated population,
    and save to file.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    # dump the per-cell contributions to file for each requested measure
    for quantity in ['LFP', 'CSD']:
        if quantity in self.savelist:
            self.collectSingleContribs(quantity)

    # compound population signals; these calls are MPI-collective and must
    # run on every RANK (the summed result lands on RANK 0 only)
    lfp = self.calc_signal_sum(measure='LFP')
    if self.calculateCSD:
        csd = self.calc_signal_sum(measure='CSD')

    if RANK == 0 and self.POPULATION_SIZE > 0:
        # write the compound LFP
        if 'LFP' in self.savelist:
            fname = os.path.join(self.populations_path,
                                 self.output_file.format(self.y, 'LFP')+'.h5')
            f = h5py.File(fname, 'w')
            f['srate'] = 1E3 / self.dt_output
            f.create_dataset('data', data=lfp, compression=4)
            f.close()
            del lfp
            assert(os.path.isfile(fname))
            print('save lfp ok')

        # write the compound CSD
        if 'CSD' in self.savelist and self.calculateCSD:
            fname = os.path.join(self.populations_path,
                                 self.output_file.format(self.y, 'CSD')+'.h5')
            f = h5py.File(fname, 'w')
            f['srate'] = 1E3 / self.dt_output
            f.create_dataset('data', data=csd, compression=4)
            f.close()
            del csd
            assert(os.path.isfile(fname))
            print('save CSD ok')

        # write the somatic positions as plain text
        soma_xyz = np.zeros((self.POPULATION_SIZE, 3))
        for row in range(self.POPULATION_SIZE):
            pos = self.pop_soma_pos[row]
            soma_xyz[row] = [pos['xpos'], pos['ypos'], pos['zpos']]
        fname = os.path.join(self.populations_path,
                             self.output_file.format(self.y, 'somapos.gdf'))
        np.savetxt(fname, soma_xyz)
        assert(os.path.isfile(fname))
        print('save somapos ok')

        # write the per-cell rotation angles to HDF5
        fname = os.path.join(self.populations_path,
                             self.output_file.format(self.y, 'rotations.h5'))
        f = h5py.File(fname, 'w')
        for axis in ['x', 'y', 'z']:
            f.create_dataset(axis, (len(self.rotations),))
        for row, rot in enumerate(self.rotations):
            for axis, angle in list(rot.items()):
                f[axis][row] = angle
        f.close()
        assert(os.path.isfile(fname))
        print('save rotations ok')

    # resync threads
    COMM.Barrier()
python
def collect_data(self):
    """
    Collect LFPs, CSDs and soma traces from each simulated population,
    and save to file.

    Parameters
    ----------
    None

    Returns
    -------
    None
    """
    # dump the per-cell contributions to file for each requested measure
    for quantity in ['LFP', 'CSD']:
        if quantity in self.savelist:
            self.collectSingleContribs(quantity)

    # compound population signals; these calls are MPI-collective and must
    # run on every RANK (the summed result lands on RANK 0 only)
    lfp = self.calc_signal_sum(measure='LFP')
    if self.calculateCSD:
        csd = self.calc_signal_sum(measure='CSD')

    if RANK == 0 and self.POPULATION_SIZE > 0:
        # write the compound LFP
        if 'LFP' in self.savelist:
            fname = os.path.join(self.populations_path,
                                 self.output_file.format(self.y, 'LFP')+'.h5')
            f = h5py.File(fname, 'w')
            f['srate'] = 1E3 / self.dt_output
            f.create_dataset('data', data=lfp, compression=4)
            f.close()
            del lfp
            assert(os.path.isfile(fname))
            print('save lfp ok')

        # write the compound CSD
        if 'CSD' in self.savelist and self.calculateCSD:
            fname = os.path.join(self.populations_path,
                                 self.output_file.format(self.y, 'CSD')+'.h5')
            f = h5py.File(fname, 'w')
            f['srate'] = 1E3 / self.dt_output
            f.create_dataset('data', data=csd, compression=4)
            f.close()
            del csd
            assert(os.path.isfile(fname))
            print('save CSD ok')

        # write the somatic positions as plain text
        soma_xyz = np.zeros((self.POPULATION_SIZE, 3))
        for row in range(self.POPULATION_SIZE):
            pos = self.pop_soma_pos[row]
            soma_xyz[row] = [pos['xpos'], pos['ypos'], pos['zpos']]
        fname = os.path.join(self.populations_path,
                             self.output_file.format(self.y, 'somapos.gdf'))
        np.savetxt(fname, soma_xyz)
        assert(os.path.isfile(fname))
        print('save somapos ok')

        # write the per-cell rotation angles to HDF5
        fname = os.path.join(self.populations_path,
                             self.output_file.format(self.y, 'rotations.h5'))
        f = h5py.File(fname, 'w')
        for axis in ['x', 'y', 'z']:
            f.create_dataset(axis, (len(self.rotations),))
        for row, rot in enumerate(self.rotations):
            for axis, angle in list(rot.items()):
                f[axis][row] = angle
        f.close()
        assert(os.path.isfile(fname))
        print('save rotations ok')

    # resync threads
    COMM.Barrier()
[ "def", "collect_data", "(", "self", ")", ":", "#collect some measurements resolved per file and save to file", "for", "measure", "in", "[", "'LFP'", ",", "'CSD'", "]", ":", "if", "measure", "in", "self", ".", "savelist", ":", "self", ".", "collectSingleContribs", ...
Collect LFPs, CSDs and soma traces from each simulated population, and save to file. Parameters ---------- None Returns ------- None
[ "Collect", "LFPs", "CSDs", "and", "soma", "traces", "from", "each", "simulated", "population", "and", "save", "to", "file", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L738-L825
INM-6/hybridLFPy
hybridLFPy/population.py
Population.get_all_synIdx
def get_all_synIdx(self):
    """
    Auxilliary function to set up class attributes containing
    synapse locations given as LFPy.Cell compartment indices

    This function takes no inputs.

    Parameters
    ----------
    None

    Returns
    -------
    synIdx : dict
        `output[cellindex][populationindex][layerindex]` numpy.ndarray of
        compartment indices.

    See also
    --------
    Population.get_synidx, Population.fetchSynIdxCell
    """
    t0 = time()

    # synapse locations for the cells simulated on this RANK
    synIdx = {}

    # Draws must be reproducible per cell regardless of which RANK owns the
    # cell: stash the global RNG state, seed deterministically per cell,
    # and restore the state once done.
    saved_state = np.random.get_state()

    for cellindex in self.RANK_CELLINDICES:
        np.random.seed(self.POPULATIONSEED + cellindex)
        synIdx[cellindex] = self.get_synidx(cellindex)

    np.random.set_state(saved_state)

    if RANK == 0:
        print('found synapse locations in %.2f seconds' % (time()-t0))

    # optionally report synapse counts per presynaptic population and layer
    if self.verbose:
        for cellindex in self.RANK_CELLINDICES:
            for i, synidx in enumerate(synIdx[cellindex]):
                print('to:\t%s\tcell:\t%i\tfrom:\t%s:' % (self.y,
                                                          cellindex,
                                                          self.X[i]))
                total = 0
                for idx in synidx:
                    total += idx.size
                    print('\t%i' % idx.size)
                print('\ttotal %i' % total)
    return synIdx
python
def get_all_synIdx(self):
    """
    Auxilliary function to set up class attributes containing
    synapse locations given as LFPy.Cell compartment indices

    This function takes no inputs.

    Parameters
    ----------
    None

    Returns
    -------
    synIdx : dict
        `output[cellindex][populationindex][layerindex]` numpy.ndarray of
        compartment indices.

    See also
    --------
    Population.get_synidx, Population.fetchSynIdxCell
    """
    t0 = time()

    # synapse locations for the cells simulated on this RANK
    synIdx = {}

    # Draws must be reproducible per cell regardless of which RANK owns the
    # cell: stash the global RNG state, seed deterministically per cell,
    # and restore the state once done.
    saved_state = np.random.get_state()

    for cellindex in self.RANK_CELLINDICES:
        np.random.seed(self.POPULATIONSEED + cellindex)
        synIdx[cellindex] = self.get_synidx(cellindex)

    np.random.set_state(saved_state)

    if RANK == 0:
        print('found synapse locations in %.2f seconds' % (time()-t0))

    # optionally report synapse counts per presynaptic population and layer
    if self.verbose:
        for cellindex in self.RANK_CELLINDICES:
            for i, synidx in enumerate(synIdx[cellindex]):
                print('to:\t%s\tcell:\t%i\tfrom:\t%s:' % (self.y,
                                                          cellindex,
                                                          self.X[i]))
                total = 0
                for idx in synidx:
                    total += idx.size
                    print('\t%i' % idx.size)
                print('\ttotal %i' % total)
    return synIdx
[ "def", "get_all_synIdx", "(", "self", ")", ":", "tic", "=", "time", "(", ")", "#containers for synapse idxs existing on this rank", "synIdx", "=", "{", "}", "#ok then, we will draw random numbers across ranks, which have to", "#be unique per cell. Now, we simply record the random s...
Auxilliary function to set up class attributes containing synapse locations given as LFPy.Cell compartment indices This function takes no inputs. Parameters ---------- None Returns ------- synIdx : dict `output[cellindex][populationindex][layerindex]` numpy.ndarray of compartment indices. See also -------- Population.get_synidx, Population.fetchSynIdxCell
[ "Auxilliary", "function", "to", "set", "up", "class", "attributes", "containing", "synapse", "locations", "given", "as", "LFPy", ".", "Cell", "compartment", "indices" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L971-L1031
INM-6/hybridLFPy
hybridLFPy/population.py
Population.get_all_SpCells
def get_all_SpCells(self):
    """
    For each postsynaptic cell existing on this RANK, load or compute
    the presynaptic cell index for each synaptic connection

    This function takes no kwargs.

    Parameters
    ----------
    None

    Returns
    -------
    SpCells : dict
        `output[cellindex][populationname][layerindex]`, np.array of
        presynaptic cell indices.

    See also
    --------
    Population.fetchSpCells
    """
    t0 = time()

    # presynaptic cell indices per postsynaptic cell on this RANK
    SpCells = {}

    # Per-cell seeding keeps the draws reproducible independent of RANK
    # ownership; preserve and later restore the global RNG state.
    saved_state = np.random.get_state()

    for cellindex in self.RANK_CELLINDICES:
        # offset by POPULATION_SIZE so these seeds differ from the ones
        # used when drawing synapse locations
        np.random.seed(self.POPULATIONSEED + cellindex +
                       self.POPULATION_SIZE)
        per_cell = {}
        for i, X in enumerate(self.X):
            per_cell[X] = self.fetchSpCells(self.networkSim.nodes[X],
                                            self.k_yXL[:, i])
        SpCells[cellindex] = per_cell

    np.random.set_state(saved_state)

    if RANK == 0:
        print('found presynaptic cells in %.2f seconds' % (time()-t0))

    return SpCells
python
def get_all_SpCells(self):
    """
    For each postsynaptic cell existing on this RANK, load or compute
    the presynaptic cell index for each synaptic connection

    This function takes no kwargs.

    Parameters
    ----------
    None

    Returns
    -------
    SpCells : dict
        `output[cellindex][populationname][layerindex]`, np.array of
        presynaptic cell indices.

    See also
    --------
    Population.fetchSpCells
    """
    t0 = time()

    # presynaptic cell indices per postsynaptic cell on this RANK
    SpCells = {}

    # Per-cell seeding keeps the draws reproducible independent of RANK
    # ownership; preserve and later restore the global RNG state.
    saved_state = np.random.get_state()

    for cellindex in self.RANK_CELLINDICES:
        # offset by POPULATION_SIZE so these seeds differ from the ones
        # used when drawing synapse locations
        np.random.seed(self.POPULATIONSEED + cellindex +
                       self.POPULATION_SIZE)
        per_cell = {}
        for i, X in enumerate(self.X):
            per_cell[X] = self.fetchSpCells(self.networkSim.nodes[X],
                                            self.k_yXL[:, i])
        SpCells[cellindex] = per_cell

    np.random.set_state(saved_state)

    if RANK == 0:
        print('found presynaptic cells in %.2f seconds' % (time()-t0))

    return SpCells
[ "def", "get_all_SpCells", "(", "self", ")", ":", "tic", "=", "time", "(", ")", "#container", "SpCells", "=", "{", "}", "#ok then, we will draw random numbers across ranks, which have to", "#be unique per cell. Now, we simply record the random state,", "#change the seed per cell, ...
For each postsynaptic cell existing on this RANK, load or compute the presynaptic cell index for each synaptic connection This function takes no kwargs. Parameters ---------- None Returns ------- SpCells : dict `output[cellindex][populationname][layerindex]`, np.array of presynaptic cell indices. See also -------- Population.fetchSpCells
[ "For", "each", "postsynaptic", "cell", "existing", "on", "this", "RANK", "load", "or", "compute", "the", "presynaptic", "cell", "index", "for", "each", "synaptic", "connection" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L1034-L1084
INM-6/hybridLFPy
hybridLFPy/population.py
Population.get_all_synDelays
def get_all_synDelays(self):
    """
    Create and load arrays of connection delays per connection on this rank

    Get random normally distributed synaptic delays,
    returns dict of nested list of same shape as SpCells.

    Delays are rounded to dt.

    This function takes no kwargs.

    Parameters
    ----------
    None

    Returns
    -------
    dict
        output[cellindex][populationname][layerindex]`, np.array of
        delays per connection.

    See also
    --------
    numpy.random.normal
    """
    t0 = time()

    # stash the global RNG state; seeds are set per cell below and the
    # state is restored once all draws are done
    saved_state = np.random.get_state()

    delays = {}

    for cellindex in self.RANK_CELLINDICES:
        # unique, RANK-independent seed per cell (offset by twice the
        # population size to avoid clashing with other per-cell draws)
        np.random.seed(self.POPULATIONSEED + cellindex +
                       2*self.POPULATION_SIZE)
        delays[cellindex] = {}
        for j, X in enumerate(self.X):
            delays[cellindex][X] = []
            for nsyn in self.k_yXL[:, j]:
                mu = self.synDelayLoc[j]
                mu /= self.dt
                sigma = self.synDelayScale[j]
                if sigma is not None:
                    sigma /= self.dt
                    # draw integer multiples of dt; redraw any value that
                    # truncated below one time step
                    delay = np.random.normal(mu, sigma, nsyn).astype(int)
                    while np.any(delay < 1):
                        inds = delay < 1
                        delay[inds] = np.random.normal(
                            mu, sigma, inds.sum()).astype(int)
                    delay = delay.astype(float)
                    delay *= self.dt
                else:
                    # no jitter requested; use the fixed delay throughout
                    delay = np.zeros(nsyn) + self.synDelayLoc[j]
                delays[cellindex][X].append(delay)

    np.random.set_state(saved_state)

    if RANK == 0:
        print('found delays in %.2f seconds' % (time()-t0))

    return delays
python
def get_all_synDelays(self):
    """
    Create and load arrays of connection delays per connection on this rank

    Get random normally distributed synaptic delays,
    returns dict of nested list of same shape as SpCells.

    Delays are rounded to dt.

    This function takes no kwargs.

    Parameters
    ----------
    None

    Returns
    -------
    dict
        output[cellindex][populationname][layerindex]`, np.array of
        delays per connection.

    See also
    --------
    numpy.random.normal
    """
    t0 = time()

    # stash the global RNG state; seeds are set per cell below and the
    # state is restored once all draws are done
    saved_state = np.random.get_state()

    delays = {}

    for cellindex in self.RANK_CELLINDICES:
        # unique, RANK-independent seed per cell (offset by twice the
        # population size to avoid clashing with other per-cell draws)
        np.random.seed(self.POPULATIONSEED + cellindex +
                       2*self.POPULATION_SIZE)
        delays[cellindex] = {}
        for j, X in enumerate(self.X):
            delays[cellindex][X] = []
            for nsyn in self.k_yXL[:, j]:
                mu = self.synDelayLoc[j]
                mu /= self.dt
                sigma = self.synDelayScale[j]
                if sigma is not None:
                    sigma /= self.dt
                    # draw integer multiples of dt; redraw any value that
                    # truncated below one time step
                    delay = np.random.normal(mu, sigma, nsyn).astype(int)
                    while np.any(delay < 1):
                        inds = delay < 1
                        delay[inds] = np.random.normal(
                            mu, sigma, inds.sum()).astype(int)
                    delay = delay.astype(float)
                    delay *= self.dt
                else:
                    # no jitter requested; use the fixed delay throughout
                    delay = np.zeros(nsyn) + self.synDelayLoc[j]
                delays[cellindex][X].append(delay)

    np.random.set_state(saved_state)

    if RANK == 0:
        print('found delays in %.2f seconds' % (time()-t0))

    return delays
[ "def", "get_all_synDelays", "(", "self", ")", ":", "tic", "=", "time", "(", ")", "#ok then, we will draw random numbers across ranks, which have to", "#be unique per cell. Now, we simply record the random state,", "#change the seed per cell, and put the original state back below.", "rand...
Create and load arrays of connection delays per connection on this rank Get random normally distributed synaptic delays, returns dict of nested list of same shape as SpCells. Delays are rounded to dt. This function takes no kwargs. Parameters ---------- None Returns ------- dict output[cellindex][populationname][layerindex]`, np.array of delays per connection. See also -------- numpy.random.normal
[ "Create", "and", "load", "arrays", "of", "connection", "delays", "per", "connection", "on", "this", "rank" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L1087-L1156
INM-6/hybridLFPy
hybridLFPy/population.py
Population.get_synidx
def get_synidx(self, cellindex):
    """
    Local function, draw and return synapse locations corresponding
    to a single cell, using a random seed set as
    `POPULATIONSEED` + `cellindex`.

    Parameters
    ----------
    cellindex : int
        Index of cell object.

    Returns
    -------
    synidx : dict
        `LFPy.Cell` compartment indices

    See also
    --------
    Population.get_all_synIdx, Population.fetchSynIdxCell
    """
    # instantiate the cell without running any simulation
    cell = self.cellsim(cellindex, return_just_cell=True)

    # draw synapse placements for each presynaptic population X
    synidx = {X: self.fetchSynIdxCell(cell=cell,
                                      nidx=self.k_yXL[:, i],
                                      synParams=self.synParams.copy())
              for i, X in enumerate(self.X)}
    return synidx
python
def get_synidx(self, cellindex):
    """
    Local function, draw and return synapse locations corresponding
    to a single cell, using a random seed set as
    `POPULATIONSEED` + `cellindex`.

    Parameters
    ----------
    cellindex : int
        Index of cell object.

    Returns
    -------
    synidx : dict
        `LFPy.Cell` compartment indices

    See also
    --------
    Population.get_all_synIdx, Population.fetchSynIdxCell
    """
    # instantiate the cell without running any simulation
    cell = self.cellsim(cellindex, return_just_cell=True)

    # draw synapse placements for each presynaptic population X
    synidx = {X: self.fetchSynIdxCell(cell=cell,
                                      nidx=self.k_yXL[:, i],
                                      synParams=self.synParams.copy())
              for i, X in enumerate(self.X)}
    return synidx
[ "def", "get_synidx", "(", "self", ",", "cellindex", ")", ":", "#create a cell instance", "cell", "=", "self", ".", "cellsim", "(", "cellindex", ",", "return_just_cell", "=", "True", ")", "#local containers", "synidx", "=", "{", "}", "#get synaptic placements and c...
Local function, draw and return synapse locations corresponding to a single cell, using a random seed set as `POPULATIONSEED` + `cellindex`. Parameters ---------- cellindex : int Index of cell object. Returns ------- synidx : dict `LFPy.Cell` compartment indices See also -------- Population.get_all_synIdx, Population.fetchSynIdxCell
[ "Local", "function", "draw", "and", "return", "synapse", "locations", "corresponding", "to", "a", "single", "cell", "using", "a", "random", "seed", "set", "as", "POPULATIONSEED", "+", "cellindex", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L1159-L1197
INM-6/hybridLFPy
hybridLFPy/population.py
Population.fetchSynIdxCell
def fetchSynIdxCell(self, cell, nidx, synParams):
    """
    Find possible synaptic placements for each cell
    As synapses are placed within layers with bounds determined by
    self.layerBoundaries, it will check this matrix accordingly, and
    use the probabilities from `self.connProbLayer to distribute.

    For each layer, the synapses are placed with probability normalized
    by membrane area of each compartment

    Parameters
    ----------
    cell : `LFPy.Cell` instance
    nidx : numpy.ndarray
        Numbers of synapses per presynaptic population X.
    synParams : which `LFPy.Synapse` parameters to use.

    Returns
    -------
    syn_idx : list
        List of arrays of synapse placements per connection.

    See also
    --------
    Population.get_all_synIdx, Population.get_synIdx, LFPy.Synapse
    """
    # one entry (array of segment indices) per layer
    syn_idx = []
    for i, bounds in enumerate(self.layerBoundaries):
        count = nidx[i]
        if count == 0:
            # no synapses in this layer; keep an empty placeholder so the
            # list stays aligned with the layers
            syn_idx.append(np.array([], dtype=int))
        else:
            picked = cell.get_rand_idx_area_norm(
                section=synParams['section'],
                nidx=count,
                z_min=bounds.min(),
                z_max=bounds.max())
            syn_idx.append(picked.astype('int16'))
    return syn_idx
python
def fetchSynIdxCell(self, cell, nidx, synParams):
    """
    Find possible synaptic placements for each cell
    As synapses are placed within layers with bounds determined by
    self.layerBoundaries, it will check this matrix accordingly, and
    use the probabilities from `self.connProbLayer to distribute.

    For each layer, the synapses are placed with probability normalized
    by membrane area of each compartment

    Parameters
    ----------
    cell : `LFPy.Cell` instance
    nidx : numpy.ndarray
        Numbers of synapses per presynaptic population X.
    synParams : which `LFPy.Synapse` parameters to use.

    Returns
    -------
    syn_idx : list
        List of arrays of synapse placements per connection.

    See also
    --------
    Population.get_all_synIdx, Population.get_synIdx, LFPy.Synapse
    """
    # one entry (array of segment indices) per layer
    syn_idx = []
    for i, bounds in enumerate(self.layerBoundaries):
        count = nidx[i]
        if count == 0:
            # no synapses in this layer; keep an empty placeholder so the
            # list stays aligned with the layers
            syn_idx.append(np.array([], dtype=int))
        else:
            picked = cell.get_rand_idx_area_norm(
                section=synParams['section'],
                nidx=count,
                z_min=bounds.min(),
                z_max=bounds.max())
            syn_idx.append(picked.astype('int16'))
    return syn_idx
[ "def", "fetchSynIdxCell", "(", "self", ",", "cell", ",", "nidx", ",", "synParams", ")", ":", "#segment indices in each layer is stored here, list of np.array", "syn_idx", "=", "[", "]", "#loop over layer bounds, find synapse locations", "for", "i", ",", "zz", "in", "enu...
Find possible synaptic placements for each cell As synapses are placed within layers with bounds determined by self.layerBoundaries, it will check this matrix accordingly, and use the probabilities from `self.connProbLayer to distribute. For each layer, the synapses are placed with probability normalized by membrane area of each compartment Parameters ---------- cell : `LFPy.Cell` instance nidx : numpy.ndarray Numbers of synapses per presynaptic population X. synParams : which `LFPy.Synapse` parameters to use. Returns ------- syn_idx : list List of arrays of synapse placements per connection. See also -------- Population.get_all_synIdx, Population.get_synIdx, LFPy.Synapse
[ "Find", "possible", "synaptic", "placements", "for", "each", "cell", "As", "synapses", "are", "placed", "within", "layers", "with", "bounds", "determined", "by", "self", ".", "layerBoundaries", "it", "will", "check", "this", "matrix", "accordingly", "and", "use"...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L1200-L1244
INM-6/hybridLFPy
hybridLFPy/population.py
Population.cellsim
def cellsim(self, cellindex, return_just_cell = False):
    """
    Do the actual simulations of LFP, using synaptic spike times from
    network simulation.

    Parameters
    ----------
    cellindex : int
        cell index between 0 and population size-1.
    return_just_cell : bool
        If True, return only the `LFPy.Cell` object
        if False, run full simulation, return None.

    Returns
    -------
    None or `LFPy.Cell` object

    See also
    --------
    hybridLFPy.csd, LFPy.Cell, LFPy.Synapse, LFPy.RecExtElectrode
    """
    tic = time()

    # instantiate, position and rotate the cell
    cell = LFPy.Cell(**self.cellParams)
    cell.set_pos(**self.pop_soma_pos[cellindex])
    cell.set_rotation(**self.rotations[cellindex])

    if return_just_cell:
        # with several cells, NEURON can only hold one cell at the time:
        # snapshot section names per section and per segment now
        allsecnames = []
        allsec = []
        for sec in cell.allseclist:
            allsecnames.append(sec.name())
            for seg in sec:
                allsec.append(sec.name())
        cell.allsecnames = allsecnames
        cell.allsec = allsec
        return cell
    else:
        self.insert_all_synapses(cellindex, cell)

        # electrode object where LFPs are calculated
        electrode = LFPy.RecExtElectrode(**self.electrodeParams)

        if self.calculateCSD:
            # temporary tvec/imem are needed by csd.true_lam_csd to derive
            # the CSD mapping coefficients; removed before simulating
            cell.tvec = np.arange(cell.totnsegs)
            cell.imem = np.eye(cell.totnsegs)
            csdcoeff = csd.true_lam_csd(cell,
                                        self.populationParams['radius'],
                                        electrode.z)
            csdcoeff *= 1E6  # nA mum^-3 -> muA mm^-3 conversion
            del cell.tvec, cell.imem
            cell.simulate(electrode, dotprodcoeffs=[csdcoeff],
                          **self.simulationParams)
            cell.CSD = helpers.decimate(cell.dotprodresults[0],
                                        q=self.decimatefrac)
        else:
            cell.simulate(electrode, **self.simulationParams)

        # downsample the LFP to the output resolution
        cell.LFP = helpers.decimate(electrode.LFP, q=self.decimatefrac)

        # keep the electrode geometry with the cell output
        cell.x = electrode.x
        cell.y = electrode.y
        cell.z = electrode.z
        cell.electrodecoeff = electrode.electrodecoeff

        # put all necessary cell output in output dict
        for attrbt in self.savelist:
            attr = getattr(cell, attrbt)
            if isinstance(attr, np.ndarray):
                self.output[cellindex][attrbt] = attr.astype('float32')
            else:
                try:
                    self.output[cellindex][attrbt] = attr
                except Exception:
                    # fall back to the string representation; the previous
                    # bare `except:` would also have trapped SystemExit and
                    # KeyboardInterrupt
                    self.output[cellindex][attrbt] = str(attr)

        self.output[cellindex]['srate'] = 1E3 / self.dt_output

        print('cell %s population %s in %.2f s' % (cellindex, self.y,
                                                   time()-tic))
python
def cellsim(self, cellindex, return_just_cell = False): """ Do the actual simulations of LFP, using synaptic spike times from network simulation. Parameters ---------- cellindex : int cell index between 0 and population size-1. return_just_cell : bool If True, return only the `LFPy.Cell` object if False, run full simulation, return None. Returns ------- None or `LFPy.Cell` object See also -------- hybridLFPy.csd, LFPy.Cell, LFPy.Synapse, LFPy.RecExtElectrode """ tic = time() cell = LFPy.Cell(**self.cellParams) cell.set_pos(**self.pop_soma_pos[cellindex]) cell.set_rotation(**self.rotations[cellindex]) if return_just_cell: #with several cells, NEURON can only hold one cell at the time allsecnames = [] allsec = [] for sec in cell.allseclist: allsecnames.append(sec.name()) for seg in sec: allsec.append(sec.name()) cell.allsecnames = allsecnames cell.allsec = allsec return cell else: self.insert_all_synapses(cellindex, cell) #electrode object where LFPs are calculated electrode = LFPy.RecExtElectrode(**self.electrodeParams) if self.calculateCSD: cell.tvec = np.arange(cell.totnsegs) cell.imem = np.eye(cell.totnsegs) csdcoeff = csd.true_lam_csd(cell, self.populationParams['radius'], electrode.z) csdcoeff *= 1E6 #nA mum^-3 -> muA mm^-3 conversion del cell.tvec, cell.imem cell.simulate(electrode, dotprodcoeffs=[csdcoeff], **self.simulationParams) cell.CSD = helpers.decimate(cell.dotprodresults[0], q=self.decimatefrac) else: cell.simulate(electrode, **self.simulationParams) cell.LFP = helpers.decimate(electrode.LFP, q=self.decimatefrac) cell.x = electrode.x cell.y = electrode.y cell.z = electrode.z cell.electrodecoeff = electrode.electrodecoeff #put all necessary cell output in output dict for attrbt in self.savelist: attr = getattr(cell, attrbt) if type(attr) == np.ndarray: self.output[cellindex][attrbt] = attr.astype('float32') else: try: self.output[cellindex][attrbt] = attr except: self.output[cellindex][attrbt] = str(attr) self.output[cellindex]['srate'] = 1E3 / 
self.dt_output print('cell %s population %s in %.2f s' % (cellindex, self.y, time()-tic))
[ "def", "cellsim", "(", "self", ",", "cellindex", ",", "return_just_cell", "=", "False", ")", ":", "tic", "=", "time", "(", ")", "cell", "=", "LFPy", ".", "Cell", "(", "*", "*", "self", ".", "cellParams", ")", "cell", ".", "set_pos", "(", "*", "*", ...
Do the actual simulations of LFP, using synaptic spike times from network simulation. Parameters ---------- cellindex : int cell index between 0 and population size-1. return_just_cell : bool If True, return only the `LFPy.Cell` object if False, run full simulation, return None. Returns ------- None or `LFPy.Cell` object See also -------- hybridLFPy.csd, LFPy.Cell, LFPy.Synapse, LFPy.RecExtElectrode
[ "Do", "the", "actual", "simulations", "of", "LFP", "using", "synaptic", "spike", "times", "from", "network", "simulation", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L1246-L1331
INM-6/hybridLFPy
hybridLFPy/population.py
Population.insert_all_synapses
def insert_all_synapses(self, cellindex, cell): """ Insert all synaptic events from all presynaptic layers on cell object with index `cellindex`. Parameters ---------- cellindex : int cell index in the population. cell : `LFPy.Cell` instance Postsynaptic target cell. Returns ------- None See also -------- Population.insert_synapse """ for i, X in enumerate(self.X): #range(self.k_yXL.shape[1]): synParams = self.synParams synParams.update({ 'weight' : self.J_yX[i], 'tau' : self.tau_yX[i], }) for j in range(len(self.synIdx[cellindex][X])): if self.synDelays is not None: synDelays = self.synDelays[cellindex][X][j] else: synDelays = None self.insert_synapses(cell = cell, cellindex = cellindex, synParams = synParams, idx = self.synIdx[cellindex][X][j], X=X, SpCell = self.SpCells[cellindex][X][j], synDelays = synDelays)
python
def insert_all_synapses(self, cellindex, cell): """ Insert all synaptic events from all presynaptic layers on cell object with index `cellindex`. Parameters ---------- cellindex : int cell index in the population. cell : `LFPy.Cell` instance Postsynaptic target cell. Returns ------- None See also -------- Population.insert_synapse """ for i, X in enumerate(self.X): #range(self.k_yXL.shape[1]): synParams = self.synParams synParams.update({ 'weight' : self.J_yX[i], 'tau' : self.tau_yX[i], }) for j in range(len(self.synIdx[cellindex][X])): if self.synDelays is not None: synDelays = self.synDelays[cellindex][X][j] else: synDelays = None self.insert_synapses(cell = cell, cellindex = cellindex, synParams = synParams, idx = self.synIdx[cellindex][X][j], X=X, SpCell = self.SpCells[cellindex][X][j], synDelays = synDelays)
[ "def", "insert_all_synapses", "(", "self", ",", "cellindex", ",", "cell", ")", ":", "for", "i", ",", "X", "in", "enumerate", "(", "self", ".", "X", ")", ":", "#range(self.k_yXL.shape[1]):", "synParams", "=", "self", ".", "synParams", "synParams", ".", "upd...
Insert all synaptic events from all presynaptic layers on cell object with index `cellindex`. Parameters ---------- cellindex : int cell index in the population. cell : `LFPy.Cell` instance Postsynaptic target cell. Returns ------- None See also -------- Population.insert_synapse
[ "Insert", "all", "synaptic", "events", "from", "all", "presynaptic", "layers", "on", "cell", "object", "with", "index", "cellindex", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L1334-L1376
INM-6/hybridLFPy
hybridLFPy/population.py
Population.insert_synapses
def insert_synapses(self, cell, cellindex, synParams, idx = np.array([]), X='EX', SpCell = np.array([]), synDelays = None): """ Insert synapse with `parameters`=`synparams` on cell=cell, with segment indexes given by `idx`. `SpCell` and `SpTimes` picked from Brunel network simulation Parameters ---------- cell : `LFPy.Cell` instance Postsynaptic target cell. cellindex : int Index of cell in population. synParams : dict Parameters passed to `LFPy.Synapse`. idx : numpy.ndarray Postsynaptic compartment indices. X : str presynaptic population name SpCell : numpy.ndarray Presynaptic spiking cells. synDelays : numpy.ndarray Per connection specific delays. Returns ------- None See also -------- Population.insert_all_synapses """ #Insert synapses in an iterative fashion try: spikes = self.networkSim.dbs[X].select(SpCell[:idx.size]) except AttributeError as ae: raise ae, 'could not open CachedNetwork database objects' #apply synaptic delays if synDelays is not None and idx.size > 0: for i, delay in enumerate(synDelays): if spikes[i].size > 0: spikes[i] += delay #create synapse events: for i in range(idx.size): if len(spikes[i]) == 0: pass #print 'no spike times, skipping network cell #%i' % SpCell[i] else: synParams.update({'idx' : idx[i]}) # Create synapse(s) and setting times using class LFPy.Synapse synapse = LFPy.Synapse(cell, **synParams) #SpCell is a vector, or do not exist synapse.set_spike_times(spikes[i] + cell.tstartms)
python
def insert_synapses(self, cell, cellindex, synParams, idx = np.array([]), X='EX', SpCell = np.array([]), synDelays = None): """ Insert synapse with `parameters`=`synparams` on cell=cell, with segment indexes given by `idx`. `SpCell` and `SpTimes` picked from Brunel network simulation Parameters ---------- cell : `LFPy.Cell` instance Postsynaptic target cell. cellindex : int Index of cell in population. synParams : dict Parameters passed to `LFPy.Synapse`. idx : numpy.ndarray Postsynaptic compartment indices. X : str presynaptic population name SpCell : numpy.ndarray Presynaptic spiking cells. synDelays : numpy.ndarray Per connection specific delays. Returns ------- None See also -------- Population.insert_all_synapses """ #Insert synapses in an iterative fashion try: spikes = self.networkSim.dbs[X].select(SpCell[:idx.size]) except AttributeError as ae: raise ae, 'could not open CachedNetwork database objects' #apply synaptic delays if synDelays is not None and idx.size > 0: for i, delay in enumerate(synDelays): if spikes[i].size > 0: spikes[i] += delay #create synapse events: for i in range(idx.size): if len(spikes[i]) == 0: pass #print 'no spike times, skipping network cell #%i' % SpCell[i] else: synParams.update({'idx' : idx[i]}) # Create synapse(s) and setting times using class LFPy.Synapse synapse = LFPy.Synapse(cell, **synParams) #SpCell is a vector, or do not exist synapse.set_spike_times(spikes[i] + cell.tstartms)
[ "def", "insert_synapses", "(", "self", ",", "cell", ",", "cellindex", ",", "synParams", ",", "idx", "=", "np", ".", "array", "(", "[", "]", ")", ",", "X", "=", "'EX'", ",", "SpCell", "=", "np", ".", "array", "(", "[", "]", ")", ",", "synDelays", ...
Insert synapse with `parameters`=`synparams` on cell=cell, with segment indexes given by `idx`. `SpCell` and `SpTimes` picked from Brunel network simulation Parameters ---------- cell : `LFPy.Cell` instance Postsynaptic target cell. cellindex : int Index of cell in population. synParams : dict Parameters passed to `LFPy.Synapse`. idx : numpy.ndarray Postsynaptic compartment indices. X : str presynaptic population name SpCell : numpy.ndarray Presynaptic spiking cells. synDelays : numpy.ndarray Per connection specific delays. Returns ------- None See also -------- Population.insert_all_synapses
[ "Insert", "synapse", "with", "parameters", "=", "synparams", "on", "cell", "=", "cell", "with", "segment", "indexes", "given", "by", "idx", ".", "SpCell", "and", "SpTimes", "picked", "from", "Brunel", "network", "simulation" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L1379-L1439
INM-6/hybridLFPy
hybridLFPy/population.py
Population.fetchSpCells
def fetchSpCells(self, nodes, numSyn): """ For N (nodes count) nestSim-cells draw POPULATION_SIZE x NTIMES random cell indexes in the population in nodes and broadcast these as `SpCell`. The returned argument is a list with len = numSyn.size of np.arrays, assumes `numSyn` is a list Parameters ---------- nodes : numpy.ndarray, dtype=int Node # of valid presynaptic neurons. numSyn : numpy.ndarray, dtype=int # of synapses per connection. Returns ------- SpCells : list presynaptic network-neuron indices See also -------- Population.fetch_all_SpCells """ SpCell = [] for size in numSyn: SpCell.append(np.random.randint(nodes.min(), nodes.max(), size=size).astype('int32')) return SpCell
python
def fetchSpCells(self, nodes, numSyn): """ For N (nodes count) nestSim-cells draw POPULATION_SIZE x NTIMES random cell indexes in the population in nodes and broadcast these as `SpCell`. The returned argument is a list with len = numSyn.size of np.arrays, assumes `numSyn` is a list Parameters ---------- nodes : numpy.ndarray, dtype=int Node # of valid presynaptic neurons. numSyn : numpy.ndarray, dtype=int # of synapses per connection. Returns ------- SpCells : list presynaptic network-neuron indices See also -------- Population.fetch_all_SpCells """ SpCell = [] for size in numSyn: SpCell.append(np.random.randint(nodes.min(), nodes.max(), size=size).astype('int32')) return SpCell
[ "def", "fetchSpCells", "(", "self", ",", "nodes", ",", "numSyn", ")", ":", "SpCell", "=", "[", "]", "for", "size", "in", "numSyn", ":", "SpCell", ".", "append", "(", "np", ".", "random", ".", "randint", "(", "nodes", ".", "min", "(", ")", ",", "n...
For N (nodes count) nestSim-cells draw POPULATION_SIZE x NTIMES random cell indexes in the population in nodes and broadcast these as `SpCell`. The returned argument is a list with len = numSyn.size of np.arrays, assumes `numSyn` is a list Parameters ---------- nodes : numpy.ndarray, dtype=int Node # of valid presynaptic neurons. numSyn : numpy.ndarray, dtype=int # of synapses per connection. Returns ------- SpCells : list presynaptic network-neuron indices See also -------- Population.fetch_all_SpCells
[ "For", "N", "(", "nodes", "count", ")", "nestSim", "-", "cells", "draw", "POPULATION_SIZE", "x", "NTIMES", "random", "cell", "indexes", "in", "the", "population", "in", "nodes", "and", "broadcast", "these", "as", "SpCell", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/population.py#L1442-L1474
alpha-xone/xone
xone/procs.py
run
def run(func, keys, max_procs=None, show_proc=False, affinity=None, **kwargs): """ Provide interface for multiprocessing Args: func: callable functions keys: keys in kwargs that want to use process max_procs: max number of processes show_proc: whether to show process affinity: CPU affinity **kwargs: kwargs for func """ if max_procs is None: max_procs = cpu_count() kw_arr = saturate_kwargs(keys=keys, **kwargs) if len(kw_arr) == 0: return if isinstance(affinity, int): win32process.SetProcessAffinityMask(win32api.GetCurrentProcess(), affinity) task_queue = queue.Queue() while len(kw_arr) > 0: for _ in range(max_procs): if len(kw_arr) == 0: break kw = kw_arr.pop(0) p = Process(target=func, kwargs=kw) p.start() sys.stdout.flush() task_queue.put(p) if show_proc: signature = ', '.join([f'{k}={v}' for k, v in kw.items()]) print(f'[{func.__name__}] ({signature})') while not task_queue.empty(): p = task_queue.get() p.join()
python
def run(func, keys, max_procs=None, show_proc=False, affinity=None, **kwargs): """ Provide interface for multiprocessing Args: func: callable functions keys: keys in kwargs that want to use process max_procs: max number of processes show_proc: whether to show process affinity: CPU affinity **kwargs: kwargs for func """ if max_procs is None: max_procs = cpu_count() kw_arr = saturate_kwargs(keys=keys, **kwargs) if len(kw_arr) == 0: return if isinstance(affinity, int): win32process.SetProcessAffinityMask(win32api.GetCurrentProcess(), affinity) task_queue = queue.Queue() while len(kw_arr) > 0: for _ in range(max_procs): if len(kw_arr) == 0: break kw = kw_arr.pop(0) p = Process(target=func, kwargs=kw) p.start() sys.stdout.flush() task_queue.put(p) if show_proc: signature = ', '.join([f'{k}={v}' for k, v in kw.items()]) print(f'[{func.__name__}] ({signature})') while not task_queue.empty(): p = task_queue.get() p.join()
[ "def", "run", "(", "func", ",", "keys", ",", "max_procs", "=", "None", ",", "show_proc", "=", "False", ",", "affinity", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "max_procs", "is", "None", ":", "max_procs", "=", "cpu_count", "(", ")", "...
Provide interface for multiprocessing Args: func: callable functions keys: keys in kwargs that want to use process max_procs: max number of processes show_proc: whether to show process affinity: CPU affinity **kwargs: kwargs for func
[ "Provide", "interface", "for", "multiprocessing" ]
train
https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/procs.py#L16-L49
alpha-xone/xone
xone/procs.py
saturate_kwargs
def saturate_kwargs(keys, **kwargs): """ Saturate all combinations of kwargs Args: keys: keys in kwargs that want to use process **kwargs: kwargs for func """ # Validate if keys are in kwargs and if they are iterable if isinstance(keys, str): keys = [keys] keys = [k for k in keys if k in kwargs and hasattr(kwargs.get(k, None), '__iter__')] if len(keys) == 0: return [] # Saturate coordinates of kwargs kw_corr = list(product(*(range(len(kwargs[k])) for k in keys))) # Append all possible values kw_arr = [] for corr in kw_corr: kw_arr.append( dict(zip(keys, [kwargs[keys[i]][corr[i]] for i in range(len(keys))])) ) # All combinations of kwargs of inputs for k in keys: kwargs.pop(k, None) kw_arr = [{**k, **kwargs} for k in kw_arr] return kw_arr
python
def saturate_kwargs(keys, **kwargs): """ Saturate all combinations of kwargs Args: keys: keys in kwargs that want to use process **kwargs: kwargs for func """ # Validate if keys are in kwargs and if they are iterable if isinstance(keys, str): keys = [keys] keys = [k for k in keys if k in kwargs and hasattr(kwargs.get(k, None), '__iter__')] if len(keys) == 0: return [] # Saturate coordinates of kwargs kw_corr = list(product(*(range(len(kwargs[k])) for k in keys))) # Append all possible values kw_arr = [] for corr in kw_corr: kw_arr.append( dict(zip(keys, [kwargs[keys[i]][corr[i]] for i in range(len(keys))])) ) # All combinations of kwargs of inputs for k in keys: kwargs.pop(k, None) kw_arr = [{**k, **kwargs} for k in kw_arr] return kw_arr
[ "def", "saturate_kwargs", "(", "keys", ",", "*", "*", "kwargs", ")", ":", "# Validate if keys are in kwargs and if they are iterable", "if", "isinstance", "(", "keys", ",", "str", ")", ":", "keys", "=", "[", "keys", "]", "keys", "=", "[", "k", "for", "k", ...
Saturate all combinations of kwargs Args: keys: keys in kwargs that want to use process **kwargs: kwargs for func
[ "Saturate", "all", "combinations", "of", "kwargs" ]
train
https://github.com/alpha-xone/xone/blob/68534a30f7f1760b220ba58040be3927f7dfbcf4/xone/procs.py#L52-L78
mthh/smoomapy
smoomapy/core.py
quick_idw
def quick_idw(input_geojson_points, variable_name, power, nb_class, nb_pts=10000, resolution=None, disc_func=None, mask=None, user_defined_breaks=None, variable_name2=None, output='GeoJSON', **kwargs): """ Function acting as a one-shot wrapper around SmoothIdw object. Read a file of point values and optionnaly a mask file, return the smoothed representation as GeoJSON or GeoDataFrame. Parameters ---------- input_geojson_points : str Path to file to use as input (Points/Polygons) or GeoDataFrame object, must contains a relevant numerical field. variable_name : str The name of the variable to use (numerical field only). power : int or float The power of the function. nb_class : int, optionnal The number of class, if unset will most likely be 8. (default: None) nb_pts: int, optionnal The number of points to use for the underlying grid. (default: 10000) resolution : int, optionnal The resolution to use (in meters), if not set a default resolution will be used in order to make a grid containing around 10000 pts (default: None). disc_func: str, optionnal The name of the classification function to be used to decide on which break values to use to create the contour layer. (default: None) mask : str, optionnal Path to the file (Polygons only) to use as clipping mask, can also be a GeoDataFrame (default: None). user_defined_breaks : list or tuple, optionnal A list of ordered break to use to construct the contours (overrides `nb_class` and `disc_func` values if any, default: None). variable_name2 : str, optionnal The name of the 2nd variable to use (numerical field only); values computed from this variable will be will be used as to divide values computed from the first variable (default: None) output : string, optionnal The type of output expected (not case-sensitive) in {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON"). Returns ------- smoothed_result : bytes or GeoDataFrame, The result, dumped as GeoJSON (utf-8 encoded) or as a GeoDataFrame. 
Examples -------- Basic usage, output to raw geojson (bytes): >>> result = quick_idw("some_file.geojson", "some_variable", power=2) More options, returning a GeoDataFrame: >>> smooth_gdf = quick_stewart("some_file.geojson", "some_variable", nb_class=8, disc_func="percentiles", output="GeoDataFrame") """ return SmoothIdw(input_geojson_points, variable_name, power, nb_pts, resolution, variable_name2, mask, **kwargs ).render(nb_class=nb_class, disc_func=disc_func, user_defined_breaks=user_defined_breaks, output=output)
python
def quick_idw(input_geojson_points, variable_name, power, nb_class, nb_pts=10000, resolution=None, disc_func=None, mask=None, user_defined_breaks=None, variable_name2=None, output='GeoJSON', **kwargs): """ Function acting as a one-shot wrapper around SmoothIdw object. Read a file of point values and optionnaly a mask file, return the smoothed representation as GeoJSON or GeoDataFrame. Parameters ---------- input_geojson_points : str Path to file to use as input (Points/Polygons) or GeoDataFrame object, must contains a relevant numerical field. variable_name : str The name of the variable to use (numerical field only). power : int or float The power of the function. nb_class : int, optionnal The number of class, if unset will most likely be 8. (default: None) nb_pts: int, optionnal The number of points to use for the underlying grid. (default: 10000) resolution : int, optionnal The resolution to use (in meters), if not set a default resolution will be used in order to make a grid containing around 10000 pts (default: None). disc_func: str, optionnal The name of the classification function to be used to decide on which break values to use to create the contour layer. (default: None) mask : str, optionnal Path to the file (Polygons only) to use as clipping mask, can also be a GeoDataFrame (default: None). user_defined_breaks : list or tuple, optionnal A list of ordered break to use to construct the contours (overrides `nb_class` and `disc_func` values if any, default: None). variable_name2 : str, optionnal The name of the 2nd variable to use (numerical field only); values computed from this variable will be will be used as to divide values computed from the first variable (default: None) output : string, optionnal The type of output expected (not case-sensitive) in {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON"). Returns ------- smoothed_result : bytes or GeoDataFrame, The result, dumped as GeoJSON (utf-8 encoded) or as a GeoDataFrame. 
Examples -------- Basic usage, output to raw geojson (bytes): >>> result = quick_idw("some_file.geojson", "some_variable", power=2) More options, returning a GeoDataFrame: >>> smooth_gdf = quick_stewart("some_file.geojson", "some_variable", nb_class=8, disc_func="percentiles", output="GeoDataFrame") """ return SmoothIdw(input_geojson_points, variable_name, power, nb_pts, resolution, variable_name2, mask, **kwargs ).render(nb_class=nb_class, disc_func=disc_func, user_defined_breaks=user_defined_breaks, output=output)
[ "def", "quick_idw", "(", "input_geojson_points", ",", "variable_name", ",", "power", ",", "nb_class", ",", "nb_pts", "=", "10000", ",", "resolution", "=", "None", ",", "disc_func", "=", "None", ",", "mask", "=", "None", ",", "user_defined_breaks", "=", "None...
Function acting as a one-shot wrapper around SmoothIdw object. Read a file of point values and optionnaly a mask file, return the smoothed representation as GeoJSON or GeoDataFrame. Parameters ---------- input_geojson_points : str Path to file to use as input (Points/Polygons) or GeoDataFrame object, must contains a relevant numerical field. variable_name : str The name of the variable to use (numerical field only). power : int or float The power of the function. nb_class : int, optionnal The number of class, if unset will most likely be 8. (default: None) nb_pts: int, optionnal The number of points to use for the underlying grid. (default: 10000) resolution : int, optionnal The resolution to use (in meters), if not set a default resolution will be used in order to make a grid containing around 10000 pts (default: None). disc_func: str, optionnal The name of the classification function to be used to decide on which break values to use to create the contour layer. (default: None) mask : str, optionnal Path to the file (Polygons only) to use as clipping mask, can also be a GeoDataFrame (default: None). user_defined_breaks : list or tuple, optionnal A list of ordered break to use to construct the contours (overrides `nb_class` and `disc_func` values if any, default: None). variable_name2 : str, optionnal The name of the 2nd variable to use (numerical field only); values computed from this variable will be will be used as to divide values computed from the first variable (default: None) output : string, optionnal The type of output expected (not case-sensitive) in {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON"). Returns ------- smoothed_result : bytes or GeoDataFrame, The result, dumped as GeoJSON (utf-8 encoded) or as a GeoDataFrame. 
Examples -------- Basic usage, output to raw geojson (bytes): >>> result = quick_idw("some_file.geojson", "some_variable", power=2) More options, returning a GeoDataFrame: >>> smooth_gdf = quick_stewart("some_file.geojson", "some_variable", nb_class=8, disc_func="percentiles", output="GeoDataFrame")
[ "Function", "acting", "as", "a", "one", "-", "shot", "wrapper", "around", "SmoothIdw", "object", ".", "Read", "a", "file", "of", "point", "values", "and", "optionnaly", "a", "mask", "file", "return", "the", "smoothed", "representation", "as", "GeoJSON", "or"...
train
https://github.com/mthh/smoomapy/blob/a603a62e76592e84509591fddcde8bfb1e826b84/smoomapy/core.py#L23-L99
mthh/smoomapy
smoomapy/core.py
quick_stewart
def quick_stewart(input_geojson_points, variable_name, span, beta=2, typefct='exponential',nb_class=None, nb_pts=10000, resolution=None, mask=None, user_defined_breaks=None, variable_name2=None, output="GeoJSON", **kwargs): """ Function acting as a one-shot wrapper around SmoothStewart object. Read a file of point values and optionnaly a mask file, return the smoothed representation as GeoJSON or GeoDataFrame. Parameters ---------- input_geojson_points : str Path to file to use as input (Points/Polygons) or GeoDataFrame object, must contains a relevant numerical field. variable_name : str The name of the variable to use (numerical field only). span : int The span (meters). beta : float The beta! typefct : str, optionnal The type of function in {"exponential", "pareto"} (default: "exponential"). nb_class : int, optionnal The number of class, if unset will most likely be 8 (default: None) nb_pts: int, optionnal The number of points to use for the underlying grid. (default: 10000) resolution : int, optionnal The resolution to use (in meters), if not set a default resolution will be used in order to make a grid containing around 10000 pts (default: None). mask : str, optionnal Path to the file (Polygons only) to use as clipping mask, can also be a GeoDataFrame (default: None). user_defined_breaks : list or tuple, optionnal A list of ordered break to use to construct the contours (override `nb_class` value if any, default: None). variable_name2 : str, optionnal The name of the 2nd variable to use (numerical field only); values computed from this variable will be will be used as to divide values computed from the first variable (default: None) output : string, optionnal The type of output expected (not case-sensitive) in {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON"). Returns ------- smoothed_result : bytes or GeoDataFrame, The result, dumped as GeoJSON (utf-8 encoded) or as a GeoDataFrame. 
Examples -------- Basic usage, output to raw geojson (bytes): >>> result = quick_stewart("some_file.geojson", "some_variable", span=12500, beta=3, typefct="exponential") More options, returning a GeoDataFrame: >>> smooth_gdf = quick_stewart("some_file.geojson", "some_variable", span=12500, beta=3, typefct="pareto", output="GeoDataFrame") """ return SmoothStewart( input_geojson_points, variable_name, span, beta, typefct, nb_pts, resolution, variable_name2, mask, **kwargs ).render( nb_class=nb_class, user_defined_breaks=user_defined_breaks, output=output)
python
def quick_stewart(input_geojson_points, variable_name, span, beta=2, typefct='exponential',nb_class=None, nb_pts=10000, resolution=None, mask=None, user_defined_breaks=None, variable_name2=None, output="GeoJSON", **kwargs): """ Function acting as a one-shot wrapper around SmoothStewart object. Read a file of point values and optionnaly a mask file, return the smoothed representation as GeoJSON or GeoDataFrame. Parameters ---------- input_geojson_points : str Path to file to use as input (Points/Polygons) or GeoDataFrame object, must contains a relevant numerical field. variable_name : str The name of the variable to use (numerical field only). span : int The span (meters). beta : float The beta! typefct : str, optionnal The type of function in {"exponential", "pareto"} (default: "exponential"). nb_class : int, optionnal The number of class, if unset will most likely be 8 (default: None) nb_pts: int, optionnal The number of points to use for the underlying grid. (default: 10000) resolution : int, optionnal The resolution to use (in meters), if not set a default resolution will be used in order to make a grid containing around 10000 pts (default: None). mask : str, optionnal Path to the file (Polygons only) to use as clipping mask, can also be a GeoDataFrame (default: None). user_defined_breaks : list or tuple, optionnal A list of ordered break to use to construct the contours (override `nb_class` value if any, default: None). variable_name2 : str, optionnal The name of the 2nd variable to use (numerical field only); values computed from this variable will be will be used as to divide values computed from the first variable (default: None) output : string, optionnal The type of output expected (not case-sensitive) in {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON"). Returns ------- smoothed_result : bytes or GeoDataFrame, The result, dumped as GeoJSON (utf-8 encoded) or as a GeoDataFrame. 
Examples -------- Basic usage, output to raw geojson (bytes): >>> result = quick_stewart("some_file.geojson", "some_variable", span=12500, beta=3, typefct="exponential") More options, returning a GeoDataFrame: >>> smooth_gdf = quick_stewart("some_file.geojson", "some_variable", span=12500, beta=3, typefct="pareto", output="GeoDataFrame") """ return SmoothStewart( input_geojson_points, variable_name, span, beta, typefct, nb_pts, resolution, variable_name2, mask, **kwargs ).render( nb_class=nb_class, user_defined_breaks=user_defined_breaks, output=output)
[ "def", "quick_stewart", "(", "input_geojson_points", ",", "variable_name", ",", "span", ",", "beta", "=", "2", ",", "typefct", "=", "'exponential'", ",", "nb_class", "=", "None", ",", "nb_pts", "=", "10000", ",", "resolution", "=", "None", ",", "mask", "="...
Function acting as a one-shot wrapper around SmoothStewart object. Read a file of point values and optionnaly a mask file, return the smoothed representation as GeoJSON or GeoDataFrame. Parameters ---------- input_geojson_points : str Path to file to use as input (Points/Polygons) or GeoDataFrame object, must contains a relevant numerical field. variable_name : str The name of the variable to use (numerical field only). span : int The span (meters). beta : float The beta! typefct : str, optionnal The type of function in {"exponential", "pareto"} (default: "exponential"). nb_class : int, optionnal The number of class, if unset will most likely be 8 (default: None) nb_pts: int, optionnal The number of points to use for the underlying grid. (default: 10000) resolution : int, optionnal The resolution to use (in meters), if not set a default resolution will be used in order to make a grid containing around 10000 pts (default: None). mask : str, optionnal Path to the file (Polygons only) to use as clipping mask, can also be a GeoDataFrame (default: None). user_defined_breaks : list or tuple, optionnal A list of ordered break to use to construct the contours (override `nb_class` value if any, default: None). variable_name2 : str, optionnal The name of the 2nd variable to use (numerical field only); values computed from this variable will be will be used as to divide values computed from the first variable (default: None) output : string, optionnal The type of output expected (not case-sensitive) in {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON"). Returns ------- smoothed_result : bytes or GeoDataFrame, The result, dumped as GeoJSON (utf-8 encoded) or as a GeoDataFrame. 
Examples -------- Basic usage, output to raw geojson (bytes): >>> result = quick_stewart("some_file.geojson", "some_variable", span=12500, beta=3, typefct="exponential") More options, returning a GeoDataFrame: >>> smooth_gdf = quick_stewart("some_file.geojson", "some_variable", span=12500, beta=3, typefct="pareto", output="GeoDataFrame")
[ "Function", "acting", "as", "a", "one", "-", "shot", "wrapper", "around", "SmoothStewart", "object", ".", "Read", "a", "file", "of", "point", "values", "and", "optionnaly", "a", "mask", "file", "return", "the", "smoothed", "representation", "as", "GeoJSON", ...
train
https://github.com/mthh/smoomapy/blob/a603a62e76592e84509591fddcde8bfb1e826b84/smoomapy/core.py#L101-L181
mthh/smoomapy
smoomapy/core.py
make_regular_points_with_no_res
def make_regular_points_with_no_res(bounds, nb_points=10000): """ Return a regular grid of points within `bounds` with the specified number of points (or a close approximate value). Parameters ---------- bounds : 4-floats tuple The bbox of the grid, as xmin, ymin, xmax, ymax. nb_points : int, optionnal The desired number of points (default: 10000) Returns ------- points : numpy.array An array of coordinates shape : 2-floats tuple The number of points on each dimension (width, height) """ minlon, minlat, maxlon, maxlat = bounds minlon, minlat, maxlon, maxlat = bounds offset_lon = (maxlon - minlon) / 8 offset_lat = (maxlat - minlat) / 8 minlon -= offset_lon maxlon += offset_lon minlat -= offset_lat maxlat += offset_lat nb_x = int(nb_points**0.5) nb_y = int(nb_points**0.5) return ( np.linspace(minlon, maxlon, nb_x), np.linspace(minlat, maxlat, nb_y), (nb_y, nb_x) )
python
def make_regular_points_with_no_res(bounds, nb_points=10000): """ Return a regular grid of points within `bounds` with the specified number of points (or a close approximate value). Parameters ---------- bounds : 4-floats tuple The bbox of the grid, as xmin, ymin, xmax, ymax. nb_points : int, optionnal The desired number of points (default: 10000) Returns ------- points : numpy.array An array of coordinates shape : 2-floats tuple The number of points on each dimension (width, height) """ minlon, minlat, maxlon, maxlat = bounds minlon, minlat, maxlon, maxlat = bounds offset_lon = (maxlon - minlon) / 8 offset_lat = (maxlat - minlat) / 8 minlon -= offset_lon maxlon += offset_lon minlat -= offset_lat maxlat += offset_lat nb_x = int(nb_points**0.5) nb_y = int(nb_points**0.5) return ( np.linspace(minlon, maxlon, nb_x), np.linspace(minlat, maxlat, nb_y), (nb_y, nb_x) )
[ "def", "make_regular_points_with_no_res", "(", "bounds", ",", "nb_points", "=", "10000", ")", ":", "minlon", ",", "minlat", ",", "maxlon", ",", "maxlat", "=", "bounds", "minlon", ",", "minlat", ",", "maxlon", ",", "maxlat", "=", "bounds", "offset_lon", "=", ...
Return a regular grid of points within `bounds` with the specified number of points (or a close approximate value). Parameters ---------- bounds : 4-floats tuple The bbox of the grid, as xmin, ymin, xmax, ymax. nb_points : int, optionnal The desired number of points (default: 10000) Returns ------- points : numpy.array An array of coordinates shape : 2-floats tuple The number of points on each dimension (width, height)
[ "Return", "a", "regular", "grid", "of", "points", "within", "bounds", "with", "the", "specified", "number", "of", "points", "(", "or", "a", "close", "approximate", "value", ")", "." ]
train
https://github.com/mthh/smoomapy/blob/a603a62e76592e84509591fddcde8bfb1e826b84/smoomapy/core.py#L184-L219
mthh/smoomapy
smoomapy/core.py
make_regular_points
def make_regular_points(bounds, resolution, longlat=True): """ Return a regular grid of points within `bounds` with the specified resolution. Parameters ---------- bounds : 4-floats tuple The bbox of the grid, as xmin, ymin, xmax, ymax. resolution : int The resolution to use, in the same unit as `bounds` Returns ------- points : numpy.array An array of coordinates shape : 2-floats tuple The number of points on each dimension (width, height) """ # xmin, ymin, xmax, ymax = bounds minlon, minlat, maxlon, maxlat = bounds offset_lon = (maxlon - minlon) / 8 offset_lat = (maxlat - minlat) / 8 minlon -= offset_lon maxlon += offset_lon minlat -= offset_lat maxlat += offset_lat if longlat: height = hav_dist( np.array([(maxlon + minlon) / 2, minlat]), np.array([(maxlon + minlon) / 2, maxlat]) ) width = hav_dist( np.array([minlon, (maxlat + minlat) / 2]), np.array([maxlon, (maxlat + minlat) / 2]) ) else: height = np.linalg.norm( np.array([(maxlon + minlon) / 2, minlat]) - np.array([(maxlon + minlon) / 2, maxlat])) width = np.linalg.norm( np.array([minlon, (maxlat + minlat) / 2]) - np.array([maxlon, (maxlat + minlat) / 2])) nb_x = int(round(width / resolution)) nb_y = int(round(height / resolution)) if nb_y * 0.6 > nb_x: nb_x = int(nb_x + nb_x / 3) elif nb_x * 0.6 > nb_y: nb_y = int(nb_y + nb_y / 3) return ( np.linspace(minlon, maxlon, nb_x), np.linspace(minlat, maxlat, nb_y), (nb_y, nb_x) )
python
def make_regular_points(bounds, resolution, longlat=True): """ Return a regular grid of points within `bounds` with the specified resolution. Parameters ---------- bounds : 4-floats tuple The bbox of the grid, as xmin, ymin, xmax, ymax. resolution : int The resolution to use, in the same unit as `bounds` Returns ------- points : numpy.array An array of coordinates shape : 2-floats tuple The number of points on each dimension (width, height) """ # xmin, ymin, xmax, ymax = bounds minlon, minlat, maxlon, maxlat = bounds offset_lon = (maxlon - minlon) / 8 offset_lat = (maxlat - minlat) / 8 minlon -= offset_lon maxlon += offset_lon minlat -= offset_lat maxlat += offset_lat if longlat: height = hav_dist( np.array([(maxlon + minlon) / 2, minlat]), np.array([(maxlon + minlon) / 2, maxlat]) ) width = hav_dist( np.array([minlon, (maxlat + minlat) / 2]), np.array([maxlon, (maxlat + minlat) / 2]) ) else: height = np.linalg.norm( np.array([(maxlon + minlon) / 2, minlat]) - np.array([(maxlon + minlon) / 2, maxlat])) width = np.linalg.norm( np.array([minlon, (maxlat + minlat) / 2]) - np.array([maxlon, (maxlat + minlat) / 2])) nb_x = int(round(width / resolution)) nb_y = int(round(height / resolution)) if nb_y * 0.6 > nb_x: nb_x = int(nb_x + nb_x / 3) elif nb_x * 0.6 > nb_y: nb_y = int(nb_y + nb_y / 3) return ( np.linspace(minlon, maxlon, nb_x), np.linspace(minlat, maxlat, nb_y), (nb_y, nb_x) )
[ "def", "make_regular_points", "(", "bounds", ",", "resolution", ",", "longlat", "=", "True", ")", ":", "# xmin, ymin, xmax, ymax = bounds", "minlon", ",", "minlat", ",", "maxlon", ",", "maxlat", "=", "bounds", "offset_lon", "=", "(", "maxlon", "-", "minlon", ...
Return a regular grid of points within `bounds` with the specified resolution. Parameters ---------- bounds : 4-floats tuple The bbox of the grid, as xmin, ymin, xmax, ymax. resolution : int The resolution to use, in the same unit as `bounds` Returns ------- points : numpy.array An array of coordinates shape : 2-floats tuple The number of points on each dimension (width, height)
[ "Return", "a", "regular", "grid", "of", "points", "within", "bounds", "with", "the", "specified", "resolution", "." ]
train
https://github.com/mthh/smoomapy/blob/a603a62e76592e84509591fddcde8bfb1e826b84/smoomapy/core.py#L222-L277
mthh/smoomapy
smoomapy/core.py
make_dist_mat
def make_dist_mat(xy1, xy2, longlat=True): """ Return a distance matrix between two set of coordinates. Use geometric distance (default) or haversine distance (if longlat=True). Parameters ---------- xy1 : numpy.array The first set of coordinates as [(x, y), (x, y), (x, y)]. xy2 : numpy.array The second set of coordinates as [(x, y), (x, y), (x, y)]. longlat : boolean, optionnal Whether the coordinates are in geographic (longitude/latitude) format or not (default: False) Returns ------- mat_dist : numpy.array The distance matrix between xy1 and xy2 """ if longlat: return hav_dist(xy1[:, None], xy2) else: d0 = np.subtract.outer(xy1[:, 0], xy2[:, 0]) d1 = np.subtract.outer(xy1[:, 1], xy2[:, 1]) return np.hypot(d0, d1)
python
def make_dist_mat(xy1, xy2, longlat=True): """ Return a distance matrix between two set of coordinates. Use geometric distance (default) or haversine distance (if longlat=True). Parameters ---------- xy1 : numpy.array The first set of coordinates as [(x, y), (x, y), (x, y)]. xy2 : numpy.array The second set of coordinates as [(x, y), (x, y), (x, y)]. longlat : boolean, optionnal Whether the coordinates are in geographic (longitude/latitude) format or not (default: False) Returns ------- mat_dist : numpy.array The distance matrix between xy1 and xy2 """ if longlat: return hav_dist(xy1[:, None], xy2) else: d0 = np.subtract.outer(xy1[:, 0], xy2[:, 0]) d1 = np.subtract.outer(xy1[:, 1], xy2[:, 1]) return np.hypot(d0, d1)
[ "def", "make_dist_mat", "(", "xy1", ",", "xy2", ",", "longlat", "=", "True", ")", ":", "if", "longlat", ":", "return", "hav_dist", "(", "xy1", "[", ":", ",", "None", "]", ",", "xy2", ")", "else", ":", "d0", "=", "np", ".", "subtract", ".", "outer...
Return a distance matrix between two set of coordinates. Use geometric distance (default) or haversine distance (if longlat=True). Parameters ---------- xy1 : numpy.array The first set of coordinates as [(x, y), (x, y), (x, y)]. xy2 : numpy.array The second set of coordinates as [(x, y), (x, y), (x, y)]. longlat : boolean, optionnal Whether the coordinates are in geographic (longitude/latitude) format or not (default: False) Returns ------- mat_dist : numpy.array The distance matrix between xy1 and xy2
[ "Return", "a", "distance", "matrix", "between", "two", "set", "of", "coordinates", ".", "Use", "geometric", "distance", "(", "default", ")", "or", "haversine", "distance", "(", "if", "longlat", "=", "True", ")", "." ]
train
https://github.com/mthh/smoomapy/blob/a603a62e76592e84509591fddcde8bfb1e826b84/smoomapy/core.py#L291-L316
mthh/smoomapy
smoomapy/core.py
hav_dist
def hav_dist(locs1, locs2): """ Return a distance matrix between two set of coordinates. Use geometric distance (default) or haversine distance (if longlat=True). Parameters ---------- locs1 : numpy.array The first set of coordinates as [(long, lat), (long, lat)]. locs2 : numpy.array The second set of coordinates as [(long, lat), (long, lat)]. Returns ------- mat_dist : numpy.array The distance matrix between locs1 and locs2 """ # locs1 = np.radians(locs1) # locs2 = np.radians(locs2) cos_lat1 = np.cos(locs1[..., 0]) cos_lat2 = np.cos(locs2[..., 0]) cos_lat_d = np.cos(locs1[..., 0] - locs2[..., 0]) cos_lon_d = np.cos(locs1[..., 1] - locs2[..., 1]) return 6367000 * np.arccos( cos_lat_d - cos_lat1 * cos_lat2 * (1 - cos_lon_d))
python
def hav_dist(locs1, locs2): """ Return a distance matrix between two set of coordinates. Use geometric distance (default) or haversine distance (if longlat=True). Parameters ---------- locs1 : numpy.array The first set of coordinates as [(long, lat), (long, lat)]. locs2 : numpy.array The second set of coordinates as [(long, lat), (long, lat)]. Returns ------- mat_dist : numpy.array The distance matrix between locs1 and locs2 """ # locs1 = np.radians(locs1) # locs2 = np.radians(locs2) cos_lat1 = np.cos(locs1[..., 0]) cos_lat2 = np.cos(locs2[..., 0]) cos_lat_d = np.cos(locs1[..., 0] - locs2[..., 0]) cos_lon_d = np.cos(locs1[..., 1] - locs2[..., 1]) return 6367000 * np.arccos( cos_lat_d - cos_lat1 * cos_lat2 * (1 - cos_lon_d))
[ "def", "hav_dist", "(", "locs1", ",", "locs2", ")", ":", "# locs1 = np.radians(locs1)", "# locs2 = np.radians(locs2)", "cos_lat1", "=", "np", ".", "cos", "(", "locs1", "[", "...", ",", "0", "]", ")", "cos_lat2", "=", "np", ".", "cos", "(", "locs2", "...
Return a distance matrix between two set of coordinates. Use geometric distance (default) or haversine distance (if longlat=True). Parameters ---------- locs1 : numpy.array The first set of coordinates as [(long, lat), (long, lat)]. locs2 : numpy.array The second set of coordinates as [(long, lat), (long, lat)]. Returns ------- mat_dist : numpy.array The distance matrix between locs1 and locs2
[ "Return", "a", "distance", "matrix", "between", "two", "set", "of", "coordinates", ".", "Use", "geometric", "distance", "(", "default", ")", "or", "haversine", "distance", "(", "if", "longlat", "=", "True", ")", "." ]
train
https://github.com/mthh/smoomapy/blob/a603a62e76592e84509591fddcde8bfb1e826b84/smoomapy/core.py#L319-L343
mthh/smoomapy
smoomapy/core.py
isopoly_to_gdf
def isopoly_to_gdf(collec_poly, levels, field_name="levels"): """ Convert a collection of matplotlib.contour.QuadContourSet to a GeoDataFrame Set an attribute `field_name` on each feature, according to `levels` values (`levels` must have the same number of features as the collection of contours) Parameters ---------- collection_polygons : matplotlib.contour.QuadContourSet The result of a grid interpolation from matplotlib. levels : array-like The value to use as attributes for the constructed GeoDataFrame. field_name : str The name of the field to be fill by values contained in `levels` variable (default: "levels"). Returns ------- gdf_contours : GeoDataFrame The result as a GeoDataFrame. """ polygons, data = [], [] for i, polygon in enumerate(collec_poly.collections): mpoly = [] for path in polygon.get_paths(): path.should_simplify = False poly = path.to_polygons() exterior, holes = [], [] if len(poly) > 0 and len(poly[0]) > 3: exterior = poly[0] if len(poly) > 1: holes = [h for h in poly[1:] if len(h) > 3] mpoly.append(Polygon(exterior, holes)) if len(mpoly) > 1: mpoly = MultiPolygon(mpoly) polygons.append(mpoly) data.append(levels[i]) elif len(mpoly) == 1: polygons.append(mpoly[0]) data.append(levels[i]) return GeoDataFrame(geometry=polygons, data=data, columns=[field_name])
python
def isopoly_to_gdf(collec_poly, levels, field_name="levels"): """ Convert a collection of matplotlib.contour.QuadContourSet to a GeoDataFrame Set an attribute `field_name` on each feature, according to `levels` values (`levels` must have the same number of features as the collection of contours) Parameters ---------- collection_polygons : matplotlib.contour.QuadContourSet The result of a grid interpolation from matplotlib. levels : array-like The value to use as attributes for the constructed GeoDataFrame. field_name : str The name of the field to be fill by values contained in `levels` variable (default: "levels"). Returns ------- gdf_contours : GeoDataFrame The result as a GeoDataFrame. """ polygons, data = [], [] for i, polygon in enumerate(collec_poly.collections): mpoly = [] for path in polygon.get_paths(): path.should_simplify = False poly = path.to_polygons() exterior, holes = [], [] if len(poly) > 0 and len(poly[0]) > 3: exterior = poly[0] if len(poly) > 1: holes = [h for h in poly[1:] if len(h) > 3] mpoly.append(Polygon(exterior, holes)) if len(mpoly) > 1: mpoly = MultiPolygon(mpoly) polygons.append(mpoly) data.append(levels[i]) elif len(mpoly) == 1: polygons.append(mpoly[0]) data.append(levels[i]) return GeoDataFrame(geometry=polygons, data=data, columns=[field_name])
[ "def", "isopoly_to_gdf", "(", "collec_poly", ",", "levels", ",", "field_name", "=", "\"levels\"", ")", ":", "polygons", ",", "data", "=", "[", "]", ",", "[", "]", "for", "i", ",", "polygon", "in", "enumerate", "(", "collec_poly", ".", "collections", ")",...
Convert a collection of matplotlib.contour.QuadContourSet to a GeoDataFrame Set an attribute `field_name` on each feature, according to `levels` values (`levels` must have the same number of features as the collection of contours) Parameters ---------- collection_polygons : matplotlib.contour.QuadContourSet The result of a grid interpolation from matplotlib. levels : array-like The value to use as attributes for the constructed GeoDataFrame. field_name : str The name of the field to be fill by values contained in `levels` variable (default: "levels"). Returns ------- gdf_contours : GeoDataFrame The result as a GeoDataFrame.
[ "Convert", "a", "collection", "of", "matplotlib", ".", "contour", ".", "QuadContourSet", "to", "a", "GeoDataFrame", "Set", "an", "attribute", "field_name", "on", "each", "feature", "according", "to", "levels", "values", "(", "levels", "must", "have", "the", "s...
train
https://github.com/mthh/smoomapy/blob/a603a62e76592e84509591fddcde8bfb1e826b84/smoomapy/core.py#L346-L390
mthh/smoomapy
smoomapy/core.py
BaseSmooth.render
def render(self, nb_class=8, disc_func=None, user_defined_breaks=None, output="GeoJSON", new_mask=False): """ Parameters ---------- nb_class : int, optionnal The number of class (default: 8). disc_func : str, optionnal The kind of data classification to be used (to be choosed in "equal_interval", "jenks", "percentiles, "head_tail_breaks" and "prog_geom"), default: None. user_defined_breaks : list or tuple, optionnal A list of ordered break to use to construct the contours (override `nb_class` and `disc_func` values if any) (default: None). output : string, optionnal The type of output expected (not case-sensitive) in {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON"). new_mask : str, optionnal Use a new mask by giving the path to the file (Polygons only) to use as clipping mask, can also be directly a GeoDataFrame (default: False). Returns ------- smoothed_result : bytes or GeoDataFrame The result, dumped as GeoJSON (utf-8 encoded) or as a GeoDataFrame. """ if disc_func and 'jenks' in disc_func and not jenks_breaks: raise ValueError( "Missing jenkspy package - could not use jenks breaks") zi = self.zi if isinstance(new_mask, (type(False), type(None))): if not self.use_mask: self.use_mask = False self.mask = None else: self.open_mask(new_mask, None) # We want levels with the first break value as the minimum of the # interpolated values and the last break value as the maximum of theses # values: if user_defined_breaks: levels = user_defined_breaks if levels[len(levels) - 1] < np.nanmax(zi): levels = levels + [np.nanmax(zi)] if levels[0] > np.nanmin(zi): levels = [np.nanmin(zi)] + levels else: levels = self.define_levels(nb_class, disc_func) # Ensure that the levels are unique/increasing # to avoid error from `contourf` : s_levels = set(levels) if len(s_levels) != len(levels): levels = list(s_levels) levels.sort() try: collec_poly = contourf( self.XI, self.YI, zi.reshape(tuple(reversed(self.shape))).T, levels, vmax=abs(np.nanmax(zi)), vmin=-abs(np.nanmin(zi))) # Retry 
without setting the levels : except ValueError: collec_poly = contourf( self.XI, self.YI, zi.reshape(tuple(reversed(self.shape))).T, vmax=abs(np.nanmax(zi)), vmin=-abs(np.nanmin(zi))) # Fetch the levels returned by contourf: levels = collec_poly.levels # Set the maximum value at the maximum value of the interpolated values: levels[-1] = np.nanmax(zi) # Transform contourf contours into a GeoDataFrame of (Multi)Polygons: res = isopoly_to_gdf(collec_poly, levels=levels[1:], field_name="max") if self.longlat: def f(x, y, z=None): return (x / 0.017453292519943295, y / 0.017453292519943295) res.geometry = [transform(f, g) for g in res.geometry] res.crs = self.proj_to_use # Set the min/max/center values of each class as properties # if this contour layer: res["min"] = [np.nanmin(zi)] + res["max"][0:len(res)-1].tolist() res["center"] = (res["min"] + res["max"]) / 2 # Compute the intersection between the contour layer and the mask layer: ix_max_ft = len(res) - 1 if self.use_mask: res.loc[0:ix_max_ft, "geometry"] = res.geometry.buffer( 0).intersection(unary_union(self.mask.geometry.buffer(0))) # res.loc[0:ix_max_ft, "geometry"] = res.geometry.buffer( # 0).intersection(self.poly_max_extend.buffer(-0.1)) # Repair geometries if necessary : if not all(t in ("MultiPolygon", "Polygon") for t in res.geom_type): res.loc[0:ix_max_ft, "geometry"] = \ [geom if geom.type in ("Polygon", "MultiPolygon") else MultiPolygon( [j for j in geom if j.type in ('Polygon', 'MultiPolygon')] ) for geom in res.geometry] if "geojson" in output.lower(): return res.to_crs({"init": "epsg:4326"}).to_json().encode() else: return res
python
def render(self, nb_class=8, disc_func=None, user_defined_breaks=None, output="GeoJSON", new_mask=False): """ Parameters ---------- nb_class : int, optionnal The number of class (default: 8). disc_func : str, optionnal The kind of data classification to be used (to be choosed in "equal_interval", "jenks", "percentiles, "head_tail_breaks" and "prog_geom"), default: None. user_defined_breaks : list or tuple, optionnal A list of ordered break to use to construct the contours (override `nb_class` and `disc_func` values if any) (default: None). output : string, optionnal The type of output expected (not case-sensitive) in {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON"). new_mask : str, optionnal Use a new mask by giving the path to the file (Polygons only) to use as clipping mask, can also be directly a GeoDataFrame (default: False). Returns ------- smoothed_result : bytes or GeoDataFrame The result, dumped as GeoJSON (utf-8 encoded) or as a GeoDataFrame. """ if disc_func and 'jenks' in disc_func and not jenks_breaks: raise ValueError( "Missing jenkspy package - could not use jenks breaks") zi = self.zi if isinstance(new_mask, (type(False), type(None))): if not self.use_mask: self.use_mask = False self.mask = None else: self.open_mask(new_mask, None) # We want levels with the first break value as the minimum of the # interpolated values and the last break value as the maximum of theses # values: if user_defined_breaks: levels = user_defined_breaks if levels[len(levels) - 1] < np.nanmax(zi): levels = levels + [np.nanmax(zi)] if levels[0] > np.nanmin(zi): levels = [np.nanmin(zi)] + levels else: levels = self.define_levels(nb_class, disc_func) # Ensure that the levels are unique/increasing # to avoid error from `contourf` : s_levels = set(levels) if len(s_levels) != len(levels): levels = list(s_levels) levels.sort() try: collec_poly = contourf( self.XI, self.YI, zi.reshape(tuple(reversed(self.shape))).T, levels, vmax=abs(np.nanmax(zi)), vmin=-abs(np.nanmin(zi))) # Retry 
without setting the levels : except ValueError: collec_poly = contourf( self.XI, self.YI, zi.reshape(tuple(reversed(self.shape))).T, vmax=abs(np.nanmax(zi)), vmin=-abs(np.nanmin(zi))) # Fetch the levels returned by contourf: levels = collec_poly.levels # Set the maximum value at the maximum value of the interpolated values: levels[-1] = np.nanmax(zi) # Transform contourf contours into a GeoDataFrame of (Multi)Polygons: res = isopoly_to_gdf(collec_poly, levels=levels[1:], field_name="max") if self.longlat: def f(x, y, z=None): return (x / 0.017453292519943295, y / 0.017453292519943295) res.geometry = [transform(f, g) for g in res.geometry] res.crs = self.proj_to_use # Set the min/max/center values of each class as properties # if this contour layer: res["min"] = [np.nanmin(zi)] + res["max"][0:len(res)-1].tolist() res["center"] = (res["min"] + res["max"]) / 2 # Compute the intersection between the contour layer and the mask layer: ix_max_ft = len(res) - 1 if self.use_mask: res.loc[0:ix_max_ft, "geometry"] = res.geometry.buffer( 0).intersection(unary_union(self.mask.geometry.buffer(0))) # res.loc[0:ix_max_ft, "geometry"] = res.geometry.buffer( # 0).intersection(self.poly_max_extend.buffer(-0.1)) # Repair geometries if necessary : if not all(t in ("MultiPolygon", "Polygon") for t in res.geom_type): res.loc[0:ix_max_ft, "geometry"] = \ [geom if geom.type in ("Polygon", "MultiPolygon") else MultiPolygon( [j for j in geom if j.type in ('Polygon', 'MultiPolygon')] ) for geom in res.geometry] if "geojson" in output.lower(): return res.to_crs({"init": "epsg:4326"}).to_json().encode() else: return res
[ "def", "render", "(", "self", ",", "nb_class", "=", "8", ",", "disc_func", "=", "None", ",", "user_defined_breaks", "=", "None", ",", "output", "=", "\"GeoJSON\"", ",", "new_mask", "=", "False", ")", ":", "if", "disc_func", "and", "'jenks'", "in", "disc_...
Parameters ---------- nb_class : int, optionnal The number of class (default: 8). disc_func : str, optionnal The kind of data classification to be used (to be choosed in "equal_interval", "jenks", "percentiles, "head_tail_breaks" and "prog_geom"), default: None. user_defined_breaks : list or tuple, optionnal A list of ordered break to use to construct the contours (override `nb_class` and `disc_func` values if any) (default: None). output : string, optionnal The type of output expected (not case-sensitive) in {"GeoJSON", "GeoDataFrame"} (default: "GeoJSON"). new_mask : str, optionnal Use a new mask by giving the path to the file (Polygons only) to use as clipping mask, can also be directly a GeoDataFrame (default: False). Returns ------- smoothed_result : bytes or GeoDataFrame The result, dumped as GeoJSON (utf-8 encoded) or as a GeoDataFrame.
[ "Parameters", "----------", "nb_class", ":", "int", "optionnal", "The", "number", "of", "class", "(", "default", ":", "8", ")", ".", "disc_func", ":", "str", "optionnal", "The", "kind", "of", "data", "classification", "to", "be", "used", "(", "to", "be", ...
train
https://github.com/mthh/smoomapy/blob/a603a62e76592e84509591fddcde8bfb1e826b84/smoomapy/core.py#L485-L598
tsroten/hanzidentifier
hanzidentifier.py
identify
def identify(s): """Identify what kind of Chinese characters a string contains. *s* is a string to examine. The string's Chinese characters are tested to see if they are compatible with the Traditional or Simplified characters systems, compatible with both, or contain a mixture of Traditional and Simplified characters. The :data:`TRADITIONAL`, :data:`SIMPLIFIED`, :data:`BOTH`, or :data:`MIXED` constants are returned to indicate the string's identity. If *s* contains no Chinese characters, then :data:`UNKNOWN` is returned. All characters in a string that aren't found in the CC-CEDICT dictionary are ignored. Because the Traditional and Simplified Chinese character systems overlap, a string containing Simplified characters could identify as :data:`SIMPLIFIED` or :data:`BOTH` depending on if the characters are also Traditional characters. To make testing the identity of a string easier, the functions :func:`is_traditional`, :func:`is_simplified`, and :func:`has_chinese` are provided. """ chinese = _get_hanzi(s) if not chinese: return UNKNOWN if chinese.issubset(_SHARED_CHARACTERS): return BOTH if chinese.issubset(_TRADITIONAL_CHARACTERS): return TRADITIONAL if chinese.issubset(_SIMPLIFIED_CHARACTERS): return SIMPLIFIED return MIXED
python
def identify(s): """Identify what kind of Chinese characters a string contains. *s* is a string to examine. The string's Chinese characters are tested to see if they are compatible with the Traditional or Simplified characters systems, compatible with both, or contain a mixture of Traditional and Simplified characters. The :data:`TRADITIONAL`, :data:`SIMPLIFIED`, :data:`BOTH`, or :data:`MIXED` constants are returned to indicate the string's identity. If *s* contains no Chinese characters, then :data:`UNKNOWN` is returned. All characters in a string that aren't found in the CC-CEDICT dictionary are ignored. Because the Traditional and Simplified Chinese character systems overlap, a string containing Simplified characters could identify as :data:`SIMPLIFIED` or :data:`BOTH` depending on if the characters are also Traditional characters. To make testing the identity of a string easier, the functions :func:`is_traditional`, :func:`is_simplified`, and :func:`has_chinese` are provided. """ chinese = _get_hanzi(s) if not chinese: return UNKNOWN if chinese.issubset(_SHARED_CHARACTERS): return BOTH if chinese.issubset(_TRADITIONAL_CHARACTERS): return TRADITIONAL if chinese.issubset(_SIMPLIFIED_CHARACTERS): return SIMPLIFIED return MIXED
[ "def", "identify", "(", "s", ")", ":", "chinese", "=", "_get_hanzi", "(", "s", ")", "if", "not", "chinese", ":", "return", "UNKNOWN", "if", "chinese", ".", "issubset", "(", "_SHARED_CHARACTERS", ")", ":", "return", "BOTH", "if", "chinese", ".", "issubset...
Identify what kind of Chinese characters a string contains. *s* is a string to examine. The string's Chinese characters are tested to see if they are compatible with the Traditional or Simplified characters systems, compatible with both, or contain a mixture of Traditional and Simplified characters. The :data:`TRADITIONAL`, :data:`SIMPLIFIED`, :data:`BOTH`, or :data:`MIXED` constants are returned to indicate the string's identity. If *s* contains no Chinese characters, then :data:`UNKNOWN` is returned. All characters in a string that aren't found in the CC-CEDICT dictionary are ignored. Because the Traditional and Simplified Chinese character systems overlap, a string containing Simplified characters could identify as :data:`SIMPLIFIED` or :data:`BOTH` depending on if the characters are also Traditional characters. To make testing the identity of a string easier, the functions :func:`is_traditional`, :func:`is_simplified`, and :func:`has_chinese` are provided.
[ "Identify", "what", "kind", "of", "Chinese", "characters", "a", "string", "contains", "." ]
train
https://github.com/tsroten/hanzidentifier/blob/40b565222363f5582fd06dc14833d2b86f09fa35/hanzidentifier.py#L27-L58
tsroten/hanzidentifier
hanzidentifier.py
is_traditional
def is_traditional(s): """Check if a string's Chinese characters are Traditional. This is equivalent to: >>> identify('foo') in (TRADITIONAL, BOTH) """ chinese = _get_hanzi(s) if not chinese: return False elif chinese.issubset(_SHARED_CHARACTERS): return True elif chinese.issubset(_TRADITIONAL_CHARACTERS): return True return False
python
def is_traditional(s): """Check if a string's Chinese characters are Traditional. This is equivalent to: >>> identify('foo') in (TRADITIONAL, BOTH) """ chinese = _get_hanzi(s) if not chinese: return False elif chinese.issubset(_SHARED_CHARACTERS): return True elif chinese.issubset(_TRADITIONAL_CHARACTERS): return True return False
[ "def", "is_traditional", "(", "s", ")", ":", "chinese", "=", "_get_hanzi", "(", "s", ")", "if", "not", "chinese", ":", "return", "False", "elif", "chinese", ".", "issubset", "(", "_SHARED_CHARACTERS", ")", ":", "return", "True", "elif", "chinese", ".", "...
Check if a string's Chinese characters are Traditional. This is equivalent to: >>> identify('foo') in (TRADITIONAL, BOTH)
[ "Check", "if", "a", "string", "s", "Chinese", "characters", "are", "Traditional", "." ]
train
https://github.com/tsroten/hanzidentifier/blob/40b565222363f5582fd06dc14833d2b86f09fa35/hanzidentifier.py#L71-L85
tsroten/hanzidentifier
hanzidentifier.py
is_simplified
def is_simplified(s): """Check if a string's Chinese characters are Simplified. This is equivalent to: >>> identify('foo') in (SIMPLIFIED, BOTH) """ chinese = _get_hanzi(s) if not chinese: return False elif chinese.issubset(_SHARED_CHARACTERS): return True elif chinese.issubset(_SIMPLIFIED_CHARACTERS): return True return False
python
def is_simplified(s): """Check if a string's Chinese characters are Simplified. This is equivalent to: >>> identify('foo') in (SIMPLIFIED, BOTH) """ chinese = _get_hanzi(s) if not chinese: return False elif chinese.issubset(_SHARED_CHARACTERS): return True elif chinese.issubset(_SIMPLIFIED_CHARACTERS): return True return False
[ "def", "is_simplified", "(", "s", ")", ":", "chinese", "=", "_get_hanzi", "(", "s", ")", "if", "not", "chinese", ":", "return", "False", "elif", "chinese", ".", "issubset", "(", "_SHARED_CHARACTERS", ")", ":", "return", "True", "elif", "chinese", ".", "i...
Check if a string's Chinese characters are Simplified. This is equivalent to: >>> identify('foo') in (SIMPLIFIED, BOTH)
[ "Check", "if", "a", "string", "s", "Chinese", "characters", "are", "Simplified", "." ]
train
https://github.com/tsroten/hanzidentifier/blob/40b565222363f5582fd06dc14833d2b86f09fa35/hanzidentifier.py#L88-L102
ianclegg/winrmlib
winrmlib/api/session.py
Session.get
def get(self, resource, operation_timeout=None, max_envelope_size=None, locale=None): """ resource can be a URL or a ResourceLocator """ if isinstance(resource, str): resource = ResourceLocator(resource) headers = self._build_headers(resource, Session.GetAction, operation_timeout, max_envelope_size, locale) self.service.invoke.set_options(tsoapheaders=headers) return self.service.invoke
python
def get(self, resource, operation_timeout=None, max_envelope_size=None, locale=None): """ resource can be a URL or a ResourceLocator """ if isinstance(resource, str): resource = ResourceLocator(resource) headers = self._build_headers(resource, Session.GetAction, operation_timeout, max_envelope_size, locale) self.service.invoke.set_options(tsoapheaders=headers) return self.service.invoke
[ "def", "get", "(", "self", ",", "resource", ",", "operation_timeout", "=", "None", ",", "max_envelope_size", "=", "None", ",", "locale", "=", "None", ")", ":", "if", "isinstance", "(", "resource", ",", "str", ")", ":", "resource", "=", "ResourceLocator", ...
resource can be a URL or a ResourceLocator
[ "resource", "can", "be", "a", "URL", "or", "a", "ResourceLocator" ]
train
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/session.py#L51-L60
ianclegg/winrmlib
winrmlib/api/session.py
Session.put
def put(self, resource, obj, operation_timeout=None, max_envelope_size=None, locale=None): """ resource can be a URL or a ResourceLocator """ headers = None return self.service.invoke(headers, obj)
python
def put(self, resource, obj, operation_timeout=None, max_envelope_size=None, locale=None): """ resource can be a URL or a ResourceLocator """ headers = None return self.service.invoke(headers, obj)
[ "def", "put", "(", "self", ",", "resource", ",", "obj", ",", "operation_timeout", "=", "None", ",", "max_envelope_size", "=", "None", ",", "locale", "=", "None", ")", ":", "headers", "=", "None", "return", "self", ".", "service", ".", "invoke", "(", "h...
resource can be a URL or a ResourceLocator
[ "resource", "can", "be", "a", "URL", "or", "a", "ResourceLocator" ]
train
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/session.py#L62-L68
ianclegg/winrmlib
winrmlib/api/session.py
Session.delete
def delete(self, resource, operation_timeout=None, max_envelope_size=None, locale=None): """ resource can be a URL or a ResourceLocator """ if isinstance(resource, str): resource = ResourceLocator(resource) headers = self._build_headers(resource, Session.DeleteAction, operation_timeout, max_envelope_size, locale) return self.service.invoke(headers, None)
python
def delete(self, resource, operation_timeout=None, max_envelope_size=None, locale=None): """ resource can be a URL or a ResourceLocator """ if isinstance(resource, str): resource = ResourceLocator(resource) headers = self._build_headers(resource, Session.DeleteAction, operation_timeout, max_envelope_size, locale) return self.service.invoke(headers, None)
[ "def", "delete", "(", "self", ",", "resource", ",", "operation_timeout", "=", "None", ",", "max_envelope_size", "=", "None", ",", "locale", "=", "None", ")", ":", "if", "isinstance", "(", "resource", ",", "str", ")", ":", "resource", "=", "ResourceLocator"...
resource can be a URL or a ResourceLocator
[ "resource", "can", "be", "a", "URL", "or", "a", "ResourceLocator" ]
train
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/session.py#L70-L79
ianclegg/winrmlib
winrmlib/api/session.py
Session.create
def create(self, resource, obj, operation_timeout=None, max_envelope_size=None, locale=None): """ resource can be a URL or a ResourceLocator """ if isinstance(resource, str): resource = ResourceLocator(resource) headers = self._build_headers(resource, Session.CreateAction, operation_timeout, max_envelope_size, locale) return self.service.invoke(headers, obj)
python
def create(self, resource, obj, operation_timeout=None, max_envelope_size=None, locale=None): """ resource can be a URL or a ResourceLocator """ if isinstance(resource, str): resource = ResourceLocator(resource) headers = self._build_headers(resource, Session.CreateAction, operation_timeout, max_envelope_size, locale) return self.service.invoke(headers, obj)
[ "def", "create", "(", "self", ",", "resource", ",", "obj", ",", "operation_timeout", "=", "None", ",", "max_envelope_size", "=", "None", ",", "locale", "=", "None", ")", ":", "if", "isinstance", "(", "resource", ",", "str", ")", ":", "resource", "=", "...
resource can be a URL or a ResourceLocator
[ "resource", "can", "be", "a", "URL", "or", "a", "ResourceLocator" ]
train
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/session.py#L81-L91
ianclegg/winrmlib
winrmlib/api/session.py
Session.command
def command(self, resource, obj, operation_timeout=None, max_envelope_size=None, locale=None): """ resource can be a URL or a ResourceLocator """ if isinstance(resource, str): resource = ResourceLocator(resource) headers = self._build_headers(resource, Session.CommandAction, operation_timeout, max_envelope_size, locale) return self.service.invoke(headers, obj)
python
def command(self, resource, obj, operation_timeout=None, max_envelope_size=None, locale=None): """ resource can be a URL or a ResourceLocator """ if isinstance(resource, str): resource = ResourceLocator(resource) headers = self._build_headers(resource, Session.CommandAction, operation_timeout, max_envelope_size, locale) return self.service.invoke(headers, obj)
[ "def", "command", "(", "self", ",", "resource", ",", "obj", ",", "operation_timeout", "=", "None", ",", "max_envelope_size", "=", "None", ",", "locale", "=", "None", ")", ":", "if", "isinstance", "(", "resource", ",", "str", ")", ":", "resource", "=", ...
resource can be a URL or a ResourceLocator
[ "resource", "can", "be", "a", "URL", "or", "a", "ResourceLocator" ]
train
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/session.py#L93-L103
ianclegg/winrmlib
winrmlib/api/session.py
Session.recieve
def recieve(self, resource, obj, operation_timeout=None, max_envelope_size=None, locale=None): """ resource can be a URL or a ResourceLocator """ if isinstance(resource, str): resource = ResourceLocator(resource) headers = self._build_headers(resource, Session.ReceiveAction, operation_timeout, max_envelope_size, locale) return self.service.invoke(headers, obj)
python
def recieve(self, resource, obj, operation_timeout=None, max_envelope_size=None, locale=None): """ resource can be a URL or a ResourceLocator """ if isinstance(resource, str): resource = ResourceLocator(resource) headers = self._build_headers(resource, Session.ReceiveAction, operation_timeout, max_envelope_size, locale) return self.service.invoke(headers, obj)
[ "def", "recieve", "(", "self", ",", "resource", ",", "obj", ",", "operation_timeout", "=", "None", ",", "max_envelope_size", "=", "None", ",", "locale", "=", "None", ")", ":", "if", "isinstance", "(", "resource", ",", "str", ")", ":", "resource", "=", ...
resource can be a URL or a ResourceLocator
[ "resource", "can", "be", "a", "URL", "or", "a", "ResourceLocator" ]
train
https://github.com/ianclegg/winrmlib/blob/489b3ce5d0e6a9a7301ba5d345ba82fa824c1431/winrmlib/api/session.py#L105-L115
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/analysis_params.py
params.set_default_fig_style
def set_default_fig_style(self): '''default figure size''' plt.rcParams.update({ 'figure.figsize' : [self.frontierswidth/self.inchpercm, self.frontierswidth/self.inchpercm], })
python
def set_default_fig_style(self): '''default figure size''' plt.rcParams.update({ 'figure.figsize' : [self.frontierswidth/self.inchpercm, self.frontierswidth/self.inchpercm], })
[ "def", "set_default_fig_style", "(", "self", ")", ":", "plt", ".", "rcParams", ".", "update", "(", "{", "'figure.figsize'", ":", "[", "self", ".", "frontierswidth", "/", "self", ".", "inchpercm", ",", "self", ".", "frontierswidth", "/", "self", ".", "inchp...
default figure size
[ "default", "figure", "size" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/analysis_params.py#L92-L96
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/analysis_params.py
params.set_large_fig_style
def set_large_fig_style(self): '''twice width figure size''' plt.rcParams.update({ 'figure.figsize' : [self.frontierswidth/self.inchpercm*2, self.frontierswidth/self.inchpercm], })
python
def set_large_fig_style(self): '''twice width figure size''' plt.rcParams.update({ 'figure.figsize' : [self.frontierswidth/self.inchpercm*2, self.frontierswidth/self.inchpercm], })
[ "def", "set_large_fig_style", "(", "self", ")", ":", "plt", ".", "rcParams", ".", "update", "(", "{", "'figure.figsize'", ":", "[", "self", ".", "frontierswidth", "/", "self", ".", "inchpercm", "*", "2", ",", "self", ".", "frontierswidth", "/", "self", "...
twice width figure size
[ "twice", "width", "figure", "size" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/analysis_params.py#L99-L103
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/analysis_params.py
params.set_broad_fig_style
def set_broad_fig_style(self): '''4 times width, 1.5 times height''' plt.rcParams.update({ 'figure.figsize' : [self.frontierswidth/self.inchpercm*4, self.frontierswidth/self.inchpercm*1.5], })
python
def set_broad_fig_style(self): '''4 times width, 1.5 times height''' plt.rcParams.update({ 'figure.figsize' : [self.frontierswidth/self.inchpercm*4, self.frontierswidth/self.inchpercm*1.5], })
[ "def", "set_broad_fig_style", "(", "self", ")", ":", "plt", ".", "rcParams", ".", "update", "(", "{", "'figure.figsize'", ":", "[", "self", ".", "frontierswidth", "/", "self", ".", "inchpercm", "*", "4", ",", "self", ".", "frontierswidth", "/", "self", "...
4 times width, 1.5 times height
[ "4", "times", "width", "1", ".", "5", "times", "height" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/analysis_params.py#L105-L109
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/analysis_params.py
params.set_enormous_fig_style
def set_enormous_fig_style(self): '''2 times width, 2 times height''' plt.rcParams.update({ 'figure.figsize' : [self.frontierswidth/self.inchpercm*2, self.frontierswidth/self.inchpercm*2], })
python
def set_enormous_fig_style(self): '''2 times width, 2 times height''' plt.rcParams.update({ 'figure.figsize' : [self.frontierswidth/self.inchpercm*2, self.frontierswidth/self.inchpercm*2], })
[ "def", "set_enormous_fig_style", "(", "self", ")", ":", "plt", ".", "rcParams", ".", "update", "(", "{", "'figure.figsize'", ":", "[", "self", ".", "frontierswidth", "/", "self", ".", "inchpercm", "*", "2", ",", "self", ".", "frontierswidth", "/", "self", ...
2 times width, 2 times height
[ "2", "times", "width", "2", "times", "height" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/analysis_params.py#L112-L117
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/analysis_params.py
params.set_PLOS_1column_fig_style
def set_PLOS_1column_fig_style(self, ratio=1): '''figure size corresponding to Plos 1 column''' plt.rcParams.update({ 'figure.figsize' : [self.PLOSwidth1Col,self.PLOSwidth1Col*ratio], })
python
def set_PLOS_1column_fig_style(self, ratio=1): '''figure size corresponding to Plos 1 column''' plt.rcParams.update({ 'figure.figsize' : [self.PLOSwidth1Col,self.PLOSwidth1Col*ratio], })
[ "def", "set_PLOS_1column_fig_style", "(", "self", ",", "ratio", "=", "1", ")", ":", "plt", ".", "rcParams", ".", "update", "(", "{", "'figure.figsize'", ":", "[", "self", ".", "PLOSwidth1Col", ",", "self", ".", "PLOSwidth1Col", "*", "ratio", "]", ",", "}...
figure size corresponding to Plos 1 column
[ "figure", "size", "corresponding", "to", "Plos", "1", "column" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/analysis_params.py#L120-L124
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/analysis_params.py
params.set_PLOS_2column_fig_style
def set_PLOS_2column_fig_style(self, ratio=1): '''figure size corresponding to Plos 2 columns''' plt.rcParams.update({ 'figure.figsize' : [self.PLOSwidth2Col, self.PLOSwidth2Col*ratio], })
python
def set_PLOS_2column_fig_style(self, ratio=1): '''figure size corresponding to Plos 2 columns''' plt.rcParams.update({ 'figure.figsize' : [self.PLOSwidth2Col, self.PLOSwidth2Col*ratio], })
[ "def", "set_PLOS_2column_fig_style", "(", "self", ",", "ratio", "=", "1", ")", ":", "plt", ".", "rcParams", ".", "update", "(", "{", "'figure.figsize'", ":", "[", "self", ".", "PLOSwidth2Col", ",", "self", ".", "PLOSwidth2Col", "*", "ratio", "]", ",", "}...
figure size corresponding to Plos 2 columns
[ "figure", "size", "corresponding", "to", "Plos", "2", "columns" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/analysis_params.py#L127-L131
INM-6/hybridLFPy
hybridLFPy/postproc.py
PostProcess.run
def run(self): """ Perform the postprocessing steps, computing compound signals from cell-specific output files. """ if RANK == 0: if 'LFP' in self.savelist: #get the per population LFPs and total LFP from all populations: self.LFPdict, self.LFPsum = self.calc_lfp() self.LFPdictLayer = self.calc_lfp_layer() #save global LFP sum, and from L23E, L4I etc.: f = h5py.File(os.path.join(self.savefolder, self.compound_file.format('LFP') ), 'w') f['srate'] = 1E3 / self.dt_output f.create_dataset('data', data=self.LFPsum, compression=4) f.close() for key, value in list(self.LFPdictLayer.items()): f = h5py.File(os.path.join(self.populations_path, self.output_file.format(key, 'LFP.h5') ), 'w') f['srate'] = 1E3 / self.dt_output f.create_dataset('data', data=value, compression=4) f.close() if 'CSD' in self.savelist: #get the per population CSDs and total CSD from all populations: self.CSDdict, self.CSDsum = self.calc_csd() self.CSDdictLayer = self.calc_csd_layer() #save global CSD sum, and from L23E, L4I etc.: f = h5py.File(os.path.join(self.savefolder, self.compound_file.format('CSD')), 'w') f['srate'] = 1E3 / self.dt_output f.create_dataset('data', data=self.CSDsum, compression=4) f.close() for key, value in list(self.CSDdictLayer.items()): f = h5py.File(os.path.join(self.populations_path, self.output_file.format(key, 'CSD.h5') ), 'w') f['srate'] = 1E3 / self.dt_output f.create_dataset('data', data=value, compression=4) f.close() else: pass
python
def run(self): """ Perform the postprocessing steps, computing compound signals from cell-specific output files. """ if RANK == 0: if 'LFP' in self.savelist: #get the per population LFPs and total LFP from all populations: self.LFPdict, self.LFPsum = self.calc_lfp() self.LFPdictLayer = self.calc_lfp_layer() #save global LFP sum, and from L23E, L4I etc.: f = h5py.File(os.path.join(self.savefolder, self.compound_file.format('LFP') ), 'w') f['srate'] = 1E3 / self.dt_output f.create_dataset('data', data=self.LFPsum, compression=4) f.close() for key, value in list(self.LFPdictLayer.items()): f = h5py.File(os.path.join(self.populations_path, self.output_file.format(key, 'LFP.h5') ), 'w') f['srate'] = 1E3 / self.dt_output f.create_dataset('data', data=value, compression=4) f.close() if 'CSD' in self.savelist: #get the per population CSDs and total CSD from all populations: self.CSDdict, self.CSDsum = self.calc_csd() self.CSDdictLayer = self.calc_csd_layer() #save global CSD sum, and from L23E, L4I etc.: f = h5py.File(os.path.join(self.savefolder, self.compound_file.format('CSD')), 'w') f['srate'] = 1E3 / self.dt_output f.create_dataset('data', data=self.CSDsum, compression=4) f.close() for key, value in list(self.CSDdictLayer.items()): f = h5py.File(os.path.join(self.populations_path, self.output_file.format(key, 'CSD.h5') ), 'w') f['srate'] = 1E3 / self.dt_output f.create_dataset('data', data=value, compression=4) f.close() else: pass
[ "def", "run", "(", "self", ")", ":", "if", "RANK", "==", "0", ":", "if", "'LFP'", "in", "self", ".", "savelist", ":", "#get the per population LFPs and total LFP from all populations:", "self", ".", "LFPdict", ",", "self", ".", "LFPsum", "=", "self", ".", "c...
Perform the postprocessing steps, computing compound signals from cell-specific output files.
[ "Perform", "the", "postprocessing", "steps", "computing", "compound", "signals", "from", "cell", "-", "specific", "output", "files", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/postproc.py#L104-L154
INM-6/hybridLFPy
hybridLFPy/postproc.py
PostProcess._set_up_savefolder
def _set_up_savefolder(self): """ Create catalogs for different file output to clean up savefolder. """ if not os.path.isdir(self.cells_path): os.mkdir(self.cells_path) if not os.path.isdir(self.figures_path): os.mkdir(self.figures_path) if not os.path.isdir(self.populations_path): os.mkdir(self.populations_path)
python
def _set_up_savefolder(self): """ Create catalogs for different file output to clean up savefolder. """ if not os.path.isdir(self.cells_path): os.mkdir(self.cells_path) if not os.path.isdir(self.figures_path): os.mkdir(self.figures_path) if not os.path.isdir(self.populations_path): os.mkdir(self.populations_path)
[ "def", "_set_up_savefolder", "(", "self", ")", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "self", ".", "cells_path", ")", ":", "os", ".", "mkdir", "(", "self", ".", "cells_path", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", ...
Create catalogs for different file output to clean up savefolder.
[ "Create", "catalogs", "for", "different", "file", "output", "to", "clean", "up", "savefolder", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/postproc.py#L160-L170
INM-6/hybridLFPy
hybridLFPy/postproc.py
PostProcess.calc_lfp
def calc_lfp(self): """ Sum all the LFP contributions from every cell type. """ LFParray = np.array([]) LFPdict = {} i = 0 for y in self.y: fil = os.path.join(self.populations_path, self.output_file.format(y, 'LFP.h5')) f = h5py.File(fil) if i == 0: LFParray = np.zeros((len(self.y), f['data'].shape[0], f['data'].shape[1])) #fill in LFParray[i, ] = f['data'].value LFPdict.update({y : f['data'].value}) f.close() i += 1 return LFPdict, LFParray.sum(axis=0)
python
def calc_lfp(self): """ Sum all the LFP contributions from every cell type. """ LFParray = np.array([]) LFPdict = {} i = 0 for y in self.y: fil = os.path.join(self.populations_path, self.output_file.format(y, 'LFP.h5')) f = h5py.File(fil) if i == 0: LFParray = np.zeros((len(self.y), f['data'].shape[0], f['data'].shape[1])) #fill in LFParray[i, ] = f['data'].value LFPdict.update({y : f['data'].value}) f.close() i += 1 return LFPdict, LFParray.sum(axis=0)
[ "def", "calc_lfp", "(", "self", ")", ":", "LFParray", "=", "np", ".", "array", "(", "[", "]", ")", "LFPdict", "=", "{", "}", "i", "=", "0", "for", "y", "in", "self", ".", "y", ":", "fil", "=", "os", ".", "path", ".", "join", "(", "self", "....
Sum all the LFP contributions from every cell type.
[ "Sum", "all", "the", "LFP", "contributions", "from", "every", "cell", "type", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/postproc.py#L173-L200
INM-6/hybridLFPy
hybridLFPy/postproc.py
PostProcess.calc_csd
def calc_csd(self): """ Sum all the CSD contributions from every layer. """ CSDarray = np.array([]) CSDdict = {} i = 0 for y in self.y: fil = os.path.join(self.populations_path, self.output_file.format(y, 'CSD.h5')) f = h5py.File(fil) if i == 0: CSDarray = np.zeros((len(self.y), f['data'].shape[0], f['data'].shape[1])) #fill in CSDarray[i, ] = f['data'].value CSDdict.update({y : f['data'].value}) f.close() i += 1 return CSDdict, CSDarray.sum(axis=0)
python
def calc_csd(self): """ Sum all the CSD contributions from every layer. """ CSDarray = np.array([]) CSDdict = {} i = 0 for y in self.y: fil = os.path.join(self.populations_path, self.output_file.format(y, 'CSD.h5')) f = h5py.File(fil) if i == 0: CSDarray = np.zeros((len(self.y), f['data'].shape[0], f['data'].shape[1])) #fill in CSDarray[i, ] = f['data'].value CSDdict.update({y : f['data'].value}) f.close() i += 1 return CSDdict, CSDarray.sum(axis=0)
[ "def", "calc_csd", "(", "self", ")", ":", "CSDarray", "=", "np", ".", "array", "(", "[", "]", ")", "CSDdict", "=", "{", "}", "i", "=", "0", "for", "y", "in", "self", ".", "y", ":", "fil", "=", "os", ".", "path", ".", "join", "(", "self", "....
Sum all the CSD contributions from every layer.
[ "Sum", "all", "the", "CSD", "contributions", "from", "every", "layer", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/postproc.py#L203-L230
INM-6/hybridLFPy
hybridLFPy/postproc.py
PostProcess.calc_lfp_layer
def calc_lfp_layer(self): """ Calculate the LFP from concatenated subpopulations residing in a certain layer, e.g all L4E pops are summed, according to the `mapping_Yy` attribute of the `hybridLFPy.Population` objects. """ LFPdict = {} lastY = None for Y, y in self.mapping_Yy: if lastY != Y: try: LFPdict.update({Y : self.LFPdict[y]}) except KeyError: pass else: try: LFPdict[Y] += self.LFPdict[y] except KeyError: pass lastY = Y return LFPdict
python
def calc_lfp_layer(self): """ Calculate the LFP from concatenated subpopulations residing in a certain layer, e.g all L4E pops are summed, according to the `mapping_Yy` attribute of the `hybridLFPy.Population` objects. """ LFPdict = {} lastY = None for Y, y in self.mapping_Yy: if lastY != Y: try: LFPdict.update({Y : self.LFPdict[y]}) except KeyError: pass else: try: LFPdict[Y] += self.LFPdict[y] except KeyError: pass lastY = Y return LFPdict
[ "def", "calc_lfp_layer", "(", "self", ")", ":", "LFPdict", "=", "{", "}", "lastY", "=", "None", "for", "Y", ",", "y", "in", "self", ".", "mapping_Yy", ":", "if", "lastY", "!=", "Y", ":", "try", ":", "LFPdict", ".", "update", "(", "{", "Y", ":", ...
Calculate the LFP from concatenated subpopulations residing in a certain layer, e.g all L4E pops are summed, according to the `mapping_Yy` attribute of the `hybridLFPy.Population` objects.
[ "Calculate", "the", "LFP", "from", "concatenated", "subpopulations", "residing", "in", "a", "certain", "layer", "e", ".", "g", "all", "L4E", "pops", "are", "summed", "according", "to", "the", "mapping_Yy", "attribute", "of", "the", "hybridLFPy", ".", "Populati...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/postproc.py#L233-L255
INM-6/hybridLFPy
hybridLFPy/postproc.py
PostProcess.calc_csd_layer
def calc_csd_layer(self): """ Calculate the CSD from concatenated subpopulations residing in a certain layer, e.g all L4E pops are summed, according to the `mapping_Yy` attribute of the `hybridLFPy.Population` objects. """ CSDdict = {} lastY = None for Y, y in self.mapping_Yy: if lastY != Y: try: CSDdict.update({Y : self.CSDdict[y]}) except KeyError: pass else: try: CSDdict[Y] += self.CSDdict[y] except KeyError: pass lastY = Y return CSDdict
python
def calc_csd_layer(self): """ Calculate the CSD from concatenated subpopulations residing in a certain layer, e.g all L4E pops are summed, according to the `mapping_Yy` attribute of the `hybridLFPy.Population` objects. """ CSDdict = {} lastY = None for Y, y in self.mapping_Yy: if lastY != Y: try: CSDdict.update({Y : self.CSDdict[y]}) except KeyError: pass else: try: CSDdict[Y] += self.CSDdict[y] except KeyError: pass lastY = Y return CSDdict
[ "def", "calc_csd_layer", "(", "self", ")", ":", "CSDdict", "=", "{", "}", "lastY", "=", "None", "for", "Y", ",", "y", "in", "self", ".", "mapping_Yy", ":", "if", "lastY", "!=", "Y", ":", "try", ":", "CSDdict", ".", "update", "(", "{", "Y", ":", ...
Calculate the CSD from concatenated subpopulations residing in a certain layer, e.g all L4E pops are summed, according to the `mapping_Yy` attribute of the `hybridLFPy.Population` objects.
[ "Calculate", "the", "CSD", "from", "concatenated", "subpopulations", "residing", "in", "a", "certain", "layer", "e", ".", "g", "all", "L4E", "pops", "are", "summed", "according", "to", "the", "mapping_Yy", "attribute", "of", "the", "hybridLFPy", ".", "Populati...
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/postproc.py#L258-L280
INM-6/hybridLFPy
hybridLFPy/postproc.py
PostProcess.create_tar_archive
def create_tar_archive(self): """ Create a tar archive of the main simulation outputs. """ #file filter EXCLUDE_FILES = glob.glob(os.path.join(self.savefolder, 'cells')) EXCLUDE_FILES += glob.glob(os.path.join(self.savefolder, 'populations', 'subsamples')) EXCLUDE_FILES += glob.glob(os.path.join(self.savefolder, 'raw_nest_output')) def filter_function(tarinfo): print(tarinfo.name) if len([f for f in EXCLUDE_FILES if os.path.split(tarinfo.name)[-1] in os.path.split(f)[-1]]) > 0 or \ len([f for f in EXCLUDE_FILES if os.path.split(tarinfo.path)[-1] in os.path.split(f)[-1]]) > 0: print('excluding %s' % tarinfo.name) return None else: return tarinfo if RANK == 0: print('creating archive %s' % (self.savefolder + '.tar')) #open file f = tarfile.open(self.savefolder + '.tar', 'w') #avoid adding files to repo as /scratch/$USER/hybrid_model/... arcname = os.path.split(self.savefolder)[-1] f.add(name=self.savefolder, arcname=arcname, filter=filter_function) f.close() #resync COMM.Barrier()
python
def create_tar_archive(self): """ Create a tar archive of the main simulation outputs. """ #file filter EXCLUDE_FILES = glob.glob(os.path.join(self.savefolder, 'cells')) EXCLUDE_FILES += glob.glob(os.path.join(self.savefolder, 'populations', 'subsamples')) EXCLUDE_FILES += glob.glob(os.path.join(self.savefolder, 'raw_nest_output')) def filter_function(tarinfo): print(tarinfo.name) if len([f for f in EXCLUDE_FILES if os.path.split(tarinfo.name)[-1] in os.path.split(f)[-1]]) > 0 or \ len([f for f in EXCLUDE_FILES if os.path.split(tarinfo.path)[-1] in os.path.split(f)[-1]]) > 0: print('excluding %s' % tarinfo.name) return None else: return tarinfo if RANK == 0: print('creating archive %s' % (self.savefolder + '.tar')) #open file f = tarfile.open(self.savefolder + '.tar', 'w') #avoid adding files to repo as /scratch/$USER/hybrid_model/... arcname = os.path.split(self.savefolder)[-1] f.add(name=self.savefolder, arcname=arcname, filter=filter_function) f.close() #resync COMM.Barrier()
[ "def", "create_tar_archive", "(", "self", ")", ":", "#file filter", "EXCLUDE_FILES", "=", "glob", ".", "glob", "(", "os", ".", "path", ".", "join", "(", "self", ".", "savefolder", ",", "'cells'", ")", ")", "EXCLUDE_FILES", "+=", "glob", ".", "glob", "(",...
Create a tar archive of the main simulation outputs.
[ "Create", "a", "tar", "archive", "of", "the", "main", "simulation", "outputs", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/hybridLFPy/postproc.py#L283-L318
staticdev/django-sorting-bootstrap
sorting_bootstrap/sort.py
sort_queryset
def sort_queryset(queryset, request, context=None): """ Returns a sorted queryset The context argument is only used in the template tag """ sort_by = request.GET.get('sort_by') if sort_by: if sort_by in [el.name for el in queryset.model._meta.fields]: queryset = queryset.order_by(sort_by) else: if sort_by in request.session: sort_by = request.session[sort_by] try: queryset = queryset.order_by(sort_by) except: raise # added else to fix a bug when using changelist # TODO: use less ifs and more standard sorting elif context is not None: # sorted ascending if sort_by[0] != '-': sort_by = context['cl'].list_display[int(sort_by) - 1] # sorted descending else: sort_by = '-' + context['cl'].list_display[abs(int(sort_by)) - 1] queryset = queryset.order_by(sort_by) return queryset
python
def sort_queryset(queryset, request, context=None): """ Returns a sorted queryset The context argument is only used in the template tag """ sort_by = request.GET.get('sort_by') if sort_by: if sort_by in [el.name for el in queryset.model._meta.fields]: queryset = queryset.order_by(sort_by) else: if sort_by in request.session: sort_by = request.session[sort_by] try: queryset = queryset.order_by(sort_by) except: raise # added else to fix a bug when using changelist # TODO: use less ifs and more standard sorting elif context is not None: # sorted ascending if sort_by[0] != '-': sort_by = context['cl'].list_display[int(sort_by) - 1] # sorted descending else: sort_by = '-' + context['cl'].list_display[abs(int(sort_by)) - 1] queryset = queryset.order_by(sort_by) return queryset
[ "def", "sort_queryset", "(", "queryset", ",", "request", ",", "context", "=", "None", ")", ":", "sort_by", "=", "request", ".", "GET", ".", "get", "(", "'sort_by'", ")", "if", "sort_by", ":", "if", "sort_by", "in", "[", "el", ".", "name", "for", "el"...
Returns a sorted queryset The context argument is only used in the template tag
[ "Returns", "a", "sorted", "queryset", "The", "context", "argument", "is", "only", "used", "in", "the", "template", "tag" ]
train
https://github.com/staticdev/django-sorting-bootstrap/blob/cfdc6e671b1b57aad04e44b041b9df10ee8288d3/sorting_bootstrap/sort.py#L1-L26
tkf/rash
rash/utils/iterutils.py
include_before
def include_before(predicate, num, iterative): """ Return elements in `iterative` including `num`-before elements. >>> list(include_before(lambda x: x == 'd', 2, 'abcded')) ['b', 'c', 'd', 'e', 'd'] """ (it0, it1) = itertools.tee(iterative) ps = _backward_shifted_predicate(predicate, num, it1) return (e for (e, p) in zip(it0, ps) if p)
python
def include_before(predicate, num, iterative): """ Return elements in `iterative` including `num`-before elements. >>> list(include_before(lambda x: x == 'd', 2, 'abcded')) ['b', 'c', 'd', 'e', 'd'] """ (it0, it1) = itertools.tee(iterative) ps = _backward_shifted_predicate(predicate, num, it1) return (e for (e, p) in zip(it0, ps) if p)
[ "def", "include_before", "(", "predicate", ",", "num", ",", "iterative", ")", ":", "(", "it0", ",", "it1", ")", "=", "itertools", ".", "tee", "(", "iterative", ")", "ps", "=", "_backward_shifted_predicate", "(", "predicate", ",", "num", ",", "it1", ")", ...
Return elements in `iterative` including `num`-before elements. >>> list(include_before(lambda x: x == 'd', 2, 'abcded')) ['b', 'c', 'd', 'e', 'd']
[ "Return", "elements", "in", "iterative", "including", "num", "-", "before", "elements", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/utils/iterutils.py#L71-L81
tkf/rash
rash/utils/iterutils.py
include_after
def include_after(predicate, num, iterative): """ Return elements in `iterative` including `num`-after elements. >>> list(include_after(lambda x: x == 'b', 2, 'abcbcde')) ['b', 'c', 'b', 'c', 'd'] """ (it0, it1) = itertools.tee(iterative) ps = _forward_shifted_predicate(predicate, num, it1) return (e for (e, p) in zip(it0, ps) if p)
python
def include_after(predicate, num, iterative): """ Return elements in `iterative` including `num`-after elements. >>> list(include_after(lambda x: x == 'b', 2, 'abcbcde')) ['b', 'c', 'b', 'c', 'd'] """ (it0, it1) = itertools.tee(iterative) ps = _forward_shifted_predicate(predicate, num, it1) return (e for (e, p) in zip(it0, ps) if p)
[ "def", "include_after", "(", "predicate", ",", "num", ",", "iterative", ")", ":", "(", "it0", ",", "it1", ")", "=", "itertools", ".", "tee", "(", "iterative", ")", "ps", "=", "_forward_shifted_predicate", "(", "predicate", ",", "num", ",", "it1", ")", ...
Return elements in `iterative` including `num`-after elements. >>> list(include_after(lambda x: x == 'b', 2, 'abcbcde')) ['b', 'c', 'b', 'c', 'd']
[ "Return", "elements", "in", "iterative", "including", "num", "-", "after", "elements", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/utils/iterutils.py#L84-L94
tkf/rash
rash/utils/iterutils.py
include_context
def include_context(predicate, num, iterative): """ Return elements in `iterative` including `num` before and after elements. >>> ''.join(include_context(lambda x: x == '!', 2, 'bb!aa__bb!aa')) 'bb!aabb!aa' """ (it0, it1, it2) = itertools.tee(iterative, 3) psf = _forward_shifted_predicate(predicate, num, it1) psb = _backward_shifted_predicate(predicate, num, it2) return (e for (e, pf, pb) in zip(it0, psf, psb) if pf or pb)
python
def include_context(predicate, num, iterative): """ Return elements in `iterative` including `num` before and after elements. >>> ''.join(include_context(lambda x: x == '!', 2, 'bb!aa__bb!aa')) 'bb!aabb!aa' """ (it0, it1, it2) = itertools.tee(iterative, 3) psf = _forward_shifted_predicate(predicate, num, it1) psb = _backward_shifted_predicate(predicate, num, it2) return (e for (e, pf, pb) in zip(it0, psf, psb) if pf or pb)
[ "def", "include_context", "(", "predicate", ",", "num", ",", "iterative", ")", ":", "(", "it0", ",", "it1", ",", "it2", ")", "=", "itertools", ".", "tee", "(", "iterative", ",", "3", ")", "psf", "=", "_forward_shifted_predicate", "(", "predicate", ",", ...
Return elements in `iterative` including `num` before and after elements. >>> ''.join(include_context(lambda x: x == '!', 2, 'bb!aa__bb!aa')) 'bb!aabb!aa'
[ "Return", "elements", "in", "iterative", "including", "num", "before", "and", "after", "elements", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/utils/iterutils.py#L97-L108
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/analysis.py
create_downsampled_data
def create_downsampled_data(params): ''' Creates one CSD or LFP file with downsampled data per cell type ''' maxsamples = 1 for data_type in ['LFP','CSD']: if RANK == 0: if not os.path.isdir(os.path.join(params.savefolder, 'populations', 'subsamples')): os.mkdir((os.path.join(params.savefolder,'populations','subsamples'))) COMM.Barrier() try: assert(ana_params.scaling <= params.recordSingleContribFrac) except AssertionError as ae: raise ae, 'scaling parameter must be less than simulation recordSingleContribFrac' samples = int(1. / ana_params.scaling) if samples > maxsamples: samples = maxsamples COUNTER = 0 for j, layer in enumerate(params.y_in_Y): # loop over layers for k, pop in enumerate(layer): # loop over populations for i, y in enumerate(pop): # loop over cell types if COUNTER % SIZE == RANK: # Load data fname = os.path.join(params.savefolder, 'populations', '%s_%ss.h5' \ % (y, data_type)) f = h5py.File(fname) print 'Load %s' % str(f.filename) raw_data = f['data'].value srate = f['srate'].value f.close() ## shuffle data #np.random.shuffle(raw_data) # sample size N = int(params.N_y[np.array(params.y) == y]*ana_params.scaling) try: assert(N <= raw_data.shape[0]) except AssertionError as ae: raise ae, 'shape mismatch with sample size' for sample in range(samples): # loop over samples # slice data data = raw_data[sample*N:(sample+1)*N] # create cell resolved file fname = os.path.join(params.savefolder,'populations','subsamples', '%s_%ss_%i_%i.h5' \ % (y, data_type, ana_params.scaling*100, sample)) f = h5py.File(fname, 'w') print 'Write %s' % str(f.filename) f['data'] = data f['srate'] = srate f.close() # create cell type resolved file fname = os.path.join(params.savefolder,'populations','subsamples', '%s_population_%s_%i_%i.h5' \ % (y,data_type,ana_params.scaling*100,sample)) f = h5py.File(fname, 'w') print 'Write %s' % str(f.filename) f['data'] = data.sum(axis=0) f['srate'] = srate f.close() COUNTER += 1 COMM.Barrier() f = 
h5py.File(os.path.join(params.savefolder,'populations', '%s_%ss.h5' % (y,data_type)), 'r') datashape = f['data'].shape f.close() COUNTER = 0 for sample in range(samples): # loop over samples if COUNTER % SIZE == RANK: # initialize full sum signal data_full = np.zeros(datashape[1:]) for j,layer in enumerate(params.y_in_Y): # loop over layers for k,pop in enumerate(layer): # loop over populations # initialize population resolved sum signal data_Y = np.zeros(datashape[1:]) for i,y in enumerate(pop): # loop over cell types # Load data fname = os.path.join(params.savefolder, 'populations', 'subsamples', '%s_population_%s_%i_%i.h5' \ % (y, data_type, ana_params.scaling*100, sample)) f = h5py.File(fname, 'r') # Update population sum: data_Y += f['data'].value srate = f['srate'].value f.close() # write population sum fname = os.path.join(params.savefolder,'populations','subsamples', '%s_population_%s_%i_%i.h5' \ % (params.Y[2*j+k], data_type, ana_params.scaling*100, sample)) f = h5py.File(fname,'w') print 'Write %s' % str(f.filename) f['data'] = data_Y f['srate'] = srate f.close() # update full sum data_full += data_Y # write sum fname = os.path.join(params.savefolder,'populations','subsamples', '%ssum_%i_%i.h5' % (data_type,ana_params.scaling*100,sample)) f = h5py.File(fname,'w') print 'Write %s' % str(f.filename) f['data'] = data_full f['srate'] = srate f.close() COUNTER += 1 COMM.Barrier()
python
def create_downsampled_data(params): ''' Creates one CSD or LFP file with downsampled data per cell type ''' maxsamples = 1 for data_type in ['LFP','CSD']: if RANK == 0: if not os.path.isdir(os.path.join(params.savefolder, 'populations', 'subsamples')): os.mkdir((os.path.join(params.savefolder,'populations','subsamples'))) COMM.Barrier() try: assert(ana_params.scaling <= params.recordSingleContribFrac) except AssertionError as ae: raise ae, 'scaling parameter must be less than simulation recordSingleContribFrac' samples = int(1. / ana_params.scaling) if samples > maxsamples: samples = maxsamples COUNTER = 0 for j, layer in enumerate(params.y_in_Y): # loop over layers for k, pop in enumerate(layer): # loop over populations for i, y in enumerate(pop): # loop over cell types if COUNTER % SIZE == RANK: # Load data fname = os.path.join(params.savefolder, 'populations', '%s_%ss.h5' \ % (y, data_type)) f = h5py.File(fname) print 'Load %s' % str(f.filename) raw_data = f['data'].value srate = f['srate'].value f.close() ## shuffle data #np.random.shuffle(raw_data) # sample size N = int(params.N_y[np.array(params.y) == y]*ana_params.scaling) try: assert(N <= raw_data.shape[0]) except AssertionError as ae: raise ae, 'shape mismatch with sample size' for sample in range(samples): # loop over samples # slice data data = raw_data[sample*N:(sample+1)*N] # create cell resolved file fname = os.path.join(params.savefolder,'populations','subsamples', '%s_%ss_%i_%i.h5' \ % (y, data_type, ana_params.scaling*100, sample)) f = h5py.File(fname, 'w') print 'Write %s' % str(f.filename) f['data'] = data f['srate'] = srate f.close() # create cell type resolved file fname = os.path.join(params.savefolder,'populations','subsamples', '%s_population_%s_%i_%i.h5' \ % (y,data_type,ana_params.scaling*100,sample)) f = h5py.File(fname, 'w') print 'Write %s' % str(f.filename) f['data'] = data.sum(axis=0) f['srate'] = srate f.close() COUNTER += 1 COMM.Barrier() f = 
h5py.File(os.path.join(params.savefolder,'populations', '%s_%ss.h5' % (y,data_type)), 'r') datashape = f['data'].shape f.close() COUNTER = 0 for sample in range(samples): # loop over samples if COUNTER % SIZE == RANK: # initialize full sum signal data_full = np.zeros(datashape[1:]) for j,layer in enumerate(params.y_in_Y): # loop over layers for k,pop in enumerate(layer): # loop over populations # initialize population resolved sum signal data_Y = np.zeros(datashape[1:]) for i,y in enumerate(pop): # loop over cell types # Load data fname = os.path.join(params.savefolder, 'populations', 'subsamples', '%s_population_%s_%i_%i.h5' \ % (y, data_type, ana_params.scaling*100, sample)) f = h5py.File(fname, 'r') # Update population sum: data_Y += f['data'].value srate = f['srate'].value f.close() # write population sum fname = os.path.join(params.savefolder,'populations','subsamples', '%s_population_%s_%i_%i.h5' \ % (params.Y[2*j+k], data_type, ana_params.scaling*100, sample)) f = h5py.File(fname,'w') print 'Write %s' % str(f.filename) f['data'] = data_Y f['srate'] = srate f.close() # update full sum data_full += data_Y # write sum fname = os.path.join(params.savefolder,'populations','subsamples', '%ssum_%i_%i.h5' % (data_type,ana_params.scaling*100,sample)) f = h5py.File(fname,'w') print 'Write %s' % str(f.filename) f['data'] = data_full f['srate'] = srate f.close() COUNTER += 1 COMM.Barrier()
[ "def", "create_downsampled_data", "(", "params", ")", ":", "maxsamples", "=", "1", "for", "data_type", "in", "[", "'LFP'", ",", "'CSD'", "]", ":", "if", "RANK", "==", "0", ":", "if", "not", "os", ".", "path", ".", "isdir", "(", "os", ".", "path", "...
Creates one CSD or LFP file with downsampled data per cell type
[ "Creates", "one", "CSD", "or", "LFP", "file", "with", "downsampled", "data", "per", "cell", "type" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/analysis.py#L42-L170
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/analysis.py
calc_signal_power
def calc_signal_power(params): ''' calculates power spectrum of sum signal for all channels ''' for i, data_type in enumerate(['CSD','LFP','CSD_10_0', 'LFP_10_0']): if i % SIZE == RANK: # Load data if data_type in ['CSD','LFP']: fname=os.path.join(params.savefolder, data_type+'sum.h5') else: fname=os.path.join(params.populations_path, 'subsamples', str.split(data_type,'_')[0] + 'sum_' + str.split(data_type,'_')[1] + '_' + str.split(data_type,'_')[2] + '.h5') #open file f = h5py.File(fname) data = f['data'].value srate = f['srate'].value tvec = np.arange(data.shape[1]) * 1000. / srate # slice slica = (tvec >= ana_params.transient) data = data[:,slica] # subtract mean dataT = data.T - data.mean(axis=1) data = dataT.T f.close() #extract PSD PSD=[] for i in np.arange(len(params.electrodeParams['z'])): if ana_params.mlab: Pxx, freqs=plt.mlab.psd(data[i], NFFT=ana_params.NFFT, Fs=srate, noverlap=ana_params.noverlap, window=ana_params.window) else: [freqs, Pxx] = hlp.powerspec([data[i]], tbin= 1., Df=ana_params.Df, pointProcess=False) mask = np.where(freqs >= 0.) freqs = freqs[mask] Pxx = Pxx.flatten() Pxx = Pxx[mask] Pxx = Pxx/tvec[tvec >= ana_params.transient].size**2 PSD +=[Pxx.flatten()] PSD=np.array(PSD) # Save data f = h5py.File(os.path.join(params.savefolder, ana_params.analysis_folder, data_type + ana_params.fname_psd),'w') f['freqs']=freqs f['psd']=PSD f['transient']=ana_params.transient f['mlab']=ana_params.mlab f['NFFT']=ana_params.NFFT f['noverlap']=ana_params.noverlap f['window']=str(ana_params.window) f['Df']=str(ana_params.Df) f.close() return
python
def calc_signal_power(params): ''' calculates power spectrum of sum signal for all channels ''' for i, data_type in enumerate(['CSD','LFP','CSD_10_0', 'LFP_10_0']): if i % SIZE == RANK: # Load data if data_type in ['CSD','LFP']: fname=os.path.join(params.savefolder, data_type+'sum.h5') else: fname=os.path.join(params.populations_path, 'subsamples', str.split(data_type,'_')[0] + 'sum_' + str.split(data_type,'_')[1] + '_' + str.split(data_type,'_')[2] + '.h5') #open file f = h5py.File(fname) data = f['data'].value srate = f['srate'].value tvec = np.arange(data.shape[1]) * 1000. / srate # slice slica = (tvec >= ana_params.transient) data = data[:,slica] # subtract mean dataT = data.T - data.mean(axis=1) data = dataT.T f.close() #extract PSD PSD=[] for i in np.arange(len(params.electrodeParams['z'])): if ana_params.mlab: Pxx, freqs=plt.mlab.psd(data[i], NFFT=ana_params.NFFT, Fs=srate, noverlap=ana_params.noverlap, window=ana_params.window) else: [freqs, Pxx] = hlp.powerspec([data[i]], tbin= 1., Df=ana_params.Df, pointProcess=False) mask = np.where(freqs >= 0.) freqs = freqs[mask] Pxx = Pxx.flatten() Pxx = Pxx[mask] Pxx = Pxx/tvec[tvec >= ana_params.transient].size**2 PSD +=[Pxx.flatten()] PSD=np.array(PSD) # Save data f = h5py.File(os.path.join(params.savefolder, ana_params.analysis_folder, data_type + ana_params.fname_psd),'w') f['freqs']=freqs f['psd']=PSD f['transient']=ana_params.transient f['mlab']=ana_params.mlab f['NFFT']=ana_params.NFFT f['noverlap']=ana_params.noverlap f['window']=str(ana_params.window) f['Df']=str(ana_params.Df) f.close() return
[ "def", "calc_signal_power", "(", "params", ")", ":", "for", "i", ",", "data_type", "in", "enumerate", "(", "[", "'CSD'", ",", "'LFP'", ",", "'CSD_10_0'", ",", "'LFP_10_0'", "]", ")", ":", "if", "i", "%", "SIZE", "==", "RANK", ":", "# Load data", "if", ...
calculates power spectrum of sum signal for all channels
[ "calculates", "power", "spectrum", "of", "sum", "signal", "for", "all", "channels" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/analysis.py#L174-L239
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/analysis.py
calc_uncorrelated_signal_power
def calc_uncorrelated_signal_power(params): '''This function calculates the depth-resolved power spectrum of signals without taking into account any cross-correlation.''' for i, data_type in enumerate(['LFP','CSD']): if i % SIZE == RANK: # Determine size of PSD matrix f = h5py.File(os.path.join(params.savefolder, data_type + 'sum.h5'),'r') data = f['data'].value srate = f['srate'].value if ana_params.mlab: Psum, freqs = plt.mlab.psd(data[0], NFFT=ana_params.NFFT, Fs=srate, noverlap=ana_params.noverlap, window=ana_params.window) else: [freqs, Psum] = hlp.powerspec([data[0]], tbin= 1./srate*1000., Df=ana_params.Df, pointProcess=False) f.close() P = np.zeros((data.shape[0],Psum.shape[0])) for y in params.y: print 'processing ', y # Load data f = h5py.File(os.path.join(params.populations_path, '%s_%ss' % (y,data_type) + '.h5'),'r') data_y = f['data'].value[:,:, ana_params.transient:] # subtract mean for j in range(len(data_y)): data_yT = data_y[j].T - data_y[j].mean(axis=1) data_y[j] = data_yT.T srate = f['srate'].value tvec = np.arange(data_y.shape[2]) * 1000. / srate f.close() for j in range(len(data_y)): # loop over cells if ana_params.mlab: for ch in range(len(params.electrodeParams['z'])): # loop over channels P_j_ch, freqs = plt.mlab.psd(data_y[j,ch], NFFT=ana_params.NFFT, Fs=srate, noverlap=ana_params.noverlap, window=ana_params.window) P[ch] += P_j_ch else: [freqs, P_j] = hlp.powerspec(data_y[j], tbin= 1./srate*1000., Df=ana_params.Df, pointProcess=False) mask = np.where(freqs >= 0.) 
freqs = freqs[mask] P_j = P_j[:,mask][:,0,:] P_j = P_j/tvec[tvec >= ana_params.transient].size**2 P += P_j #rescale PSD as they may be computed from a fraction of single cell LFPs P /= params.recordSingleContribFrac # Save data f = h5py.File(os.path.join(params.savefolder, ana_params.analysis_folder, data_type + ana_params.fname_psd_uncorr),'w') f['freqs']=freqs f['psd']=P f['transient']=ana_params.transient f['mlab']=ana_params.mlab f['NFFT']=ana_params.NFFT f['noverlap']=ana_params.noverlap f['window']=str(ana_params.window) f['Df']=str(ana_params.Df) f.close() return
python
def calc_uncorrelated_signal_power(params): '''This function calculates the depth-resolved power spectrum of signals without taking into account any cross-correlation.''' for i, data_type in enumerate(['LFP','CSD']): if i % SIZE == RANK: # Determine size of PSD matrix f = h5py.File(os.path.join(params.savefolder, data_type + 'sum.h5'),'r') data = f['data'].value srate = f['srate'].value if ana_params.mlab: Psum, freqs = plt.mlab.psd(data[0], NFFT=ana_params.NFFT, Fs=srate, noverlap=ana_params.noverlap, window=ana_params.window) else: [freqs, Psum] = hlp.powerspec([data[0]], tbin= 1./srate*1000., Df=ana_params.Df, pointProcess=False) f.close() P = np.zeros((data.shape[0],Psum.shape[0])) for y in params.y: print 'processing ', y # Load data f = h5py.File(os.path.join(params.populations_path, '%s_%ss' % (y,data_type) + '.h5'),'r') data_y = f['data'].value[:,:, ana_params.transient:] # subtract mean for j in range(len(data_y)): data_yT = data_y[j].T - data_y[j].mean(axis=1) data_y[j] = data_yT.T srate = f['srate'].value tvec = np.arange(data_y.shape[2]) * 1000. / srate f.close() for j in range(len(data_y)): # loop over cells if ana_params.mlab: for ch in range(len(params.electrodeParams['z'])): # loop over channels P_j_ch, freqs = plt.mlab.psd(data_y[j,ch], NFFT=ana_params.NFFT, Fs=srate, noverlap=ana_params.noverlap, window=ana_params.window) P[ch] += P_j_ch else: [freqs, P_j] = hlp.powerspec(data_y[j], tbin= 1./srate*1000., Df=ana_params.Df, pointProcess=False) mask = np.where(freqs >= 0.) 
freqs = freqs[mask] P_j = P_j[:,mask][:,0,:] P_j = P_j/tvec[tvec >= ana_params.transient].size**2 P += P_j #rescale PSD as they may be computed from a fraction of single cell LFPs P /= params.recordSingleContribFrac # Save data f = h5py.File(os.path.join(params.savefolder, ana_params.analysis_folder, data_type + ana_params.fname_psd_uncorr),'w') f['freqs']=freqs f['psd']=P f['transient']=ana_params.transient f['mlab']=ana_params.mlab f['NFFT']=ana_params.NFFT f['noverlap']=ana_params.noverlap f['window']=str(ana_params.window) f['Df']=str(ana_params.Df) f.close() return
[ "def", "calc_uncorrelated_signal_power", "(", "params", ")", ":", "for", "i", ",", "data_type", "in", "enumerate", "(", "[", "'LFP'", ",", "'CSD'", "]", ")", ":", "if", "i", "%", "SIZE", "==", "RANK", ":", "# Determine size of PSD matrix", "f", "=", "h5py"...
This function calculates the depth-resolved power spectrum of signals without taking into account any cross-correlation.
[ "This", "function", "calculates", "the", "depth", "-", "resolved", "power", "spectrum", "of", "signals", "without", "taking", "into", "account", "any", "cross", "-", "correlation", "." ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/analysis.py#L242-L316
INM-6/hybridLFPy
examples/Hagen_et_al_2016_cercor/analysis.py
calc_variances
def calc_variances(params): ''' This function calculates the variance of the sum signal and all population-resolved signals ''' depth = params.electrodeParams['z'] ############################ ### CSD ### ############################ for i, data_type in enumerate(['CSD','LFP']): if i % SIZE == RANK: f_out = h5py.File(os.path.join(params.savefolder, ana_params.analysis_folder, data_type + ana_params.fname_variances), 'w') f_out['depths']=depth for celltype in params.y: f_in = h5py.File(os.path.join(params.populations_path, '%s_population_%s' % (celltype,data_type) + '.h5' )) var = f_in['data'].value[:, ana_params.transient:].var(axis=1) f_in.close() f_out[celltype]= var f_in = h5py.File(os.path.join(params.savefolder, data_type + 'sum.h5' )) var= f_in['data'].value[:, ana_params.transient:].var(axis=1) f_in.close() f_out['sum']= var f_out.close() return
python
def calc_variances(params): ''' This function calculates the variance of the sum signal and all population-resolved signals ''' depth = params.electrodeParams['z'] ############################ ### CSD ### ############################ for i, data_type in enumerate(['CSD','LFP']): if i % SIZE == RANK: f_out = h5py.File(os.path.join(params.savefolder, ana_params.analysis_folder, data_type + ana_params.fname_variances), 'w') f_out['depths']=depth for celltype in params.y: f_in = h5py.File(os.path.join(params.populations_path, '%s_population_%s' % (celltype,data_type) + '.h5' )) var = f_in['data'].value[:, ana_params.transient:].var(axis=1) f_in.close() f_out[celltype]= var f_in = h5py.File(os.path.join(params.savefolder, data_type + 'sum.h5' )) var= f_in['data'].value[:, ana_params.transient:].var(axis=1) f_in.close() f_out['sum']= var f_out.close() return
[ "def", "calc_variances", "(", "params", ")", ":", "depth", "=", "params", ".", "electrodeParams", "[", "'z'", "]", "############################", "### CSD ###", "############################", "for", "i", ",", "data_type", "in", "enumerate", "(", "["...
This function calculates the variance of the sum signal and all population-resolved signals
[ "This", "function", "calculates", "the", "variance", "of", "the", "sum", "signal", "and", "all", "population", "-", "resolved", "signals" ]
train
https://github.com/INM-6/hybridLFPy/blob/c38bdf38982c4624c2f70caeb50c40f1d5980abd/examples/Hagen_et_al_2016_cercor/analysis.py#L319-L351
Pytwitcher/pytwitcherapi
docs/source/snippets/chat_myclient1.py
MyIRCClient.on_join
def on_join(self, connection, event): """Handles the join event and greets everone :param connection: the connection with the event :type connection: :class:`irc.client.ServerConnection` :param event: the event to handle :type event: :class:`irc.client.Event` :returns: None """ target = event.source self.privmsg(target, 'Hello %s!' % target)
python
def on_join(self, connection, event): """Handles the join event and greets everone :param connection: the connection with the event :type connection: :class:`irc.client.ServerConnection` :param event: the event to handle :type event: :class:`irc.client.Event` :returns: None """ target = event.source self.privmsg(target, 'Hello %s!' % target)
[ "def", "on_join", "(", "self", ",", "connection", ",", "event", ")", ":", "target", "=", "event", ".", "source", "self", ".", "privmsg", "(", "target", ",", "'Hello %s!'", "%", "target", ")" ]
Handles the join event and greets everone :param connection: the connection with the event :type connection: :class:`irc.client.ServerConnection` :param event: the event to handle :type event: :class:`irc.client.Event` :returns: None
[ "Handles", "the", "join", "event", "and", "greets", "everone" ]
train
https://github.com/Pytwitcher/pytwitcherapi/blob/d53ac5ad5ca113ecb7da542e8cdcbbf8c762b336/docs/source/snippets/chat_myclient1.py#L5-L15
tkf/rash
rash/daemon.py
daemon_run
def daemon_run(no_error, restart, record_path, keep_json, check_duplicate, use_polling, log_level): """ Run RASH index daemon. This daemon watches the directory ``~/.config/rash/data/record`` and translate the JSON files dumped by ``record`` command into sqlite3 DB at ``~/.config/rash/data/db.sqlite``. ``rash init`` will start RASH automatically by default. But there are alternative ways to start daemon. If you want to organize background process in one place such as supervisord_, it is good to add `--restart` option to force stop other daemon process if you accidentally started it in other place. Here is an example of supervisord_ setup:: [program:rash-daemon] command=rash daemon --restart .. _supervisord: http://supervisord.org/ Alternatively, you can call ``rash index`` in cron job to avoid using daemon. It is useful if you want to use RASH on NFS, as it looks like watchdog does not work on NFS.:: # Refresh RASH DB every 10 minutes */10 * * * * rash index """ # Probably it makes sense to use this daemon to provide search # API, so that this daemon is going to be the only process that # is connected to the DB? from .config import ConfigStore from .indexer import Indexer from .log import setup_daemon_log_file, LogForTheFuture from .watchrecord import watch_record, install_sigterm_handler install_sigterm_handler() cfstore = ConfigStore() if log_level: cfstore.daemon_log_level = log_level flogger = LogForTheFuture() # SOMEDAY: make PID checking/writing atomic if possible flogger.debug('Checking old PID file %r.', cfstore.daemon_pid_path) if os.path.exists(cfstore.daemon_pid_path): flogger.debug('Old PID file exists. Reading from it.') with open(cfstore.daemon_pid_path, 'rt') as f: pid = int(f.read().strip()) flogger.debug('Checking if old process with PID=%d is alive', pid) try: os.kill(pid, 0) # check if `pid` is alive except OSError: flogger.info( 'Process with PID=%d is already dead. 
' 'So just go on and use this daemon.', pid) else: if restart: flogger.info('Stopping old daemon with PID=%d.', pid) stop_running_daemon(cfstore, pid) else: message = ('There is already a running daemon (PID={0})!' .format(pid)) if no_error: flogger.debug(message) # FIXME: Setup log handler and flogger.dump(). # Note that using the default log file is not safe # since it has already been used. return else: raise RuntimeError(message) else: flogger.debug('Daemon PID file %r does not exists. ' 'So just go on and use this daemon.', cfstore.daemon_pid_path) with open(cfstore.daemon_pid_path, 'w') as f: f.write(str(os.getpid())) try: setup_daemon_log_file(cfstore) flogger.dump() indexer = Indexer(cfstore, check_duplicate, keep_json, record_path) indexer.index_all() watch_record(indexer, use_polling) finally: os.remove(cfstore.daemon_pid_path)
python
def daemon_run(no_error, restart, record_path, keep_json, check_duplicate, use_polling, log_level): """ Run RASH index daemon. This daemon watches the directory ``~/.config/rash/data/record`` and translate the JSON files dumped by ``record`` command into sqlite3 DB at ``~/.config/rash/data/db.sqlite``. ``rash init`` will start RASH automatically by default. But there are alternative ways to start daemon. If you want to organize background process in one place such as supervisord_, it is good to add `--restart` option to force stop other daemon process if you accidentally started it in other place. Here is an example of supervisord_ setup:: [program:rash-daemon] command=rash daemon --restart .. _supervisord: http://supervisord.org/ Alternatively, you can call ``rash index`` in cron job to avoid using daemon. It is useful if you want to use RASH on NFS, as it looks like watchdog does not work on NFS.:: # Refresh RASH DB every 10 minutes */10 * * * * rash index """ # Probably it makes sense to use this daemon to provide search # API, so that this daemon is going to be the only process that # is connected to the DB? from .config import ConfigStore from .indexer import Indexer from .log import setup_daemon_log_file, LogForTheFuture from .watchrecord import watch_record, install_sigterm_handler install_sigterm_handler() cfstore = ConfigStore() if log_level: cfstore.daemon_log_level = log_level flogger = LogForTheFuture() # SOMEDAY: make PID checking/writing atomic if possible flogger.debug('Checking old PID file %r.', cfstore.daemon_pid_path) if os.path.exists(cfstore.daemon_pid_path): flogger.debug('Old PID file exists. Reading from it.') with open(cfstore.daemon_pid_path, 'rt') as f: pid = int(f.read().strip()) flogger.debug('Checking if old process with PID=%d is alive', pid) try: os.kill(pid, 0) # check if `pid` is alive except OSError: flogger.info( 'Process with PID=%d is already dead. 
' 'So just go on and use this daemon.', pid) else: if restart: flogger.info('Stopping old daemon with PID=%d.', pid) stop_running_daemon(cfstore, pid) else: message = ('There is already a running daemon (PID={0})!' .format(pid)) if no_error: flogger.debug(message) # FIXME: Setup log handler and flogger.dump(). # Note that using the default log file is not safe # since it has already been used. return else: raise RuntimeError(message) else: flogger.debug('Daemon PID file %r does not exists. ' 'So just go on and use this daemon.', cfstore.daemon_pid_path) with open(cfstore.daemon_pid_path, 'w') as f: f.write(str(os.getpid())) try: setup_daemon_log_file(cfstore) flogger.dump() indexer = Indexer(cfstore, check_duplicate, keep_json, record_path) indexer.index_all() watch_record(indexer, use_polling) finally: os.remove(cfstore.daemon_pid_path)
[ "def", "daemon_run", "(", "no_error", ",", "restart", ",", "record_path", ",", "keep_json", ",", "check_duplicate", ",", "use_polling", ",", "log_level", ")", ":", "# Probably it makes sense to use this daemon to provide search", "# API, so that this daemon is going to be the o...
Run RASH index daemon. This daemon watches the directory ``~/.config/rash/data/record`` and translate the JSON files dumped by ``record`` command into sqlite3 DB at ``~/.config/rash/data/db.sqlite``. ``rash init`` will start RASH automatically by default. But there are alternative ways to start daemon. If you want to organize background process in one place such as supervisord_, it is good to add `--restart` option to force stop other daemon process if you accidentally started it in other place. Here is an example of supervisord_ setup:: [program:rash-daemon] command=rash daemon --restart .. _supervisord: http://supervisord.org/ Alternatively, you can call ``rash index`` in cron job to avoid using daemon. It is useful if you want to use RASH on NFS, as it looks like watchdog does not work on NFS.:: # Refresh RASH DB every 10 minutes */10 * * * * rash index
[ "Run", "RASH", "index", "daemon", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/daemon.py#L20-L107
tkf/rash
rash/daemon.py
start_daemon_in_subprocess
def start_daemon_in_subprocess(options, outpath=os.devnull): """ Run `rash daemon --no-error` in background. :type options: list of str :arg options: options for "rash daemon" command :type outpath: str :arg outpath: path to redirect daemon output """ import subprocess import sys from .utils.py3compat import nested from .utils.pathutils import mkdirp if outpath != os.devnull: mkdirp(os.path.dirname(outpath)) with nested(open(os.devnull), open(outpath, 'w')) as (stdin, stdout): subprocess.Popen( [os.path.abspath(sys.executable), '-m', 'rash.cli', 'daemon', '--no-error'] + options, preexec_fn=os.setsid, stdin=stdin, stdout=stdout, stderr=subprocess.STDOUT)
python
def start_daemon_in_subprocess(options, outpath=os.devnull): """ Run `rash daemon --no-error` in background. :type options: list of str :arg options: options for "rash daemon" command :type outpath: str :arg outpath: path to redirect daemon output """ import subprocess import sys from .utils.py3compat import nested from .utils.pathutils import mkdirp if outpath != os.devnull: mkdirp(os.path.dirname(outpath)) with nested(open(os.devnull), open(outpath, 'w')) as (stdin, stdout): subprocess.Popen( [os.path.abspath(sys.executable), '-m', 'rash.cli', 'daemon', '--no-error'] + options, preexec_fn=os.setsid, stdin=stdin, stdout=stdout, stderr=subprocess.STDOUT)
[ "def", "start_daemon_in_subprocess", "(", "options", ",", "outpath", "=", "os", ".", "devnull", ")", ":", "import", "subprocess", "import", "sys", "from", ".", "utils", ".", "py3compat", "import", "nested", "from", ".", "utils", ".", "pathutils", "import", "...
Run `rash daemon --no-error` in background. :type options: list of str :arg options: options for "rash daemon" command :type outpath: str :arg outpath: path to redirect daemon output
[ "Run", "rash", "daemon", "--", "no", "-", "error", "in", "background", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/daemon.py#L124-L146
tkf/rash
rash/show.py
show_run
def show_run(command_history_id): """ Show detailed command history by its ID. """ from pprint import pprint from .config import ConfigStore from .database import DataBase db = DataBase(ConfigStore().db_path) with db.connection(): for ch_id in command_history_id: crec = db.get_full_command_record(ch_id) pprint(crec.__dict__) print("")
python
def show_run(command_history_id): """ Show detailed command history by its ID. """ from pprint import pprint from .config import ConfigStore from .database import DataBase db = DataBase(ConfigStore().db_path) with db.connection(): for ch_id in command_history_id: crec = db.get_full_command_record(ch_id) pprint(crec.__dict__) print("")
[ "def", "show_run", "(", "command_history_id", ")", ":", "from", "pprint", "import", "pprint", "from", ".", "config", "import", "ConfigStore", "from", ".", "database", "import", "DataBase", "db", "=", "DataBase", "(", "ConfigStore", "(", ")", ".", "db_path", ...
Show detailed command history by its ID.
[ "Show", "detailed", "command", "history", "by", "its", "ID", "." ]
train
https://github.com/tkf/rash/blob/585da418ec37dd138f1a4277718b6f507e9536a2/rash/show.py#L17-L29
pebble/libpebble2
libpebble2/services/blobdb.py
BlobDBClient.insert
def insert(self, database, key, value, callback=None): """ Insert an item into the given database. :param database: The database into which to insert the value. :type database: .BlobDatabaseID :param key: The key to insert. :type key: uuid.UUID :param value: The value to insert. :type value: bytes :param callback: A callback to be called on success or failure. """ token = self._get_token() self._enqueue(self._PendingItem(token, BlobCommand(token=token, database=database, content=InsertCommand(key=key.bytes, value=value)), callback))
python
def insert(self, database, key, value, callback=None): """ Insert an item into the given database. :param database: The database into which to insert the value. :type database: .BlobDatabaseID :param key: The key to insert. :type key: uuid.UUID :param value: The value to insert. :type value: bytes :param callback: A callback to be called on success or failure. """ token = self._get_token() self._enqueue(self._PendingItem(token, BlobCommand(token=token, database=database, content=InsertCommand(key=key.bytes, value=value)), callback))
[ "def", "insert", "(", "self", ",", "database", ",", "key", ",", "value", ",", "callback", "=", "None", ")", ":", "token", "=", "self", ".", "_get_token", "(", ")", "self", ".", "_enqueue", "(", "self", ".", "_PendingItem", "(", "token", ",", "BlobCom...
Insert an item into the given database. :param database: The database into which to insert the value. :type database: .BlobDatabaseID :param key: The key to insert. :type key: uuid.UUID :param value: The value to insert. :type value: bytes :param callback: A callback to be called on success or failure.
[ "Insert", "an", "item", "into", "the", "given", "database", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/services/blobdb.py#L59-L74
pebble/libpebble2
libpebble2/services/blobdb.py
BlobDBClient.delete
def delete(self, database, key, callback=None): """ Delete an item from the given database. :param database: The database from which to delete the value. :type database: .BlobDatabaseID :param key: The key to delete. :type key: uuid.UUID :param callback: A callback to be called on success or failure. """ token = self._get_token() self._enqueue(self._PendingItem(token, BlobCommand(token=token, database=database, content=DeleteCommand(key=key.bytes)), callback))
python
def delete(self, database, key, callback=None): """ Delete an item from the given database. :param database: The database from which to delete the value. :type database: .BlobDatabaseID :param key: The key to delete. :type key: uuid.UUID :param callback: A callback to be called on success or failure. """ token = self._get_token() self._enqueue(self._PendingItem(token, BlobCommand(token=token, database=database, content=DeleteCommand(key=key.bytes)), callback))
[ "def", "delete", "(", "self", ",", "database", ",", "key", ",", "callback", "=", "None", ")", ":", "token", "=", "self", ".", "_get_token", "(", ")", "self", ".", "_enqueue", "(", "self", ".", "_PendingItem", "(", "token", ",", "BlobCommand", "(", "t...
Delete an item from the given database. :param database: The database from which to delete the value. :type database: .BlobDatabaseID :param key: The key to delete. :type key: uuid.UUID :param callback: A callback to be called on success or failure.
[ "Delete", "an", "item", "from", "the", "given", "database", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/services/blobdb.py#L76-L89
pebble/libpebble2
libpebble2/services/blobdb.py
BlobDBClient.clear
def clear(self, database, callback=None): """ Wipe the given database. This only affects items inserted remotely; items inserted on the watch (e.g. alarm clock timeline pins) are not removed. :param database: The database to wipe. :type database: .BlobDatabaseID :param callback: A callback to be called on success or failure. """ token = self._get_token() self._enqueue(self._PendingItem(token, BlobCommand(token=token, database=database, content=ClearCommand()), callback))
python
def clear(self, database, callback=None): """ Wipe the given database. This only affects items inserted remotely; items inserted on the watch (e.g. alarm clock timeline pins) are not removed. :param database: The database to wipe. :type database: .BlobDatabaseID :param callback: A callback to be called on success or failure. """ token = self._get_token() self._enqueue(self._PendingItem(token, BlobCommand(token=token, database=database, content=ClearCommand()), callback))
[ "def", "clear", "(", "self", ",", "database", ",", "callback", "=", "None", ")", ":", "token", "=", "self", ".", "_get_token", "(", ")", "self", ".", "_enqueue", "(", "self", ".", "_PendingItem", "(", "token", ",", "BlobCommand", "(", "token", "=", "...
Wipe the given database. This only affects items inserted remotely; items inserted on the watch (e.g. alarm clock timeline pins) are not removed. :param database: The database to wipe. :type database: .BlobDatabaseID :param callback: A callback to be called on success or failure.
[ "Wipe", "the", "given", "database", ".", "This", "only", "affects", "items", "inserted", "remotely", ";", "items", "inserted", "on", "the", "watch", "(", "e", ".", "g", ".", "alarm", "clock", "timeline", "pins", ")", "are", "not", "removed", "." ]
train
https://github.com/pebble/libpebble2/blob/23e2eb92cfc084e6f9e8c718711ac994ef606d18/libpebble2/services/blobdb.py#L91-L103