text
stringlengths
81
112k
def _repaint(self, drawing_context: DrawingContext.DrawingContext):
    """Repaint the canvas item. This will occur on a thread."""
    # Capture the canvas dimensions once up front.
    size = self.canvas_size
    with drawing_context.saver():
        data = self.__color_map_data
        if data is not None:
            # Build an RGBA image whose R, G and B planes all carry the
            # scalar color-map data, with the alpha plane fully opaque.
            rgba_image = numpy.empty((4,) + data.shape[:-1], dtype=numpy.uint32)
            Image.get_rgb_view(rgba_image)[:] = data[numpy.newaxis, :, :]
            Image.get_alpha_view(rgba_image)[:] = 255
            drawing_context.draw_image(rgba_image, 0, 0, size.width, size.height)
def get_params(self):
    """Parameters used to initialize the class.

    Returns a dict mapping each ``__init__`` parameter name (``self``
    excluded) to the value stored on the matching ``_<name>`` attribute,
    or ``None`` when that attribute was never set.
    """
    import inspect
    # Bug fix: ``inspect.getargspec`` was removed in Python 3.11;
    # ``getfullargspec`` is the drop-in replacement (index 0 is still
    # the positional-argument name list).
    arg_names = inspect.getfullargspec(self.__init__)[0]
    out = dict()
    for key in arg_names[1:]:  # skip ``self``
        out[key] = getattr(self, "_%s" % key, None)
    return out
def signature(self):
    "Instance file name"
    # Build a compact, deterministic name from the init parameters:
    # each piece is "<first+last char of key>_<value repr>".
    params = self.get_params()
    pieces = []
    for key in sorted(params.keys()):
        short = key[0] + key[-1]
        if key == 'function_set':
            # Functions are abbreviated as first+last char of name + arity.
            value = "_".join([x.__name__[0] + x.__name__[-1] + str(x.nargs)
                              for x in params[key]])
        elif key == 'population_class':
            value = params[key].__name__
        else:
            value = str(params[key])
        pieces.append('{0}_{1}'.format(short, value))
    return '-'.join(pieces)
def population(self):
    "Class containing the population and all the individuals generated"
    # Lazily build the population on first access and cache it in
    # ``self._p`` so every later access returns the same instance.
    try:
        return self._p
    except AttributeError:
        pass
    self._p = self._population_class(
        base=self,
        tournament_size=self._tournament_size,
        classifier=self.classifier,
        labels=self._labels,
        es_extra_test=self.es_extra_test,
        popsize=self._popsize,
        random_generations=self._random_generations,
        negative_selection=self._negative_selection,
    )
    return self._p
def random_leaf(self):
    "Returns a random variable with the associated weight"
    # Retry a bounded number of times: a draw may be unfeasible, in
    # which case ``_random_leaf`` returns None and we try again.
    for _attempt in range(self._number_tries_feasible_ind):
        variable = np.random.randint(self.nvar)
        leaf = self._random_leaf(variable)
        if leaf is not None:
            return leaf
    raise RuntimeError("Could not find a suitable random leaf")
def random_offspring(self):
    """Returns an offspring with the associated weight(s).

    Repeatedly picks a function (by tournament or uniformly, depending
    on ``self._function_selection``), gathers feasible arguments and
    builds the offspring; unfeasible functions are recorded so selection
    can avoid them. Raises RuntimeError when no feasible offspring is
    found within ``self._number_tries_feasible_ind`` attempts.
    """
    function_set = self.function_set
    function_selection = self._function_selection_ins
    function_selection.density = self.population.density
    function_selection.unfeasible_functions.clear()
    for i in range(self._number_tries_feasible_ind):
        # Choose the function either competitively or at random.
        if self._function_selection:
            func_index = function_selection.tournament()
        else:
            func_index = function_selection.random_function()
        func = function_set[func_index]
        args = self.get_args(func)
        if args is None:
            continue
        # Map population indices to node positions.
        args = [self.population.population[x].position for x in args]
        f = self._random_offspring(func, args)
        if f is None:
            # Remember that this function failed so selection can skip it.
            function_selection.unfeasible_functions.add(func_index)
            continue
        function_selection[func_index] = f.fitness
        return f
    # Bug fix: error message previously misspelled "offpsring".
    raise RuntimeError("Could not find a suitable random offspring")
def stopping_criteria(self):
    "Test whether the stopping criteria has been achieved."
    # Hard time-limit check first.
    if self.stopping_criteria_tl():
        return True
    # Generation budget: stop once popsize * generations individuals
    # have been evaluated (hist holds every individual ever created).
    if self.generations < np.inf:
        inds = self.popsize * self.generations
        flag = inds <= len(self.population.hist)
    else:
        flag = False
    if flag:
        return True
    est = self.population.estopping
    # Early stopping only applies when a validation split exists
    # (_tr_fraction < 1); a perfect validation fitness ends the run.
    if self._tr_fraction < 1:
        if est is not None and est.fitness_vs == 0:
            return True
    esr = self._early_stopping_rounds
    if self._tr_fraction < 1 and esr is not None and est is not None:
        # Stop when no improvement has been seen for more than ``esr``
        # evaluations (unfeasible attempts counted too); the reference
        # position is clamped to the initial population size.
        position = self.population.estopping.position
        if position < self.init_popsize:
            position = self.init_popsize
        return (len(self.population.hist) + self._unfeasible_counter - position) > esr
    return flag
def nclasses(self, v):
    """Number of classes of ``v``; also sets ``self._labels``.

    Returns 0 when this instance is not a classifier. For list input the
    labels are ``np.arange(len(v))``; otherwise the labels are the unique
    values of ``v`` (converted to an ndarray if needed).
    """
    if not self.classifier:
        return 0
    if isinstance(v, list):
        self._labels = np.arange(len(v))
        # Bug fix: this branch previously returned None, which breaks
        # callers that compare the result numerically (``nclasses > 2``
        # in fit() raises TypeError on Python 3 for None).
        return self._labels.shape[0]
    if not isinstance(v, np.ndarray):
        v = tonparray(v)
    self._labels = np.unique(v)
    return self._labels.shape[0]
def fit(self, X, y, test_set=None):
    """Evolutive process"""
    self._init_time = time.time()
    self.X = X
    # "nvar" means: size the population from the number of inputs.
    if self._popsize == "nvar":
        self._popsize = self.nvar + len(self._input_functions)
    # 'shuffle' asks for an internal train/test shuffle split.
    if isinstance(test_set, str) and test_set == 'shuffle':
        test_set = self.shuffle_tr2ts()
    nclasses = self.nclasses(y)
    if self.classifier and self._multiple_outputs:
        pass
    elif nclasses > 2:
        # NOTE(review): ``assert False`` makes the multiclass delegation
        # below unreachable (and is stripped under -O) — confirm whether
        # multiclass handling is intentionally disabled here.
        assert False
        self._multiclass = True
        return self.multiclass(X, y, test_set=test_set)
    self.y = y
    if test_set is not None:
        self.Xtest = test_set
    # Try several times to create an initial population large enough to
    # run tournaments; each failed attempt is logged and ends the fit.
    for _ in range(self._number_tries_feasible_ind):
        self._logger.info("Starting evolution")
        try:
            self.create_population()
            if self.stopping_criteria_tl():
                break
        except RuntimeError as err:
            self._logger.info("Done evolution (RuntimeError (%s), hist: %s)" % (err, len(self.population.hist)))
            return self
        self._logger.info("Population created (hist: %s)" % len(self.population.hist))
        if len(self.population.hist) >= self._tournament_size:
            break
    if len(self.population.hist) == 0:
        raise RuntimeError("Could not find a suitable individual")
    if len(self.population.hist) < self._tournament_size:
        self._logger.info("Done evolution (hist: %s)" % len(self.population.hist))
        return self
    # Optionally free the raw input columns once the population exists.
    if self._remove_raw_inputs:
        for x in range(self.nvar):
            self._X[x] = None
    # Main evolutionary loop: generate offspring until a stopping
    # criterion fires or no feasible offspring can be produced.
    while not self.stopping_criteria():
        try:
            a = self.random_offspring()
        except RuntimeError as err:
            self._logger.info("Done evolution (RuntimeError (%s), hist: %s)" % (err, len(self.population.hist)))
            return self
        self.replace(a)
    self._logger.info("Done evolution (hist: %s)" % len(self.population.hist))
    return self
def decision_function(self, v=None, X=None):
    "Decision function i.e. the raw data of the prediction"
    # Materialize the model for ``v`` and delegate the raw prediction.
    return self.model(v=v).decision_function(X)
def predict(self, v=None, X=None):
    """In classification this returns the classes, in regression it is
    equivalent to the decision function"""
    # Positional convenience: predict(data) treats the first argument
    # as the input matrix rather than a model selector.
    if X is None:
        X, v = v, None
    return self.model(v=v).predict(X)
def serve(application, host='127.0.0.1', port=8080):
    """Gevent-based WSGI-HTTP server."""
    # Build the bind address, hand the application to gevent's WSGI
    # server, and block forever serving requests.
    address = (host, int(port))
    WSGIServer(address, application).serve_forever()
def get_subscribers(obj):
    """
    Returns the subscribers for a given object.

    :param obj: Any object.

    """
    # Resolve the generic relation: content type + primary key.
    content_type = ContentType.objects.get_for_model(obj)
    return Subscription.objects.filter(
        content_type=content_type, object_id=obj.pk)
def is_subscribed(user, obj):
    """
    Returns ``True`` if the user is subscribed to the given object.

    :param user: A ``User`` instance.
    :param obj: Any object.

    """
    # Anonymous users can never be subscribed.
    # NOTE(review): ``is_authenticated`` became a property in newer
    # Django versions — confirm the target Django release.
    if not user.is_authenticated():
        return False
    ctype = ContentType.objects.get_for_model(obj)
    try:
        Subscription.objects.get(
            user=user, content_type=ctype, object_id=obj.pk)
    except Subscription.DoesNotExist:
        return False
    else:
        return True
Create a new subclass of Context which incorporates instance attributes and new descriptors. This promotes an instance and its instance attributes up to being a class with class attributes, then returns an instance of that class. def _promote(self, name, instantiate=True): """Create a new subclass of Context which incorporates instance attributes and new descriptors. This promotes an instance and its instance attributes up to being a class with class attributes, then returns an instance of that class. """ metaclass = type(self.__class__) contents = self.__dict__.copy() cls = metaclass(str(name), (self.__class__, ), contents) if instantiate: return cls() return cls
def run_individual(sim_var, reference, neuroml_file, nml_doc, still_included, generate_dir, target, sim_time, dt, simulator, cleanup = True, show=False):
    """
    Run an individual simulation.

    The candidate data has been flattened into the sim_var dict. The
    sim_var dict contains parameter:value key value pairs, which are
    applied to the model before it is simulated.
    """
    # Each key may bundle several parameter paths joined by '+'; every
    # path in the bundle receives the same value.
    for var_name in sim_var.keys():
        individual_var_names = var_name.split('+')
        for individual_var_name in individual_var_names:
            # Path format: "<type>:<id1>/<variable>[:<id2>]/<units>".
            words = individual_var_name.split('/')
            type, id1 = words[0].split(':')
            if ':' in words[1]:
                variable, id2 = words[1].split(':')
            else:
                variable = words[1]
                id2 = None
            units = words[2]
            value = sim_var[var_name]
            pyneuroml.pynml.print_comment_v(' Changing value of %s (%s) in %s (%s) to: %s %s'%(variable, id2, type, id1, value, units))
            if type == 'channel':
                channel = nml_doc.get_by_id(id1)
                if channel:
                    print("Setting channel %s"%(channel))
                    if variable == 'vShift':
                        channel.v_shift = '%s %s'%(value, units)
                else:
                    pyneuroml.pynml.print_comment_v('Could not find channel with id %s from expression: %s'%(id1, individual_var_name))
                    exit()
            elif type == 'cell':
                # Locate the cell by id, then apply the variable change.
                cell = None
                for c in nml_doc.cells:
                    if c.id == id1:
                        cell = c
                if variable == 'channelDensity':
                    chanDens = None
                    for cd in cell.biophysical_properties.membrane_properties.channel_densities + cell.biophysical_properties.membrane_properties.channel_density_v_shifts:
                        if cd.id == id2:
                            chanDens = cd
                    chanDens.cond_density = '%s %s'%(value, units)
                elif variable == 'vShift_channelDensity':
                    chanDens = None
                    for cd in cell.biophysical_properties.membrane_properties.channel_density_v_shifts:
                        if cd.id == id2:
                            chanDens = cd
                    chanDens.v_shift = '%s %s'%(value, units)
                elif variable == 'channelDensityNernst':
                    chanDens = None
                    for cd in cell.biophysical_properties.membrane_properties.channel_density_nernsts:
                        if cd.id == id2:
                            chanDens = cd
                    chanDens.cond_density = '%s %s'%(value, units)
                elif variable == 'erev_id':
                    # change all values of erev in channelDensity elements with only this id
                    chanDens = None
                    for cd in cell.biophysical_properties.membrane_properties.channel_densities + cell.biophysical_properties.membrane_properties.channel_density_v_shifts:
                        if cd.id == id2:
                            chanDens = cd
                    chanDens.erev = '%s %s'%(value, units)
                elif variable == 'erev_ion':
                    # change all values of erev in channelDensity elements with this ion
                    chanDens = None
                    for cd in cell.biophysical_properties.membrane_properties.channel_densities + cell.biophysical_properties.membrane_properties.channel_density_v_shifts:
                        if cd.ion == id2:
                            chanDens = cd
                    chanDens.erev = '%s %s'%(value, units)
                elif variable == 'specificCapacitance':
                    # id2 == 'all' matches the entry with no segment group.
                    specCap = None
                    for sc in cell.biophysical_properties.membrane_properties.specific_capacitances:
                        if (sc.segment_groups == None and id2 == 'all') or sc.segment_groups == id2 :
                            specCap = sc
                    specCap.value = '%s %s'%(value, units)
                elif variable == 'resistivity':
                    resistivity = None
                    for rs in cell.biophysical_properties.intracellular_properties.resistivities:
                        if (rs.segment_groups == None and id2 == 'all') or rs.segment_groups == id2 :
                            resistivity = rs
                    resistivity.value = '%s %s'%(value, units)
                else:
                    pyneuroml.pynml.print_comment_v('Unknown variable (%s) in variable expression: %s'%(variable, individual_var_name))
                    exit()
            elif type == 'izhikevich2007Cell':
                izhcell = None
                for c in nml_doc.izhikevich2007_cells:
                    if c.id == id1:
                        izhcell = c
                izhcell.__setattr__(variable, '%s %s'%(value, units))
            else:
                pyneuroml.pynml.print_comment_v('Unknown type (%s) in variable expression: %s'%(type, individual_var_name))

    # Write the modified model beside any non-included support files and
    # run the simulation, returning the time and voltage traces.
    new_neuroml_file = '%s/%s'%(generate_dir,os.path.basename(neuroml_file))
    if new_neuroml_file == neuroml_file:
        pyneuroml.pynml.print_comment_v('Cannot use a directory for generating into (%s) which is the same location of the NeuroML file (%s)!'% \
                                        (neuroml_file, generate_dir))
    pyneuroml.pynml.write_neuroml2_file(nml_doc, new_neuroml_file)
    for include in still_included:
        inc_loc = '%s/%s'%(os.path.dirname(os.path.abspath(neuroml_file)),include)
        pyneuroml.pynml.print_comment_v("Copying non included file %s to %s (%s) beside %s"%(inc_loc, generate_dir,os.path.abspath(generate_dir), new_neuroml_file))
        shutil.copy(inc_loc, generate_dir)
    from pyneuroml.tune.NeuroMLSimulation import NeuroMLSimulation
    sim = NeuroMLSimulation(reference,
                            neuroml_file = new_neuroml_file,
                            target = target,
                            sim_time = sim_time,
                            dt = dt,
                            simulator = simulator,
                            generate_dir = generate_dir,
                            cleanup = cleanup,
                            nml_doc = nml_doc)
    sim.go()
    if show:
        sim.show()
    return sim.t, sim.volts
def run(self,candidates,parameters):
    """
    Run simulation for each candidate

    This run method will loop through each candidate and run the simulation
    corresponding to its parameter values. It will populate an array called
    traces with the resulting voltage traces for the simulation and return it.
    """
    traces = []
    start_time = time.time()
    if self.num_parallel_evaluations == 1:
        # Serial path: run candidates one after the other in-process.
        for candidate_i in range(len(candidates)):
            candidate = candidates[candidate_i]
            sim_var = dict(zip(parameters,candidate))
            pyneuroml.pynml.print_comment_v('\n\n - RUN %i (%i/%i); variables: %s\n'%(self.count,candidate_i+1,len(candidates),sim_var))
            self.count+=1
            t,v = self.run_individual(sim_var)
            traces.append([t,v])
    else:
        # Parallel path: fan candidates out over local worker processes
        # via Parallel Python; each candidate gets its own scratch dir.
        import pp
        ppservers = ()
        job_server = pp.Server(self.num_parallel_evaluations, ppservers=ppservers)
        pyneuroml.pynml.print_comment_v('Running %i candidates across %i local processes'%(len(candidates),job_server.get_ncpus()))
        jobs = []
        for candidate_i in range(len(candidates)):
            candidate = candidates[candidate_i]
            sim_var = dict(zip(parameters,candidate))
            pyneuroml.pynml.print_comment_v('\n\n - PARALLEL RUN %i (%i/%i of curr candidates); variables: %s\n'%(self.count,candidate_i+1,len(candidates),sim_var))
            self.count+=1
            cand_dir = self.generate_dir+"/CANDIDATE_%s"%candidate_i
            if not os.path.exists(cand_dir):
                os.mkdir(cand_dir)
            vars = (sim_var, self.ref, self.neuroml_file, self.nml_doc, self.still_included, cand_dir, self.target, self.sim_time, self.dt, self.simulator)
            job = job_server.submit(run_individual, vars, (), ("pyneuroml.pynml",'pyneuroml.tune.NeuroMLSimulation','shutil','neuroml'))
            jobs.append(job)
        # Collect results; calling job() blocks until that job finishes.
        for job_i in range(len(jobs)):
            job = jobs[job_i]
            pyneuroml.pynml.print_comment_v("Checking parallel job %i/%i; set running so far: %i"%(job_i,len(jobs),self.count))
            t,v = job()
            traces.append([t,v])
            #pyneuroml.pynml.print_comment_v("Obtained: %s"%result)
        ####job_server.print_stats()
        job_server.destroy()
        print("-------------------------------------------")
    end_time = time.time()
    tot = (end_time-start_time)
    pyneuroml.pynml.print_comment_v('Ran %i candidates in %s seconds (~%ss per job)'%(len(candidates),tot,tot/len(candidates)))
    return traces
def prepare(self, context):
    """Executed prior to processing a request."""
    if __debug__:
        log.debug("Assigning thread local request context.")
    # Stash the per-request context on the thread-local store so other
    # components running on this thread can reach it.
    self.local.context = context
def register(self, kind, handler):
    """Register a handler for a given type, class, interface, or abstract base class.

    View registration should happen within the `start` callback of an
    extension, e.g.::

        class JSONExtension:
            def start(self, context):
                context.view.register(tuple, json)

    Rather than referencing a handler directly (hard to override without
    replacing the registering extension), named handlers registered as
    discrete plugins (via the `entry_point` argument in `setup.py`) may be
    looked up as attributes of the view registry::

        class JSONExtension:
            def start(self, context):
                context.view.register(tuple, context.view.json)

    Unknown attributes of the registry resolve handler plugins by name.
    """
    if __debug__:
        # In production (-O) this logging is skipped entirely.
        if py3 and not pypy:
            # Prefer the canonical dotted name where the platform supports it.
            log.debug("Registering view handler.", extra=dict(type=name(kind), handler=name(handler)))
        else:
            # Canonical name lookup is not entirely reliable on some combinations.
            log.debug("Registering view handler.", extra=dict(type=repr(kind), handler=repr(handler)))
    # Append to the candidate pool for this kind (a list, not a dict slot).
    self._map.add(kind, handler)
    return handler
def static(base, mapping=None, far=('js', 'css', 'gif', 'jpg', 'jpeg', 'png', 'ttf', 'woff')):
    """Serve files from disk.

    Development-time endpoint factory; in production serve static content
    from a front-end server such as Nginx instead. `base` is the directory
    to serve from: path elements below the attachment point are joined to
    it to pick the file. `mapping` optionally maps filename extensions to
    template engines (TemplateExtension cooperation; see
    https://github.com/marrow/template) — a mapped path is returned as the
    2-tuple ``("{mapping}:{path}", dict())``, e.g.::

        class Root:
            page = static('/path/to/static/pages', dict(html='mako'))

    Extensions in `far` receive far-futures cache expiry headers; assign a
    falsy value to disable.
    """
    base = abspath(base)

    @staticmethod
    def static_handler(context, *parts, **kw):
        path = normpath(pathjoin(base, *parts))

        if __debug__:
            log.debug("Attempting to serve static file.", extra=dict(
                    request = id(context),
                    base = base,
                    path = path
                ))

        # Refuse path traversal outside the configured base directory.
        if not path.startswith(base):
            raise HTTPForbidden("Cowardly refusing to violate base path policy." if __debug__ else None)

        if not exists(path):
            raise HTTPNotFound()

        # Plain files only; no UNIX domain sockets, FIFOs, directories, etc.
        if not isfile(path):
            raise HTTPForbidden("Cowardly refusing to open a non-file." if __debug__ else None)

        if far and path.rpartition('.')[2] in far:
            context.response.cache_expires = 60*60*24*365

        if mapping:
            # NOTE(review): `partition` splits at the FIRST dot, so
            # "page.en.html" yields extension "en.html" while the `far`
            # check above uses `rpartition` — confirm whether
            # `rpartition` was intended here as well.
            _, _, extension = basename(path).partition('.')
            if extension in mapping:
                return mapping[extension] + ':' + path, dict()

        return open(path, 'rb')

    return static_handler
def serve(application, host='127.0.0.1', port=8080):
    """Diesel-based (greenlet) WSGI-HTTP server.

    As a minor note, this is crazy. Diesel includes Flask, too.
    """
    # Configure the greenlet-based server and enter its blocking run loop.
    WSGIApplication(application, port=int(port), iface=host).run()
def process_args():
    """
    Parse command-line arguments.
    """
    arg_parser = argparse.ArgumentParser(description="A script for plotting files containing spike time data")

    # Required: one or more spike-time files.
    arg_parser.add_argument('spiketimeFiles',
                            type=str,
                            metavar='<spiketime file>',
                            help='List of text file containing spike times',
                            nargs='+')

    # Input layout of each line (or SONATA HDF5).
    arg_parser.add_argument('-format',
                            type=str,
                            metavar='<format>',
                            default=DEFAULTS['format'],
                            help='How the spiketimes are represented on each line of file:\n'+\
                                 'id_t: id of cell, space(s)/tab(s), time of spike (default);\n'+\
                                 't_id: time of spike, space(s)/tab(s), id of cell;\n'+\
                                 'sonata: SONATA format HDF5 file containing spike times')

    # Optional plotting behaviour.
    arg_parser.add_argument('-rates',
                            action='store_true',
                            default=DEFAULTS['rates'],
                            help='Show a plot of rates')

    arg_parser.add_argument('-showPlotsAlready',
                            action='store_true',
                            default=DEFAULTS['show_plots_already'],
                            help='Show plots once generated')

    arg_parser.add_argument('-saveSpikePlotTo',
                            type=str,
                            metavar='<spiketime plot filename>',
                            default=DEFAULTS['save_spike_plot_to'],
                            help='Name of file in which to save spiketime plot')

    # Rate-calculation tuning.
    arg_parser.add_argument('-rateWindow',
                            type=int,
                            metavar='<rate window>',
                            default=DEFAULTS['rate_window'],
                            help='Window for rate calculation in ms')

    arg_parser.add_argument('-rateBins',
                            type=int,
                            metavar='<rate bins>',
                            default=DEFAULTS['rate_bins'],
                            help='Number of bins for rate histogram')

    return arg_parser.parse_args()
def simple(application, host='127.0.0.1', port=8080):
    """Python-standard WSGI-HTTP server for testing purposes.

    The additional work performed here is to match the default startup
    output of "waitress". This is not a production quality interface and
    will behave badly under load.
    """
    # Announce the URL; many terminals make it clickable.
    print("serving on http://{0}:{1}".format(host, port))

    # Bind and enter the blocking serve loop of the reference server.
    make_server(host, int(port), application).serve_forever()
def iiscgi(application):
    """A specialized version of the reference WSGI-CGI server to adapt to Microsoft IIS quirks.

    This is not a production quality interface and will behave badly under load.
    """
    try:
        from wsgiref.handlers import IISCGIHandler
    except ImportError:
        print("Python 3.2 or newer is required.")
        # Bug fix: previously execution fell through after the message and
        # raised NameError on IISCGIHandler below; bail out explicitly.
        return

    if not __debug__:
        warnings.warn("Interactive debugging and other persistence-based processes will not work.")

    IISCGIHandler().run(application)
def serve(application, host='127.0.0.1', port=8080, socket=None, **options):
    """Basic FastCGI support via flup.

    This web server has many, many options. Please see the Flup project
    documentation for details.
    """
    # Prefer an on-disk socket when given; otherwise bind TCP host:port.
    bindAddress = socket if socket else (host, int(port))

    # Bind and start the blocking web server interface.
    WSGIServer(application, bindAddress=bindAddress, **options).run()
Helper method. Returns kwargs needed to filter the correct object. Can also be used to create the correct object. def _get_method_kwargs(self): """ Helper method. Returns kwargs needed to filter the correct object. Can also be used to create the correct object. """ method_kwargs = { 'user': self.user, 'content_type': self.ctype, 'object_id': self.content_object.pk, } return method_kwargs
def save(self, *args, **kwargs):
    """Adds a subscription for the given user to the given object."""
    lookup = self._get_method_kwargs()
    # Reuse an existing subscription when present; create it otherwise.
    try:
        return Subscription.objects.get(**lookup)
    except Subscription.DoesNotExist:
        return Subscription.objects.create(**lookup)
def prepare(self, context):
    """Add the usual suspects to the context.

    This adds `request`, `response`, and `path` to the `RequestContext`
    instance.
    """
    if __debug__:
        log.debug("Preparing request context.", extra=dict(request=id(context)))

    # Bridge in WebOb `Request` and `Response` objects. Extensions
    # shouldn't rely on these, using `environ` where possible instead.
    context.request = Request(context.environ)
    context.response = Response(request=context.request)

    # Remember where the front-end web server handed the request off to us.
    context.environ['web.base'] = context.request.script_name

    # Seed the unprocessed path-element list, dropping a leading empty
    # segment produced by the leading slash.
    remainder = context.request.path_info.split('/')
    if remainder and not remainder[0]:
        del remainder[0]
    context.request.remainder = remainder

    # Breadcrumb list tracking dispatch through distinct controllers.
    context.path = Bread()
def dispatch(self, context, consumed, handler, is_endpoint):
    """Called as dispatch descends into a tier.

    The base extension uses this to maintain the "current url".
    """
    request = context.request

    if __debug__:
        log.debug("Handling dispatch event.", extra=dict(
                request = id(context),
                consumed = consumed,
                handler = safe_name(handler),
                endpoint = is_endpoint
            ))

    # The leading path element (leading slash) requires special treatment.
    if not consumed and context.request.path_info_peek() == '':
        consumed = ['']

    nConsumed = 0
    if consumed:
        # Migrate path elements consumed from the `PATH_INFO` to
        # `SCRIPT_NAME` WSGI environment variables.
        if not isinstance(consumed, (list, tuple)):
            consumed = consumed.split('/')
        # Pop only elements that actually match the front of PATH_INFO;
        # stop at the first mismatch.
        for element in consumed:
            if element == context.request.path_info_peek():
                context.request.path_info_pop()
                nConsumed += 1
            else:
                break

    # Update the breadcrumb list.
    context.path.append(Crumb(handler, Path(request.script_name)))

    if consumed:
        # Lastly, update the remaining path element list.
        request.remainder = request.remainder[nConsumed:]
def render_none(self, context, result):
    """Render empty responses."""
    response = context.response
    response.body = b''
    # Drop the stale Content-Length so WSGI recomputes/omits it.
    del response.content_length
    return True
def render_binary(self, context, result):
    """Return binary responses unmodified."""
    # A WSGI body must be iterable; wrap the single binary string.
    context.response.app_iter = iter((result, ))
    return True
def render_file(self, context, result):
    """Perform appropriate metadata wrangling for returned open file handles."""
    if __debug__:
        log.debug("Processing file-like object.", extra=dict(request=id(context), result=repr(result)))

    response = context.response
    # Enable 304/Range handling based on the metadata filled in below.
    response.conditional_response = True

    # File mtime drives both Last-Modified and the ETag.
    modified = mktime(gmtime(getmtime(result.name)))
    response.last_modified = datetime.fromtimestamp(modified)

    # Guess the MIME type/encoding from the filename; fall back to a
    # generic binary type.
    ct, ce = guess_type(result.name)
    if not ct:
        ct = 'application/octet-stream'
    response.content_type, response.content_encoding = ct, ce
    # NOTE(review): `unicode` here presumably comes from a py2/py3 compat
    # alias at module level — confirm it is defined on Python 3.
    response.etag = unicode(modified)

    # Measure the file size by seeking, then rewind for streaming.
    result.seek(0, 2)  # Seek to the end of the file.
    response.content_length = result.tell()

    result.seek(0)  # Seek back to the start of the file.
    response.body_file = result

    return True
def render_generator(self, context, result):
    """Attempt to serve generator responses through stream encoding.

    This allows for direct use of cinje template functions, which are
    generators, as returned views.
    """
    response = context.response
    response.encoding = 'utf8'

    def _encoded(chunks):
        # Encode text chunks, pass bytes through untouched, drop Nones.
        for chunk in chunks:
            if chunk is None:
                continue
            yield chunk.encode('utf8') if isinstance(chunk, unicode) else chunk

    response.app_iter = _encoded(result)
    return True
def serve(application, host='127.0.0.1', port=8080):
    """CherryPy-based WSGI-HTTP server."""
    # Configure the server with our application.
    server = CherryPyWSGIServer((host, int(port)), application, server_name=host)

    # Announce the URL; many terminals make it clickable.
    print("serving on http://{0}:{1}".format(host, port))

    # Serve until interrupted, shutting down cleanly on Ctrl+C.
    try:
        server.start()
    except KeyboardInterrupt:
        server.stop()
def colorize(self, string, rgb=None, ansi=None, bg=None, ansi_bg=None):
    '''Returns the colored string'''
    if not isinstance(string, str):
        string = str(string)

    # Exactly one foreground spec is required: rgb or ansi.
    if rgb is None and ansi is None:
        raise TerminalColorMapException(
            'colorize: must specify one named parameter: rgb or ansi')
    if rgb is not None and ansi is not None:
        raise TerminalColorMapException(
            'colorize: must specify only one named parameter: rgb or ansi')
    # At most one background spec is allowed: bg or ansi_bg.
    if bg is not None and ansi_bg is not None:
        raise TerminalColorMapException(
            'colorize: must specify only one named parameter: bg or ansi_bg')

    # Resolve the foreground to a 256-color ANSI code.
    if rgb is not None:
        (fg_code, fg_rgb) = self.convert(rgb)
    elif ansi is not None:
        (fg_code, fg_rgb) = (ansi, self.colors[ansi])

    # No background requested: emit the foreground escape only.
    if bg is None and ansi_bg is None:
        return "\033[38;5;{ansiCode:d}m{string:s}\033[0m".format(ansiCode=fg_code, string=string)

    # Resolve the background to a 256-color ANSI code.
    if bg is not None:
        (bg_code, _unused) = self.convert(bg)
    elif ansi_bg is not None:
        (bg_code, _unused) = (ansi_bg, self.colors[ansi_bg])

    return "\033[38;5;{ansiCode:d}m\033[48;5;{bf:d}m{string:s}\033[0m".format(ansiCode=fg_code, bf=bg_code, string=string)
def render_serialization(self, context, result):
    """Render serialized responses.

    Content-negotiates a serializer from the request's Accept header
    (falling back to ``self.default``), serializes ``result``, and
    assigns it to the response as text with the negotiated content type.
    """
    resp = context.response
    serial = context.serialize
    match = context.request.accept.best_match(serial.types, default_match=self.default)

    result = serial[match](result)

    # Bug fix: serializers may return bytes; the original tested `str`,
    # which is never bytes on Python 3, so the decode was skipped and
    # `resp.text` could receive a bytes object. On Python 2 `bytes is
    # str`, so this check is behaviorally identical there.
    if isinstance(result, bytes):
        result = result.decode('utf-8')
        resp.charset = 'utf-8'

    resp.content_type = match
    resp.text = result

    return True
def serve(application, host='127.0.0.1', port=8080):
    """Eventlet-based WSGI-HTTP server.

    For a more fully-featured Eventlet-capable interface, see also
    [Spawning](http://pypi.python.org/pypi/Spawning/).
    """
    # Bind the listening socket, then hand it plus the application to
    # eventlet's blocking server loop.
    sock = listen(host, int(port))
    server(sock, application)
def main(args=None):
    """Evaluate gating-variable expressions of two example channel files over
    a voltage sweep and plot the resulting traces.

    :param args: unused; accepted for command-line-entry compatibility
    """
    # Voltage sweep: -100 mV .. +99 mV in 1 mV steps, expressed in volts.
    vs = [(v - 100) * 0.001 for v in range(200)]

    for f in ['IM.channel.nml', 'Kd.channel.nml']:
        nml_doc = pynml.read_neuroml2_file(f)
        for ct in nml_doc.ComponentType:
            ys = []
            for v in vs:
                req_variables = {'v': '%sV' % v, 'vShift': '10mV'}
                vals = pynml.evaluate_component(ct, req_variables=req_variables)
                # Fixed: was Python 2 `print vals`, a SyntaxError on Python 3.
                print(vals)
                # Collect whichever gating quantities the component exposes.
                if 'x' in vals:
                    ys.append(vals['x'])
                if 't' in vals:
                    ys.append(vals['t'])
                if 'r' in vals:
                    ys.append(vals['r'])
            pynml.generate_plot([vs], [ys],
                                "Some traces from %s in %s" % (ct.name, f),
                                show_plot_already=False)
            print(vals)
    plt.show()
def process_args():
    """
    Parse command-line arguments for the channel-analysis LEMS generator.

    Returns the argparse Namespace; all defaults come from the module-level
    DEFAULTS dict so they can be shared with programmatic callers.
    """
    parser = argparse.ArgumentParser(
        description=("A script which can be run to generate a LEMS "
                     "file to analyse the behaviour of channels in "
                     "NeuroML 2"))

    # Positional: one or more channel files to analyse.
    parser.add_argument('channelFiles',
                        type=str,
                        nargs='+',
                        metavar='<NeuroML 2 Channel file>',
                        help="Name of the NeuroML 2 file(s)")

    parser.add_argument('-v',
                        action='store_true',
                        default=DEFAULTS['v'],
                        help="Verbose output")

    # Voltage-sweep and environment settings.
    parser.add_argument('-minV',
                        type=int,
                        metavar='<min v>',
                        default=DEFAULTS['minV'],
                        help="Minimum voltage to test (integer, mV), default: %smV"%DEFAULTS['minV'])

    parser.add_argument('-maxV',
                        type=int,
                        metavar='<max v>',
                        default=DEFAULTS['maxV'],
                        help="Maximum voltage to test (integer, mV), default: %smV"%DEFAULTS['maxV'])

    parser.add_argument('-temperature',
                        type=float,
                        metavar='<temperature>',
                        default=DEFAULTS['temperature'],
                        help="Temperature (float, celsius), default: %sdegC"%DEFAULTS['temperature'])

    parser.add_argument('-duration',
                        type=float,
                        metavar='<duration>',
                        default=DEFAULTS['duration'],
                        help="Duration of simulation in ms, default: %sms"%DEFAULTS['duration'])

    # Voltage-clamp protocol parameters.
    parser.add_argument('-clampDelay',
                        type=float,
                        metavar='<clamp delay>',
                        default=DEFAULTS['clampDelay'],
                        help="Delay before voltage clamp is activated in ms, default: %sms"%DEFAULTS['clampDelay'])

    parser.add_argument('-clampDuration',
                        type=float,
                        metavar='<clamp duration>',
                        default=DEFAULTS['clampDuration'],
                        help="Duration of voltage clamp in ms, default: %sms"%DEFAULTS['clampDuration'])

    parser.add_argument('-clampBaseVoltage',
                        type=float,
                        metavar='<clamp base voltage>',
                        default=DEFAULTS['clampBaseVoltage'],
                        help="Clamp base (starting/finishing) voltage in mV, default: %smV"%DEFAULTS['clampBaseVoltage'])

    parser.add_argument('-stepTargetVoltage',
                        type=float,
                        metavar='<step target voltage>',
                        default=DEFAULTS['stepTargetVoltage'],
                        help=("Voltage in mV through which to step voltage clamps, default: %smV"%DEFAULTS['stepTargetVoltage']))

    parser.add_argument('-erev',
                        type=float,
                        metavar='<reversal potential>',
                        default=DEFAULTS['erev'],
                        help="Reversal potential of channel for currents, default: %smV"%DEFAULTS['erev'])

    parser.add_argument('-scaleDt',
                        type=float,
                        metavar='<scale dt in generated LEMS>',
                        default=DEFAULTS['scaleDt'],
                        help="Scale dt in generated LEMS, default: %s"%DEFAULTS['scaleDt'])

    parser.add_argument('-caConc',
                        type=float,
                        metavar='<Ca2+ concentration>',
                        default=DEFAULTS['caConc'],
                        help=("Internal concentration of Ca2+ (float, "
                              "concentration in mM), default: %smM"%DEFAULTS['caConc']))

    parser.add_argument('-datSuffix',
                        type=str,
                        metavar='<dat suffix>',
                        default=DEFAULTS['datSuffix'],
                        help="String to add to dat file names (before .dat)")

    # Output/behavior flags.
    parser.add_argument('-norun',
                        action='store_true',
                        default=DEFAULTS['norun'],
                        help=("If used, just generate the LEMS file, "
                              "don't run it"))

    parser.add_argument('-nogui',
                        action='store_true',
                        default=DEFAULTS['nogui'],
                        help=("Supress plotting of variables and only save "
                              "data to file"))

    parser.add_argument('-html',
                        action='store_true',
                        default=DEFAULTS['html'],
                        help=("Generate a HTML page featuring the plots for the "
                              "channel"))

    parser.add_argument('-md',
                        action='store_true',
                        default=DEFAULTS['md'],
                        help=("Generate a (GitHub flavoured) Markdown page featuring the plots for the "
                              "channel"))

    parser.add_argument('-ivCurve',
                        action='store_true',
                        default=DEFAULTS['ivCurve'],
                        help=("Save currents through voltage clamp at each "
                              "level & plot current vs voltage for ion "
                              "channel"))

    return parser.parse_args()
def plot_iv_curve(a, hold_v, i, *plt_args, **plt_kwargs):
    """Plot a single current-voltage (IV) curve.

    :param a: analysis object forwarded to make_iv_curve_fig
    :param hold_v: holding potentials in volts
    :param i: currents in amps — a sequence aligned with hold_v, or a dict
        keyed by holding potential
    :param plt_args: positional matplotlib style args (default ``'ko-'``)
    :param plt_kwargs: matplotlib kwargs; ``grid`` and ``same_fig`` are
        consumed here and not passed through
    """
    grid = plt_kwargs.pop('grid', True)
    same_fig = plt_kwargs.pop('same_fig', False)

    if not plt_args:
        plt_args = ('ko-',)
    # Idiomatic default for the legend label.
    plt_kwargs.setdefault('label', 'Current')

    if not same_fig:
        make_iv_curve_fig(a, grid=grid)

    # Accept a mapping of holding potential -> current; order by hold_v.
    # Fixed: use isinstance rather than `type(i) is dict` so dict subclasses
    # (e.g. OrderedDict) are handled too.
    if isinstance(i, dict):
        i = [i[v] for v in hold_v]

    # Convert V -> mV and A -> pA for display.
    plt.plot([v * 1e3 for v in hold_v],
             [ii * 1e12 for ii in i],
             *plt_args, **plt_kwargs)
    plt.legend(loc=2)
def root(context):
    """Multipart AJAX request example.

    See: http://test.getify.com/mpAjax/description.html

    Fans out 144 multiplication tasks to a shared executor and streams each
    result back as it completes, framed as multipart-style chunks.
    """
    response = context.response
    parts = []

    # Schedule every i*j product on the shared executor (futures).
    for i in range(12):
        for j in range(12):
            parts.append(executor.submit(mul, i, j))

    def stream(parts, timeout=None):
        # Yield one chunk per completed future; each chunk carries its own
        # Content-Type/Content-Length header and a random part marker.
        try:
            for future in as_completed(parts, timeout):
                mime, result = future.result()
                result = result.encode('utf8')
                yield "!!!!!!=_NextPart_{num}\nContent-Type: {mime}\nContent-Length: {length}\n\n".format(
                        num = randint(100000000, 999999999),
                        mime = mime,
                        length = len(result)
                    ).encode('utf8') + result

        except TimeoutError:
            # Stop streaming on timeout and cancel anything still pending.
            for future in parts:
                future.cancel()

    # Total size is unknown up front, so stream the body instead of
    # advertising a Content-Length.
    response.content_length = None
    response.app_iter = stream(parts, 0.2)

    return response
def render_template_with_args_in_file(file, template_file_name, **kwargs):
    """Render the template at *template_file_name* into *file*.

    :param file: a writable file-like stream
    :param template_file_name: path of the template to read (UTF-8)
    :param kwargs: values substituted into ``$name`` placeholders; unknown
        placeholders are left untouched (safe_substitute)
    """
    # Read the whole template, closing the handle deterministically.
    # Fixed: the original codecs.open() handle was never closed, and
    # "".join(readlines()) is just read().
    with codecs.open(template_file_name, encoding='UTF-8') as template_file:
        template_file_content = template_file.read()

    template_rendered = string.Template(template_file_content).safe_substitute(**kwargs)
    file.write(template_rendered)
def create_or_open(file_name, initial_template_file_name, args):
    """Create *file_name* (rendering an optional initial template into it),
    or open it for appending if it already exists.

    :param file_name: file name relative to the application folder
    :param initial_template_file_name: template rendered into newly created
        files; ignored for existing files and when falsy
    :param args: dict with 'django_application_folder' giving the target dir
    :return: an open codecs stream ('w+' for new files, 'a+' for existing)
    """
    path = os.path.join(args['django_application_folder'], file_name)
    is_new = not os.path.isfile(path)

    # Single open with the mode chosen up front (removes the duplicated
    # codecs.open calls of the original): write for new, append for existing.
    file = codecs.open(path, 'w+' if is_new else 'a+', encoding='UTF-8')

    if is_new:
        print("Creating {}".format(file_name))
        if initial_template_file_name:
            render_template_with_args_in_file(
                file, initial_template_file_name, **{})

    return file
def generic_insert_module(module_name, args, **kwargs):
    """Append the rendered ``<module_name>.py.tmpl`` template to the module
    file, creating it from ``<module_name>_initial.py.tmpl`` first if needed.

    Keeps each module's shared boilerplate in a single "initial" template so
    it is written only once per file.

    :param module_name: module base name (without ``.py``)
    :param args: console args locating the target application folder
    :param kwargs: values rendered into the template
    """
    initial_template = os.path.join(
        BASE_TEMPLATES_DIR,
        '{}_initial.py.tmpl'.format(module_name)
    )
    body_template = os.path.join(
        BASE_TEMPLATES_DIR,
        '{}.py.tmpl'.format(module_name)
    )

    target = create_or_open('{}.py'.format(module_name), initial_template, args)
    render_template_with_args_in_file(target, body_template, **kwargs)
    target.close()
def sanity_check(args):
    """Abort unless the work folder looks like a Django app.

    A valid Django application folder always contains a models.py file;
    exits with status 1 otherwise.

    :return: None
    """
    models_path = os.path.join(args['django_application_folder'], 'models.py')
    # Guard clause: nothing to do if the marker file is present.
    if os.path.isfile(models_path):
        return
    print("django_application_folder is not a Django application folder")
    sys.exit(1)
def generic_insert_with_folder(folder_name, file_name, template_name, args):
    """
    Render *template_name* into ``<folder_name>/<file_name>.py``, creating
    the folder as a Python package first if needed.

    :param folder_name: package directory inside the application folder
    :param file_name: target module base name (without ``.py``)
    :param template_name: template file name under BASE_TEMPLATES_DIR
    :param args: console args; must carry 'django_application_folder',
        'model_name' and 'model_prefix'
    """
    # First we make sure views are a package instead a file
    if not os.path.isdir(
        os.path.join(
            args['django_application_folder'],
            folder_name
        )
    ):
        os.mkdir(os.path.join(args['django_application_folder'], folder_name))
        # Touch __init__.py so the new directory imports as a package.
        # NOTE(review): this handle is never closed — harmless for an empty
        # touch, but worth tidying.
        codecs.open(
            os.path.join(
                args['django_application_folder'],
                folder_name,
                '__init__.py'
            ),
            'w+'
        )

    # Create/open the target module; no initial template ('').
    view_file = create_or_open(
        os.path.join(
            folder_name,
            '{}.py'.format(file_name)
        ),
        '',
        args
    )

    # Load content from template
    render_template_with_args_in_file(
        view_file,
        os.path.join(
            BASE_TEMPLATES_DIR,
            template_name
        ),
        model_name=args['model_name'],
        model_prefix=args['model_prefix'],
        model_name_lower=args['model_name'].lower(),
        application_name=args['django_application_folder'].split("/")[-1]
    )
    view_file.close()
def serve(application, host='127.0.0.1', port=8080, threads=4, **kw):
    """The recommended development HTTP server.

    Note that this server performs additional buffering and will not honour
    chunked encoding breaks.

    :param application: WSGI application callable
    :param host: interface to bind
    :param port: TCP port; coerced to int
    :param threads: worker thread count; coerced to int
    :param kw: extra options passed through to the underlying server
    """
    # Bind and start the server; this is a blocking process.
    # NOTE(review): `serve_` is presumably waitress.serve imported under an
    # alias elsewhere in this file — confirm.
    serve_(application, host=host, port=int(port), threads=int(threads), **kw)
def show(self):
    """
    Plot the result of the simulation once it's been initialized.

    Plots one voltage trace per recorded reference in ``self.volts``
    against ``self.t``; prints a hint instead if the simulation has not
    been run yet.
    """
    from matplotlib import pyplot as plt

    if self.already_run:
        for ref in self.volts.keys():
            plt.plot(self.t, self.volts[ref], label=ref)
        plt.title("Simulation voltage vs time")
        plt.legend()
        plt.xlabel("Time [ms]")
        plt.ylabel("Voltage [mV]")
    else:
        pynml.print_comment("First you have to 'go()' the simulation.", True)

    # NOTE(review): show() is called even on the not-run branch, opening an
    # empty figure window — possibly intentional, worth confirming.
    plt.show()
def mul(self, a: int = None, b: int = None) -> 'json':
    """Multiply two values together and return the result via JSON.

    Python 3 function annotations are used to ensure that the arguments are
    integers. This requires the functionality of
    `web.ext.annotation:AnnotationExtension`.

    There are several ways to execute this method:

    * POST http://localhost:8080/mul
    * GET http://localhost:8080/mul?a=27&b=42
    * GET http://localhost:8080/mul/27/42

    The latter relies on the fact we can't descend past a callable method so
    the remaining path elements are used as positional arguments, whereas
    the others rely on keyword argument assignment from a form-encoded
    request body or query string arguments. (Security note: any data in the
    request body takes presidence over query string arguments!)

    You can easily test these on the command line using cURL:

        curl http://localhost:8080/mul/27/42  # HTTP GET
        curl -d a=27 -d b=42 http://localhost:8080/mul  # HTTP POST
    """
    # Fixed: `if not a or not b` rejected legitimate zero operands
    # (mul 0 * 5 returned the usage message). Test for absence explicitly.
    if a is None or b is None:
        return dict(message="Pass arguments a and b to multiply them together!")

    return dict(answer=a * b)
def colorize(string, rgb=None, ansi=None, bg=None, ansi_bg=None, fd=1):
    '''Returns the colored string to print on the terminal. This function
    detects the terminal type and if it is supported and the output is not
    going to a pipe or a file, then it will return the colored string,
    otherwise it will return the string without modifications.

    string = the string to print. Only accepts strings, unicode strings
             must be encoded in advance.
    rgb    = Rgb color for the text; for example 0xFF0000 is red.
    ansi   = Ansi for the text
    bg     = Rgb color for the background
    ansi_bg= Ansi color for the background
    fd     = The file descriptor that will be used by print, by default is
             the stdout
    '''
    # State is cached on the function object itself (colorize.fd,
    # colorize.init, colorize.is_term, colorize.cmap); these attributes are
    # presumably initialized elsewhere in this module — confirm.

    #Reinitializes if fd used is different
    if colorize.fd != fd:
        colorize.init = False
        colorize.fd = fd

    #Checks if it is on a terminal, and if the terminal is recognized
    if not colorize.init:
        colorize.init = True
        colorize.is_term = isatty(fd)
        # Only xterm-family and vt100 terminals get a color map; anything
        # else (or no TERM at all) falls back to plain text.
        if 'TERM' in environ:
            if environ['TERM'].startswith('xterm'):
                colorize.cmap = XTermColorMap()
            elif environ['TERM'] == 'vt100':
                colorize.cmap = VT100ColorMap()
            else:
                colorize.is_term = False
        else:
            colorize.is_term = False

    if colorize.is_term:
        string = colorize.cmap.colorize(string, rgb, ansi, bg, ansi_bg)

    return string
def mutate(self, context, handler, args, kw):
    """Inspect and potentially mutate the given handler's arguments.

    Casts positional and keyword arguments through the handler's parameter
    annotations. The args list and kw dictionary may be freely modified,
    though invalid arguments to the handler will fail.
    """
    def cast(arg, val):
        # Fixed: the original looked up `annotations[key]` (the enclosing
        # loop variable) instead of its own `arg` parameter, and shadowed
        # its own name with the local `cast`.
        caster = annotations[arg]

        try:
            val = caster(val)
        except (ValueError, TypeError) as e:
            # Annotate the failure with the offending argument's name.
            parts = list(e.args)
            parts[0] = parts[0] + " processing argument '{}'".format(arg)
            e.args = tuple(parts)
            raise

        return val

    # Unwrap bound methods to reach the annotated function.
    annotations = getattr(handler.__func__ if hasattr(handler, '__func__') else handler,
                          '__annotations__', None)
    if not annotations:
        return

    argspec = getfullargspec(handler)
    arglist = list(argspec.args)
    if ismethod(handler):
        del arglist[0]  # drop `self`

    # Convert positional arguments in place.
    for i, value in enumerate(list(args)):
        key = arglist[i]
        if key in annotations:
            args[i] = cast(key, value)

    # Convert keyword arguments in place.
    for key, value in list(kw.items()):
        if key in annotations:
            kw[key] = cast(key, value)
def transform(self, context, handler, result):
    """Transform the value returned by the controller endpoint.

    If the endpoint carries a return-type annotation, pair the annotation
    with the result for downstream rendering; otherwise pass the result
    through unchanged.
    """
    target = getattr(handler, '__func__', handler)
    annotation = getattr(target, '__annotations__', {}).get('return', None)
    return (annotation, result) if annotation else result
def process_args():
    """
    Parse command-line arguments for the NeuroML 2 model tuner.

    Positional arguments identify the model and fitness targets; optional
    arguments configure the evolutionary optimiser. All optional defaults
    come from the module-level DEFAULTS dict.
    """
    parser = argparse.ArgumentParser(
        description=("A script which can be run to tune a NeuroML 2 model against a number of target properties. Work in progress!"))

    # Positional: model identification and fitness specification.
    parser.add_argument('prefix',
                        type=str,
                        metavar='<prefix>',
                        help="Prefix for optimisation run")

    parser.add_argument('neuromlFile',
                        type=str,
                        metavar='<neuromlFile>',
                        help="NeuroML2 file containing model")

    parser.add_argument('target',
                        type=str,
                        metavar='<target>',
                        help="Target in NeuroML2 model")

    parser.add_argument('parameters',
                        type=str,
                        metavar='<parameters>',
                        help="List of parameter to adjust")

    parser.add_argument('maxConstraints',
                        type=str,
                        metavar='<max_constraints>',
                        help="Max values for parameters")

    parser.add_argument('minConstraints',
                        type=str,
                        metavar='<min_constraints>',
                        help="Min values for parameters")

    parser.add_argument('targetData',
                        type=str,
                        metavar='<targetData>',
                        help="List of name/value pairs for properties extracted from data to judge fitness against")

    parser.add_argument('weights',
                        type=str,
                        metavar='<weights>',
                        help="Weights to assign to each target name/value pair")

    # Simulation settings.
    parser.add_argument('-simTime',
                        type=float,
                        metavar='<simTime>',
                        default=DEFAULTS['simTime'],
                        help="Simulation duration")

    parser.add_argument('-dt',
                        type=float,
                        metavar='<dt>',
                        default=DEFAULTS['dt'],
                        help="Simulation timestep")

    parser.add_argument('-analysisStartTime',
                        type=float,
                        metavar='<analysisStartTime>',
                        default=DEFAULTS['analysisStartTime'],
                        help="Analysis start time")

    # Evolutionary-algorithm settings.
    parser.add_argument('-populationSize',
                        type=int,
                        metavar='<populationSize>',
                        default=DEFAULTS['populationSize'],
                        help="Population size")

    parser.add_argument('-maxEvaluations',
                        type=int,
                        metavar='<maxEvaluations>',
                        default=DEFAULTS['maxEvaluations'],
                        help="Maximum evaluations")

    parser.add_argument('-numSelected',
                        type=int,
                        metavar='<numSelected>',
                        default=DEFAULTS['numSelected'],
                        help="Number selected")

    parser.add_argument('-numOffspring',
                        type=int,
                        metavar='<numOffspring>',
                        default=DEFAULTS['numOffspring'],
                        help="Number offspring")

    parser.add_argument('-mutationRate',
                        type=float,
                        metavar='<mutationRate>',
                        default=DEFAULTS['mutationRate'],
                        help="Mutation rate")

    parser.add_argument('-numElites',
                        type=int,
                        metavar='<numElites>',
                        default=DEFAULTS['numElites'],
                        help="Number of elites")

    parser.add_argument('-numParallelEvaluations',
                        type=int,
                        metavar='<numParallelEvaluations>',
                        default=DEFAULTS['numParallelEvaluations'],
                        help="Number of evaluations to run in parallel")

    parser.add_argument('-seed',
                        type=int,
                        metavar='<seed>',
                        default=DEFAULTS['seed'],
                        help="Seed for optimiser")

    parser.add_argument('-simulator',
                        type=str,
                        metavar='<simulator>',
                        default=DEFAULTS['simulator'],
                        help="Simulator to run")

    parser.add_argument('-knownTargetValues',
                        type=str,
                        metavar='<knownTargetValues>',
                        help="List of name/value pairs which represent the known values of the target parameters")

    # Behavior flags.
    parser.add_argument('-nogui',
                        action='store_true',
                        default=DEFAULTS['nogui'],
                        help="Should GUI elements be supressed?")

    parser.add_argument('-showPlotAlready',
                        action='store_true',
                        default=DEFAULTS['showPlotAlready'],
                        help="Should generated plots be suppressed until show() called?")

    parser.add_argument('-verbose',
                        action='store_true',
                        default=DEFAULTS['verbose'],
                        help="Verbose mode")

    parser.add_argument('-dryRun',
                        action='store_true',
                        default=DEFAULTS['dryRun'],
                        help="Dry run; just print setup information")

    parser.add_argument('-extraReportInfo',
                        type=str,
                        metavar='<extraReportInfo>',
                        default=DEFAULTS['extraReportInfo'],
                        help='Extra tag/value pairs can be put into the report.json: -extraReportInfo=["tag":"value"]')

    parser.add_argument('-cleanup',
                        action='store_true',
                        default=DEFAULTS['cleanup'],
                        help="Should (some) generated files, e.g. *.dat, be deleted as optimisation progresses?")

    return parser.parse_args()
def process_args():
    """
    Parse command-line arguments for the POVRay activity-overlay tool.

    Returns the argparse Namespace controlling color scale, time window,
    and movie framing.
    """
    parser = argparse.ArgumentParser(description="A file for overlaying POVRay files generated from NeuroML by NeuroML1ToPOVRay.py with cell activity (e.g. as generated from a neuroConstruct simulation)")

    parser.add_argument('prefix',
                        type=str,
                        metavar='<network prefix>',
                        help='Prefix for files in PovRay, e.g. use PREFIX is files are PREFIX.pov, PREFIX_net.inc, etc.')

    parser.add_argument('-activity',
                        action='store_true',
                        default=False,
                        help="If this is specified, overlay network activity (not tested!!)")

    # Color-scale bounds for mapping voltage onto cell color.
    parser.add_argument('-maxV',
                        type=float,
                        metavar='<maxV>',
                        default=50.0,
                        help='Max voltage for colour scale in mV')

    parser.add_argument('-minV',
                        type=float,
                        metavar='<minV>',
                        default=-90.0,
                        help='Min voltage for colour scale in mV')

    # Time window of simulation activity to render.
    parser.add_argument('-startTime',
                        type=float,
                        metavar='<startTime>',
                        default=0,
                        help='Time in ms at which to start overlaying the simulation activity')

    parser.add_argument('-endTime',
                        type=float,
                        metavar='<endTime>',
                        default=100,
                        help='End time of simulation activity in ms')

    # Movie presentation options.
    parser.add_argument('-title',
                        type=str,
                        metavar='<title>',
                        default='Movie generated from neuroConstruct simulation',
                        help='Title for movie')

    parser.add_argument('-left',
                        type=str,
                        metavar='<left info>',
                        default='',
                        help='Text on left')

    parser.add_argument('-frames',
                        type=int,
                        metavar='<frames>',
                        default=100,
                        help='Number of frames')

    parser.add_argument('-name',
                        type=str,
                        metavar='<Movie name>',
                        default='output',
                        help='Movie name')

    return parser.parse_args()
def serve(application, host='127.0.0.1', port=8080, **options):
    """Tornado's HTTPServer.

    This is a high quality asynchronous server with many options. For
    details, please visit:

        http://www.tornadoweb.org/en/stable/httpserver.html#http-server

    :param application: WSGI application callable
    :param host: interface to bind
    :param port: TCP port; coerced to int
    :param options: extra keyword options forwarded to HTTPServer
    """
    # Wrap our our WSGI application (potentially stack) in a Tornado adapter.
    container = tornado.wsgi.WSGIContainer(application)

    # Spin up a Tornado HTTP server using this container.
    http_server = tornado.httpserver.HTTPServer(container, **options)
    http_server.listen(int(port), host)

    # Start and block on the Tornado IO loop.
    tornado.ioloop.IOLoop.instance().start()
def parse_arguments():
    """Parse command line arguments for the pynml entry point.

    Builds a parser with a group of shared options plus a large group of
    mutually-exclusive conversion/validation modes (mostly delegated to
    jNeuroML).
    """
    import argparse

    parser = argparse.ArgumentParser(
            description=('pyNeuroML v%s: Python utilities for NeuroML2'
                         % __version__
                         + "\n    libNeuroML v%s"%(neuroml.__version__)
                         + "\n    jNeuroML v%s"%JNEUROML_VERSION),
            usage=('pynml [-h|--help] [<shared options>] '
                   '<one of the mutually-exclusive options>'),
            formatter_class=argparse.RawTextHelpFormatter
            )

    # Options valid alongside any of the exclusive modes below.
    shared_options = parser.add_argument_group(
            title='Shared options',
            description=('These options can be added to any of the '
                         'mutually-exclusive options')
            )

    shared_options.add_argument(
            '-verbose',
            action='store_true',
            default=DEFAULTS['v'],
            help='Verbose output'
            )
    shared_options.add_argument(
            '-java_max_memory',
            metavar='MAX',
            default=DEFAULTS['default_java_max_memory'],
            help=('Java memory for jNeuroML, e.g. 400M, 2G (used in\n'
                  '-Xmx argument to java)')
            )
    shared_options.add_argument(
            '-nogui',
            action='store_true',
            default=DEFAULTS['nogui'],
            help=('Suppress GUI,\n'
                  'i.e. show no plots, just save results')
            )

    shared_options.add_argument(
            'lems_file',
            type=str,
            metavar='<LEMS/NeuroML 2 file>',
            help='LEMS/NeuroML 2 file to process'
            )

    # Exactly one of these conversion/validation modes may be selected.
    mut_exc_opts_grp = parser.add_argument_group(
            title='Mutually-exclusive options',
            description='Only one of these options can be selected'
            )
    mut_exc_opts = mut_exc_opts_grp.add_mutually_exclusive_group(required=False)

    mut_exc_opts.add_argument(
            '-sedml',
            action='store_true',
            help=('(Via jNeuroML) Load a LEMS file, and convert\n'
                  'simulation settings (duration, dt, what to save)\n'
                  'to SED-ML format')
            )
    mut_exc_opts.add_argument(
            '-neuron',
            nargs=argparse.REMAINDER,
            help=('(Via jNeuroML) Load a LEMS file, and convert it to\n'
                  'NEURON format.\n'
                  'The full format of the \'-neuron\' option is:\n'
                  '-neuron [-nogui] [-run] [-outputdir dir] <LEMS file>\n'
                  '    -nogui\n'
                  '        do not generate gtaphical elements in NEURON,\n'
                  '        just run, save data, and quit\n'
                  '    -run\n'
                  '        compile NMODL files and run the main NEURON\n'
                  '        hoc file (Linux only currently)\n'
                  '    -outputdir <dir>\n'
                  '        generate NEURON files in directory <dir>\n'
                  '    <LEMS file>\n'
                  '        the LEMS file to use')
            )
    mut_exc_opts.add_argument(
            '-svg',
            action='store_true',
            help=('(Via jNeuroML) Convert NeuroML2 file (network & cells)\n'
                  'to SVG format view of 3D structure')
            )
    mut_exc_opts.add_argument(
            '-png',
            action='store_true',
            help=('(Via jNeuroML) Convert NeuroML2 file (network & cells)\n'
                  'to PNG format view of 3D structure')
            )
    mut_exc_opts.add_argument(
            '-dlems',
            action='store_true',
            help=('(Via jNeuroML) Load a LEMS file, and convert it\n'
                  'to dLEMS format, a distilled form of LEMS in JSON')
            )
    mut_exc_opts.add_argument(
            '-vertex',
            action='store_true',
            help=('(Via jNeuroML) Load a LEMS file, and convert it\n'
                  'to VERTEX format')
            )
    mut_exc_opts.add_argument(
            '-xpp',
            action='store_true',
            help=('(Via jNeuroML) Load a LEMS file, and convert it\n'
                  'to XPPAUT format')
            )
    mut_exc_opts.add_argument(
            '-dnsim',
            action='store_true',
            help=('(Via jNeuroML) Load a LEMS file, and convert it\n'
                  'to DNsim format')
            )
    mut_exc_opts.add_argument(
            '-brian',
            action='store_true',
            help=('(Via jNeuroML) Load a LEMS file, and convert it\n'
                  'to Brian format')
            )
    mut_exc_opts.add_argument(
            '-sbml',
            action='store_true',
            help=('(Via jNeuroML) Load a LEMS file, and convert it\n'
                  'to SBML format')
            )
    mut_exc_opts.add_argument(
            '-matlab',
            action='store_true',
            help=('(Via jNeuroML) Load a LEMS file, and convert it\n'
                  'to MATLAB format')
            )
    mut_exc_opts.add_argument(
            '-cvode',
            action='store_true',
            help=('(Via jNeuroML) Load a LEMS file, and convert it\n'
                  'to C format using CVODE package')
            )
    mut_exc_opts.add_argument(
            '-nineml',
            action='store_true',
            help=('(Via jNeuroML) Load a LEMS file, and convert it\n'
                  'to NineML format')
            )
    mut_exc_opts.add_argument(
            '-spineml',
            action='store_true',
            help=('(Via jNeuroML) Load a LEMS file, and convert it\n'
                  'to SpineML format')
            )
    mut_exc_opts.add_argument(
            '-sbml-import',
            metavar=('<SBML file>', 'duration', 'dt'),
            nargs=3,
            help=('(Via jNeuroML) Load a SBML file, and convert it\n'
                  'toLEMS format using values for duration & dt\n'
                  'in ms (ignoring SBML units)')
            )
    mut_exc_opts.add_argument(
            '-sbml-import-units',
            metavar=('<SBML file>', 'duration', 'dt'),
            nargs=3,
            help=('(Via jNeuroML) Load a SBML file, and convert it\n'
                  'to LEMS format using values for duration & dt\n'
                  'in ms (attempt to extract SBML units; ensure units\n'
                  'are valid in the SBML!)')
            )
    mut_exc_opts.add_argument(
            '-vhdl',
            metavar=('neuronid', '<LEMS file>'),
            nargs=2,
            help=('(Via jNeuroML) Load a LEMS file, and convert it\n'
                  'to VHDL format')
            )
    mut_exc_opts.add_argument(
            '-graph',
            metavar=('level'),
            nargs=1,
            help=('Load a NeuroML file, and convert it to a graph using\n'
                  'GraphViz. Detail is set by level (1, 2, etc.)')
            )
    mut_exc_opts.add_argument(
            '-matrix',
            metavar=('level'),
            nargs=1,
            help=('Load a NeuroML file, and convert it to a matrix displaying\n'
                  'connectivity. Detail is set by level (1, 2, etc.)')
            )
    mut_exc_opts.add_argument(
            '-validate',
            action='store_true',
            help=('(Via jNeuroML) Validate NeuroML2 file(s) against the\n'
                  'latest Schema')
            )
    mut_exc_opts.add_argument(
            '-validatev1',
            action='store_true',
            help=('(Via jNeuroML) Validate NeuroML file(s) against the\n'
                  'v1.8.1 Schema')
            )

    return parser.parse_args()
def quick_summary(nml2_doc):
    '''
    Build a short textual summary of a NeuroML 2 document.

    (Or better just use nml2_doc.summary(show_includes=False).)
    '''
    info = 'Contents of NeuroML 2 document: %s\n'%nml2_doc.id

    for name, value in inspect.getmembers(nml2_doc):
        # Only report non-empty list attributes that are part of the
        # public document structure.
        if name.endswith('_') or not isinstance(value, list) or not value:
            continue
        info+=' %s:\n ['%name
        for entry in value:
            # Prefer id, then href, then name as the display label.
            extra = '???'
            extra = entry.name if hasattr(entry,'name') else extra
            extra = entry.href if hasattr(entry,'href') else extra
            extra = entry.id if hasattr(entry,'id') else extra
            info+=" %s (%s),"%(entry, extra)
        info+=']\n'

    return info
def execute_command_in_dir(command, directory, verbose=DEFAULTS['v'],
                           prefix="Output: ", env=None):
    """Execute a shell command in a specific working directory.

    :param command: shell command line to run
    :param directory: working directory for the command
    :param verbose: whether to echo progress via print_comment
    :param prefix: prefix applied to each echoed output line
    :param env: optional environment-variable mapping for the child process
    :return: the command's decoded output on success, the raw output on the
        Python 2.6 fallback path, or None on failure
    """
    if os.name == 'nt':
        directory = os.path.normpath(directory)

    print_comment("Executing: (%s) in directory: %s" % (command, directory),
                  verbose)
    if env is not None:
        print_comment("Extra env variables %s" % (env), verbose)

    try:
        if os.name == 'nt':
            return_string = subprocess.check_output(command,
                                                    cwd=directory,
                                                    shell=True,
                                                    env=env,
                                                    close_fds=False)
        else:
            return_string = subprocess.check_output(command,
                                                    cwd=directory,
                                                    shell=True,
                                                    stderr=subprocess.STDOUT,
                                                    env=env,
                                                    close_fds=True)

        return_string = return_string.decode("utf-8")  # For Python 3

        print_comment('Command completed. Output: \n %s%s' %
                      (prefix, return_string.replace('\n', '\n ' + prefix)),
                      verbose)
        return return_string
    except AttributeError:
        # Python 2.6 has no subprocess.check_output.
        print_comment_v('Assuming Python 2.6...')
        return_string = subprocess.Popen(command,
                                         cwd=directory,
                                         shell=True,
                                         stdout=subprocess.PIPE).communicate()[0]
        return return_string
    except subprocess.CalledProcessError as e:
        print_comment_v('*** Problem running command: \n       %s' % e)
        print_comment_v('%s%s' %
                        (prefix, e.output.decode().replace('\n', '\n' + prefix)))
        return None
    except Exception as e:
        # Fixed: the original bare `except:` referenced an undefined `e`,
        # raising a NameError while trying to report the failure. (An
        # unreachable trailing print_comment after the returns was removed.)
        print_comment_v('*** Unknown problem running command: %s' % e)
        return None
def evaluate_component(comp_type, req_variables={}, parameter_values={}):
    """Evaluate a LEMS ComponentType's derived variables.

    Builds a small Python program from the component's Constants,
    DerivedVariables and ConditionalDerivedVariables, executes it with the
    supplied requirement/parameter values (converted to SI), and returns a
    dict mapping each derived-variable name to its evaluated value.

    SECURITY NOTE: this exec()s strings assembled from the model file — do
    not run it on untrusted NeuroML/LEMS input.

    :param comp_type: the ComponentType to evaluate
    :param req_variables: requirement name -> value-with-units string
    :param parameter_values: parameter name -> value-with-units string
    """
    print_comment('Evaluating %s with req:%s; params:%s'
                  % (comp_type.name, req_variables, parameter_values))
    exec_str = ''
    return_vals = {}
    from math import exp  # made available to the generated expressions

    # Bind parameters, requirements and constants (all converted to SI).
    for p in parameter_values:
        exec_str += '%s = %s\n' % (p, get_value_in_si(parameter_values[p]))
    for r in req_variables:
        exec_str += '%s = %s\n' % (r, get_value_in_si(req_variables[r]))
    for c in comp_type.Constant:
        exec_str += '%s = %s\n' % (c.name, get_value_in_si(c.value))

    for d in comp_type.Dynamics:
        for dv in d.DerivedVariable:
            exec_str += '%s = %s\n' % (dv.name, dv.value)
            exec_str += 'return_vals["%s"] = %s\n' % (dv.name, dv.name)

        for cdv in d.ConditionalDerivedVariable:
            for case in cdv.Case:
                if case.condition:
                    # Translate LEMS comparison operators to Python.
                    # Fixed: '.gt.' was previously mapped to '<' (the same
                    # symbol as '.lt.'), inverting greater-than conditions.
                    cond = (case.condition.replace('.neq.', '!=')
                                          .replace('.eq.', '==')
                                          .replace('.gt.', '>')
                                          .replace('.lt.', '<'))
                    exec_str += 'if ( %s ): %s = %s \n' % (cond, cdv.name, case.value)
                else:
                    exec_str += 'else: %s = %s \n' % (cdv.name, case.value)

            exec_str += '\n'
            exec_str += 'return_vals["%s"] = %s\n' % (cdv.name, cdv.name)

    # The generated code writes its results into return_vals.
    exec(exec_str)

    return return_vals
def after(self, context, exc=None):
    """Executed after dispatch has returned and the response populated,
    prior to anything being sent to the client.

    Records the request duration (in milliseconds) on the context,
    optionally exposing it as a response header and/or a log entry.

    :param context: request context with ``_start_time`` and ``response``
    :param exc: exception raised during dispatch, if any (unused here)
    """
    # Elapsed wall-clock time, converted to whole milliseconds.
    duration = context._duration = round((time.time() - context._start_time) * 1000)
    delta = str(duration)  # was py2-only `unicode`

    # Default response augmentation.
    if self.header:
        context.response.headers[self.header] = delta

    if self.log:
        # Fixed: the value is milliseconds, but the message said "seconds".
        self.log("Response generated in " + delta + " milliseconds.", extra=dict(
                duration = duration,
                request = id(context)
            ))
def _process_flat_kwargs(source, kwargs):
    """Apply a flat namespace transformation to recreate (in some respects) a
    rich structure.

    This applies several transformations, which may be nested:

    `foo` (singular): define a simple value named `foo`
    `foo` (repeated): define a simple value for placement in an array named `foo`
    `foo[]`: define a simple value for placement in an array, even if there
        is only one
    `foo.<id>`: define a simple value to place in the `foo` array at the
        identified index

    By nesting, you may define deeper, more complex structures:

    `foo.bar`: define a value for the named element `bar` of the `foo`
        dictionary
    `foo.<id>.bar`: define a `bar` dictionary element on the array element
        marked by that ID

    References to `<id>` represent numeric "attributes", which makes the
    parent reference be treated as an array, not a dictionary. Exact indexes
    might not be able to be preserved if there are voids; Python lists are
    not sparse.

    No validation of values is performed.
    """
    # Arrays built via `<id>` parts accumulate in a single staging dict
    # ([{…}]) and are flattened into index order at the end.
    ordered_arrays = []

    # Process arguments one at a time and apply them to the kwargs passed in.
    for name, value in source.items():
        container = kwargs

        if '.' in name:
            # Walk (and create) the intermediate containers; `name` becomes
            # the final path segment.
            parts = name.split('.')
            name = name.rpartition('.')[2]

            for target, following in zip(parts[:-1], parts[1:]):
                if following.isnumeric():  # Prepare any use of numeric IDs.
                    container.setdefault(target, [{}])
                    if container[target] not in ordered_arrays:
                        ordered_arrays.append(container[target])
                    # All numeric IDs stage into the dict at index 0 until
                    # the final flattening pass below.
                    container = container[target][0]
                    continue

                container = container.setdefault(target, {})

        if name.endswith('[]'):  # `foo[]` or `foo.bar[]` etc.
            name = name[:-2]
            container.setdefault(name, [])
            container[name].append(value)
            continue

        if name.isnumeric() and container is not kwargs:  # trailing identifiers, `foo.<id>`
            container[int(name)] = value
            continue

        if name in container:
            # Repeated plain name: promote the existing value to a list.
            if not isinstance(container[name], list):
                container[name] = [container[name]]
            container[name].append(value)
            continue

        container[name] = value

    # Flatten each staged `<id>` array: replace the staging dict with its
    # values ordered by numeric index (voids collapse; lists aren't sparse).
    for container in ordered_arrays:
        elements = container[0]
        del container[:]
        container.extend(value for name, value in sorted(elements.items()))
def process_args():
    """
    Parse command-line arguments.

    Returns the argparse.Namespace of options for converting a NeuroML v2 file
    into POVRay format (positions, view offsets, scaling, movie generation, etc.).
    """
    parser = argparse.ArgumentParser(description="A file for converting NeuroML v2 files into POVRay files for 3D rendering")

    # Required input file.
    parser.add_argument('neuroml_file', type=str, metavar='<NeuroML file>',
                        help='NeuroML (version 2 beta 3+) file to be converted to PovRay format (XML or HDF5 format)')

    # Output structure / appearance options.
    parser.add_argument('-split', action='store_true', default=False,
                        help="If this is specified, generate separate pov files for cells & network. Default is false")
    parser.add_argument('-background', type=str, metavar='<background colour>', default=_WHITE,
                        help='Colour of background, e.g. <0,0,0,0.55>')
    parser.add_argument('-movie', action='store_true', default=False,
                        help="If this is specified, generate a ini file for generating a sequence of frames for a movie of the 3D structure")
    parser.add_argument('-inputs', action='store_true', default=False,
                        help="If this is specified, show the locations of (synaptic, current clamp, etc.) inputs into the cells of the network")
    parser.add_argument('-conns', action='store_true', default=False,
                        help="If this is specified, show the connections present in the network with lines")
    parser.add_argument('-conn_points', action='store_true', default=False,
                        help="If this is specified, show the end points of the connections present in the network")
    parser.add_argument('-v', action='store_true', default=False,
                        help="Verbose output")
    parser.add_argument('-frames', type=int, metavar='<frames>', default=36,
                        help='Number of frames in movie')

    # Camera position offsets (0 is centre, 1 is top).
    parser.add_argument('-posx', type=float, metavar='<position offset x>', default=0,
                        help='Offset position in x dir (0 is centre, 1 is top)')
    parser.add_argument('-posy', type=float, metavar='<position offset y>', default=0,
                        help='Offset position in y dir (0 is centre, 1 is top)')
    parser.add_argument('-posz', type=float, metavar='<position offset z>', default=0,
                        help='Offset position in z dir (0 is centre, 1 is top)')

    # Viewing-point offsets.
    parser.add_argument('-viewx', type=float, metavar='<view offset x>', default=0,
                        help='Offset viewing point in x dir (0 is centre, 1 is top)')
    parser.add_argument('-viewy', type=float, metavar='<view offset y>', default=0,
                        help='Offset viewing point in y dir (0 is centre, 1 is top)')
    parser.add_argument('-viewz', type=float, metavar='<view offset z>', default=0,
                        help='Offset viewing point in z dir (0 is centre, 1 is top)')

    # Per-axis camera distance scaling from the network.
    parser.add_argument('-scalex', type=float, metavar='<scale position x>', default=1,
                        help='Scale position from network in x dir')
    parser.add_argument('-scaley', type=float, metavar='<scale position y>', default=1.5,
                        help='Scale position from network in y dir')
    parser.add_argument('-scalez', type=float, metavar='<scale position z>', default=1,
                        help='Scale position from network in z dir')

    parser.add_argument('-mindiam', type=float, metavar='<minimum diameter dendrites/axons>', default=0,
                        help='Minimum diameter for dendrites/axons (to improve visualisations)')
    parser.add_argument('-plane', action='store_true', default=False,
                        help="If this is specified, add a 2D plane below cell/network")
    parser.add_argument('-segids', action='store_true', default=False,
                        help="Show segment ids")

    return parser.parse_args()
def _configure(self, config):
    """Prepare the incoming configuration and ensure certain expected values are present.

    For example, this ensures BaseExtension is included in the extension list, and
    populates the logging config. Returns the (possibly freshly created) config dict.
    """
    config = config or dict()

    # We really need this to be there.
    if 'extensions' not in config:
        config['extensions'] = list()

    if not any(isinstance(ext, BaseExtension) for ext in config['extensions']):
        # Always make sure the BaseExtension is present since request/response objects are handy.
        config['extensions'].insert(0, BaseExtension())

    if not any(isinstance(ext, arguments.ArgumentExtension) for ext in config['extensions']):
        # Prepare a default set of argument mutators.
        config['extensions'].extend([
                arguments.ValidateArgumentsExtension(),
                arguments.ContextArgsExtension(),
                arguments.RemainderArgsExtension(),
                arguments.QueryStringArgsExtension(),
                arguments.FormEncodedKwargsExtension(),
                arguments.JSONKwargsExtension(),
            ])

    config['extensions'].append(self)  # Allow the application object itself to register callbacks.

    # Register a custom 'trace' logging level below DEBUG; ignore if unsupported.
    try:
        addLoggingLevel('trace', logging.DEBUG - 5)
    except AttributeError:
        pass

    # Tests are skipped on these as we have no particular need to test Python's own logging mechanism.
    level = config.get('logging', {}).get('level', None)
    if level:  # pragma: no cover
        # A bare level name means simple basicConfig setup.
        logging.basicConfig(level=getattr(logging, level.upper()))
    elif 'logging' in config:  # pragma: no cover
        # Otherwise treat the 'logging' key as a full dictConfig schema.
        logging.config.dictConfig(config['logging'])

    return config
def serve(self, service='auto', **options):  # pragma: no cover
    """Initiate a web server service to serve this application.

    You can always use the Application instance as a bare WSGI application, of course.
    This method is provided as a convienence.

    Pass in the name of the service you wish to use, and any additional configuration
    options appropriate for that service.  Almost all services accept `host` and `port`
    options, some also allow you to specify an on-disk `socket`.  By default all web
    servers will listen to `127.0.0.1` (loopback only) on port 8080.
    """
    # Resolve the named server via the plugin loader.
    service = load(service, 'web.server')  # We don't bother with a full registry for these one-time lookups.

    try:
        service(self, **options)
    except KeyboardInterrupt:  # We catch this as SIG_TERM or ^C are basically the only ways to stop most servers.
        pass

    # Notify extensions that the service has returned and we are exiting.
    for ext in self.__context.extension.signal.stop:
        ext(self.__context)
def application(self, environ, start_response):
    """Process a single WSGI request/response cycle.

    This is the WSGI handler for WebCore.  Depending on the presence of extensions
    providing WSGI middleware, the `__call__` attribute of the Application instance will
    either become this, or become the outermost middleware callable.

    Most apps won't utilize middleware, the extension interface is preferred for most
    operations in WebCore.  They allow for code injection at various intermediary steps
    in the processing of a request and response.
    """
    # Per-request context, also stashed into the WSGI environ for middleware access.
    context = environ['wc.context'] = self.RequestContext(environ=environ)
    signals = context.extension.signal

    # Announce the start of a request cycle. This executes `prepare` and `before` callbacks in the correct order.
    for ext in signals.pre:
        ext(context)

    # Identify the endpoint for this request.
    is_endpoint, handler = context.dispatch(context, context.root, context.environ['PATH_INFO'])

    if is_endpoint:
        try:
            result = self._execute_endpoint(context, handler, signals)  # Process the endpoint.
        except Exception as e:
            log.exception("Caught exception attempting to execute the endpoint.")
            result = HTTPInternalServerError(str(e) if __debug__ else "Please see the logs.")

            # With an interactive debugger available, re-raise so it can intercept.
            if 'debugger' in context.extension.feature:
                context.response = result

                for ext in signals.after:
                    ext(context)  # Allow signals to clean up early.

                raise
    else:  # If no endpoint could be resolved, that's a 404.
        result = HTTPNotFound("Dispatch failed." if __debug__ else None)

    if __debug__:
        log.debug("Result prepared, identifying view handler.", extra=dict(
                request = id(context),
                result = safe_name(type(result))
            ))

    # Identify a view capable of handling this result.
    for view in context.view(result):
        if view(context, result):
            break
    else:  # We've run off the bottom of the list of possible views.
        raise TypeError("No view could be found to handle: " + repr(type(result)))

    if __debug__:
        log.debug("View identified, populating response.", extra=dict(
                request = id(context),
                view = repr(view),
            ))

    for ext in signals.after:
        ext(context)

    # Generator wrapper that fires `done` callbacks only after the body is fully streamed.
    def capture_done(response):
        for chunk in response:
            yield chunk

        for ext in signals.done:
            ext(context)

    # This is really long due to the fact we don't want to capture the response too early.
    # We need anything up to this point to be able to simply replace `context.response` if needed.
    return capture_done(context.response.conditional_response_app(environ, start_response))
def _swap(self):
    '''Swaps the alignment so that the reference becomes the query and
    vice-versa. Swaps their names, coordinates etc. The frame is not changed'''
    # Exchange each paired ref_*/qry_* attribute in turn.
    for suffix in ('start', 'end', 'length', 'name'):
        ref_attr = 'ref_' + suffix
        qry_attr = 'qry_' + suffix
        ref_value = getattr(self, ref_attr)
        setattr(self, ref_attr, getattr(self, qry_attr))
        setattr(self, qry_attr, ref_value)

    # Hit lengths use a different naming scheme, so swap them explicitly.
    self.hit_length_ref, self.hit_length_qry = self.hit_length_qry, self.hit_length_ref
def qry_coords(self):
    '''Returns a pyfastaq.intervals.Interval object of the start and end
    coordinates in the query sequence'''
    # Normalise to (low, high) regardless of strand orientation.
    low, high = sorted((self.qry_start, self.qry_end))
    return pyfastaq.intervals.Interval(low, high)
def ref_coords(self):
    '''Returns a pyfastaq.intervals.Interval object of the start and end
    coordinates in the reference sequence'''
    # Normalise to (low, high) regardless of strand orientation.
    low, high = sorted((self.ref_start, self.ref_end))
    return pyfastaq.intervals.Interval(low, high)
def on_same_strand(self):
    '''Returns true iff the direction of the alignment is the same in the
    reference and the query'''
    # An alignment is "forward" on a sequence when its start precedes its end.
    ref_forward = self.ref_start < self.ref_end
    qry_forward = self.qry_start < self.qry_end
    return ref_forward == qry_forward
def is_self_hit(self):
    '''Returns true iff the alignment is of a sequence to itself: names and
    all coordinates are the same and 100 percent identity'''
    same_name = self.ref_name == self.qry_name
    same_coords = (self.ref_start, self.ref_end) == (self.qry_start, self.qry_end)
    return same_name and same_coords and self.percent_identity == 100
def reverse_query(self):
    '''Changes the coordinates as if the query sequence has been reverse
    complemented'''
    # Mirror both endpoints about the final zero-based position of the query.
    last_position = self.qry_length - 1
    self.qry_start = last_position - self.qry_start
    self.qry_end = last_position - self.qry_end
def reverse_reference(self):
    '''Changes the coordinates as if the reference sequence has been reverse
    complemented'''
    # Mirror both endpoints about the final zero-based position of the reference.
    last_position = self.ref_length - 1
    self.ref_start = last_position - self.ref_start
    self.ref_end = last_position - self.ref_end
def to_msp_crunch(self):
    '''Returns the alignment as a line in MSPcrunch format. The columns are
    space-separated and are:
    1. score  2. percent identity
    3. match start in the query sequence  4. match end in the query sequence
    5. query sequence name
    6. subject sequence start  7. subject sequence end  8. subject sequence name'''
    # The coords file carries no alignment score, so estimate one.
    # This approximates 1 for a match.
    estimated_score = int(self.percent_identity * 0.005 * (self.hit_length_ref + self.hit_length_qry))

    fields = [
        estimated_score,
        '{0:.2f}'.format(self.percent_identity),
        self.qry_start + 1,   # MSPcrunch coordinates are 1-based
        self.qry_end + 1,
        self.qry_name,
        self.ref_start + 1,
        self.ref_end + 1,
        self.ref_name,
    ]
    return ' '.join(str(field) for field in fields)
def qry_coords_from_ref_coord(self, ref_coord, variant_list):
    '''Given a reference position and a list of variants ([variant.Variant]),
    works out the position in the query sequence, accounting for indels.
    Returns a tuple: (position, True|False), where second element is whether
    or not the ref_coord lies in an indel. If it is, then returns the
    corresponding start position of the indel in the query'''
    # The requested reference position must fall within this alignment.
    if self.ref_coords().distance_to_point(ref_coord) > 0:
        raise Error('Cannot get query coord in qry_coords_from_ref_coord because given ref_coord ' + str(ref_coord) + ' does not lie in nucmer alignment:\n' + str(self))

    indel_variant_indexes = []

    for i in range(len(variant_list)):
        # Only insertions and deletions shift coordinates; ignore everything else.
        if variant_list[i].var_type not in {variant.INS, variant.DEL}:
            continue

        # Ignore variants that do not overlap this alignment.
        if not self.intersects_variant(variant_list[i]):
            continue

        if variant_list[i].ref_start <= ref_coord <= variant_list[i].ref_end:
            # ref_coord lies inside an indel: return the indel's query start.
            return variant_list[i].qry_start, True
        elif variant_list[i].ref_start < ref_coord:
            # Indel lies entirely before ref_coord; it offsets the mapping below.
            indel_variant_indexes.append(i)

    # Offset of ref_coord from the leftmost aligned reference position.
    distance = ref_coord - min(self.ref_start, self.ref_end)

    # Each preceding insertion lengthens the query; each deletion shortens it.
    for i in indel_variant_indexes:
        if variant_list[i].var_type == variant.INS:
            distance += len(variant_list[i].qry_base)
        else:
            assert variant_list[i].var_type == variant.DEL
            distance -= len(variant_list[i].ref_base)

    # Apply the offset from the appropriate end of the query, depending on strand.
    if self.on_same_strand():
        return min(self.qry_start, self.qry_end) + distance, False
    else:
        return max(self.qry_start, self.qry_end) - distance, False
def _nucmer_command(self, ref, qry, outprefix):
    '''Construct the nucmer command'''
    # Assemble the command as a list of tokens, then join with spaces.
    parts = ['promer' if self.use_promer else 'nucmer', '-p', outprefix]

    if self.breaklen is not None:
        parts.extend(['-b', str(self.breaklen)])

    if self.diagdiff is not None and not self.use_promer:
        parts.extend(['-D', str(self.diagdiff)])

    if self.diagfactor:
        parts.extend(['-d', str(self.diagfactor)])

    if self.maxgap:
        parts.extend(['-g', str(self.maxgap)])

    if self.maxmatch:
        parts.append('--maxmatch')

    if self.mincluster is not None:
        parts.extend(['-c', str(self.mincluster)])

    if not self.simplify and not self.use_promer:
        parts.append('--nosimplify')

    parts.extend([ref, qry])
    return ' '.join(parts)
def _delta_filter_command(self, infile, outfile):
    '''Construct delta-filter command'''
    # Build the token list, appending optional filters, then join with spaces.
    parts = ['delta-filter']

    if self.min_id is not None:
        parts.extend(['-i', str(self.min_id)])

    if self.min_length is not None:
        parts.extend(['-l', str(self.min_length)])

    parts.extend([infile, '>', outfile])
    return ' '.join(parts)
def _show_coords_command(self, infile, outfile):
    '''Construct show-coords command'''
    parts = ['show-coords', '-dTlro']

    # -H suppresses the header lines in the output.
    if not self.coords_header:
        parts.append('-H')

    parts.extend([infile, '>', outfile])
    return ' '.join(parts)
def _write_script(self, script_name, ref, qry, outfile):
    '''Write commands into a bash script'''
    f = pyfastaq.utils.open_file_write(script_name)
    # Pipeline: nucmer -> delta-filter -> show-coords, using fixed prefix 'p'
    # for the intermediate delta files (the script runs inside a temp dir).
    print(self._nucmer_command(ref, qry, 'p'), file=f)
    print(self._delta_filter_command('p.delta', 'p.delta.filter'), file=f)
    print(self._show_coords_command('p.delta.filter', outfile), file=f)
    # Optionally also report SNPs alongside the coords output.
    if self.show_snps:
        print(self._show_snps_command('p.delta.filter', outfile + '.snps'), file=f)
    pyfastaq.utils.close(f)
def run(self):
    '''
    Change to a temp directory
    Run bash script containing commands
    Place results in specified output file
    Clean up temp directory
    '''
    # Resolve paths first: we change directory below, so relative paths would break.
    qry = os.path.abspath(self.qry)
    ref = os.path.abspath(self.ref)
    outfile = os.path.abspath(self.outfile)
    tmpdir = tempfile.mkdtemp(prefix='tmp.run_nucmer.', dir=os.getcwd())
    original_dir = os.getcwd()
    os.chdir(tmpdir)
    try:
        script = 'run_nucmer.sh'
        self._write_script(script, ref, qry, outfile)
        syscall.run('bash ' + script, verbose=self.verbose)
    finally:
        # Bug fix: restore the working directory and remove the temp dir even
        # when the nucmer pipeline raises, so a failure no longer leaves the
        # process stranded in (and littered with) the temp directory.
        os.chdir(original_dir)
        shutil.rmtree(tmpdir)
def update_indel(self, nucmer_snp):
    '''Indels are reported over multiple lines, 1 base insertion or deletion
    per line. This method extends the current variant by 1 base if it's an
    indel and adjacent to the new SNP and returns True. If the current variant
    is a SNP, does nothing and returns False'''
    new_variant = Variant(nucmer_snp)

    # Can only merge when both records describe the same kind of indel on the
    # same ref/qry sequences with the same orientation.
    if self.var_type not in [INS, DEL] \
            or self.var_type != new_variant.var_type \
            or self.qry_name != new_variant.qry_name \
            or self.ref_name != new_variant.ref_name \
            or self.reverse != new_variant.reverse:
        return False

    # Insertion: ref position fixed, query grows one base to the right.
    if self.var_type == INS \
            and self.ref_start == new_variant.ref_start \
            and self.qry_end + 1 == new_variant.qry_start:
        self.qry_base += new_variant.qry_base
        self.qry_end += 1
        return True

    # Deletion: query position fixed, reference grows one base to the right.
    if self.var_type == DEL \
            and self.qry_start == new_variant.qry_start \
            and self.ref_end + 1 == new_variant.ref_start:
        self.ref_base += new_variant.ref_base
        self.ref_end += 1
        return True

    # Adjacent extension not possible: caller should start a new variant.
    return False
def reader(fname):
    '''Helper function to open the results file (coords file) and create
    alignment objects with the values in it'''
    f = pyfastaq.utils.open_file_read(fname)

    for line in f:
        # Skip show-coords banner/header lines ('[...]') and any line without
        # tab-separated data fields.
        if line.startswith('[') or (not '\t' in line):
            continue

        yield alignment.Alignment(line)

    pyfastaq.utils.close(f)
def convert_to_msp_crunch(infile, outfile, ref_fai=None, qry_fai=None):
    '''Converts a coords file to a file in MSPcrunch format (for use with ACT,
    most likely). ACT ignores sequence names in the crunch file, and just looks
    at the numbers. To make a compatible file, the coords all must be shifted
    appropriately, which can be done by providing both the ref_fai and qry_fai
    options. Both or neither of these must be used, otherwise an error will be
    thrown.'''
    # Bug fix: removed a stray debugging print of the fai file names, and
    # replaced the set-size trick with an explicit "exactly one is None" check.
    if (ref_fai is None) != (qry_fai is None):
        raise Error('Error in convert_to_msp_crunch. Must use both of ref_fai and qry_fai, or neither of them')

    if ref_fai is not None:
        # Cumulative sequence offsets place every sequence on one shared axis,
        # which is what ACT needs since it ignores the names.
        ref_offsets = pyfastaq.tasks.length_offsets_from_fai(ref_fai)
        qry_offsets = pyfastaq.tasks.length_offsets_from_fai(qry_fai)

    file_reader = reader(infile)
    f_out = pyfastaq.utils.open_file_write(outfile)

    for aln in file_reader:
        if ref_fai is not None:
            aln.ref_start += ref_offsets[aln.ref_name]
            aln.ref_end += ref_offsets[aln.ref_name]
            aln.qry_start += qry_offsets[aln.qry_name]
            aln.qry_end += qry_offsets[aln.qry_name]

        print(aln.to_msp_crunch(), file=f_out)

    pyfastaq.utils.close(f_out)
def _request(self, method, url, params=None, headers=None, data=None):
    """Common handler for all the HTTP requests.

    Issues the request via ``requests`` against ``self.host + self.key + url``
    and wraps every outcome (success or failure) in a ``BurpResponse``; this
    method never raises.
    """
    if not params:
        params = {}

    # set default headers
    if not headers:
        headers = {
            'accept': '*/*'
        }

    # NOTE(review): assumes the JSON content type should be set for every
    # POST/PUT, including caller-supplied header dicts — confirm against callers.
    if method in ('POST', 'PUT'):
        headers.update({'Content-Type': 'application/json'})

    try:
        response = requests.request(method=method, url=self.host + self.key + url,
                                    params=params, headers=headers, data=data)
        try:
            response.raise_for_status()

            response_code = response.status_code
            success = response_code // 100 == 2  # any 2xx counts as success

            # Prefer decoded JSON; fall back to raw bytes, or '' for empty bodies.
            if response.text:
                try:
                    data = response.json()
                except ValueError:
                    data = response.content
            else:
                data = ''

            response_headers = response.headers

            return BurpResponse(success=success, response_code=response_code,
                                data=data, response_headers=response_headers)
        except ValueError as e:
            return BurpResponse(success=False,
                                message="JSON response could not be decoded {}.".format(e))
    except requests.exceptions.HTTPError as e:
        if response.status_code == 400:
            return BurpResponse(success=False, response_code=400, message='Bad Request')
        return BurpResponse(
            message='There was an error while handling the request. {}'.format(response.content),
            success=False)
    except Exception as e:
        # Bug fix: corrected the "Eerror" typo in the failure message.
        return BurpResponse(success=False, message='Error is %s' % e)
def user_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Sphinx role for linking to a user profile. Defaults to linking to
    Github profiles, but the profile URIS can be configured via the
    ``issues_user_uri`` config value.

    Examples: ::

        :user:`sloria`

    Anchor text also works: ::

        :user:`Steven Loria <sloria>`
    """
    if options is None:
        options = {}
    if content is None:
        content = []

    has_explicit_title, title, target = split_explicit_title(text)
    target = utils.unescape(target).strip()
    title = utils.unescape(title).strip()

    # Build the profile URL from the configured template, defaulting to GitHub.
    config = inliner.document.settings.env.app.config
    if config.issues_user_uri:
        ref = config.issues_user_uri.format(user=target)
    else:
        ref = "https://github.com/" + target

    # Anchor text: explicit title if given, otherwise "@username".
    link_text = title if has_explicit_title else "@" + target

    link = nodes.reference(text=link_text, refuri=ref, **options)
    return [link], []
def cve_role(name, rawtext, text, lineno, inliner, options=None, content=None):
    """Sphinx role for linking to a CVE on https://cve.mitre.org.

    Examples: ::

        :cve:`CVE-2018-17175`
    """
    if options is None:
        options = {}
    if content is None:
        content = []

    has_explicit_title, title, target = split_explicit_title(text)
    target = utils.unescape(target).strip()
    title = utils.unescape(title).strip()

    # Link straight to the MITRE CVE lookup page for the identifier.
    ref = "https://cve.mitre.org/cgi-bin/cvename.cgi?name={0}".format(target)
    link_text = title if has_explicit_title else target

    link = nodes.reference(text=link_text, refuri=ref, **options)
    return [link], []
def list_line(self, line):
    """
    Write the given iterable of values (line) to the file as tab-separated
    items on the same line, terminated by a newline. An empty iterable writes
    nothing at all (not even the newline).

    Any argument that stringifies to a string legal as a TSV data item can be
    written. Does not copy the line or build a big string in memory.
    """
    # Bug fix: the docstring promises any iterable, but the old code required
    # a sized, indexable sequence (len() and slicing). Iterate instead, which
    # also supports generators, while preserving sequence behavior exactly.
    items = iter(line)
    try:
        first = next(items)
    except StopIteration:
        return  # empty input: emit nothing, matching the original behavior

    self.stream.write(str(first))
    for item in items:
        self.stream.write("\t")
        self.stream.write(str(item))
    self.stream.write("\n")
def prepare(doc):
    """
    Parse metadata to obtain list of mustache templates,
    then load those templates into doc.mhash / doc.mrenderer.
    """
    doc.mustache_files = doc.get_metadata('mustache')
    # process single YAML value stored as string
    # NOTE(review): `basestring` is not a Python 3 builtin — presumably provided
    # by a compat import at module level; confirm.
    if isinstance(doc.mustache_files, basestring):
        if not doc.mustache_files:
            doc.mustache_files = None  # switch empty string back to None
        else:
            doc.mustache_files = [ doc.mustache_files ]  # put non-empty string in list
    # with open('debug.txt', 'a') as the_file:
    #     the_file.write(str(doc.mustache_files))
    #     the_file.write('\n')
    if doc.mustache_files is not None:
        # NOTE(review): yaml.load without an explicit Loader is unsafe on
        # untrusted template files — consider yaml.safe_load.
        doc.mustache_hashes = [yaml.load(open(file, 'r').read()) for file in doc.mustache_files]
        # combine list of dicts into a single dict (later files win on key clashes)
        doc.mhash = { k: v for mdict in doc.mustache_hashes for k, v in mdict.items() }
        # escape=identity keeps output raw; missing_tags='strict' raises on unknown tags
        doc.mrenderer = pystache.Renderer(escape=lambda u: u, missing_tags='strict')
    else:
        doc.mhash = None
def action(elem, doc):
    """
    Apply combined mustache template to all strings in document.
    """
    # Only Str nodes carry renderable text; skip entirely when no templates
    # were loaded (doc.mhash is None). NOTE(review): exact-type check is
    # deliberate here — confirm subclass handling is not needed.
    if type(elem) == Str and doc.mhash is not None:
        elem.text = doc.mrenderer.render(elem.text, doc.mhash)
    return elem
def get_callback(self, renderer_context):
    """
    Determine the name of the callback to wrap around the json output.

    Falls back to the configured default when the request (or its query
    string) does not supply one.
    """
    request = renderer_context.get('request', None)
    params = (get_query_params(request) or {}) if request else {}
    return params.get(self.callback_parameter, self.default_callback)
def render(self, data, accepted_media_type=None, renderer_context=None):
    """
    Renders into jsonp, wrapping the json output in a callback function.

    Clients may set the callback function name using a query
    parameter on the URL, for example: ?callback=exampleCallbackName
    """
    renderer_context = renderer_context or {}
    callback = self.get_callback(renderer_context)
    # Delegate the JSON body rendering to the parent class, then wrap it
    # as `callback(<json>);` encoded with the renderer's charset.
    json = super(JSONPRenderer, self).render(data, accepted_media_type, renderer_context)
    return callback.encode(self.charset) + b'(' + json + b');'
def get(self, measurementId):
    """
    Analyses the measurement with the given parameters.

    :param measurementId: id of the completed measurement to load.
    :return: (payload, http_status) — per-sensor raw/vibration/tilt series for
             each axis with status 200, or (None, 404) when the measurement is
             missing or its data cannot be inflated.
    """
    logger.info('Loading raw data for ' + measurementId)
    measurement = self._measurementController.getMeasurement(measurementId, MeasurementStatus.COMPLETE)
    if measurement is not None:
        if measurement.inflate():
            # Build a JSON-safe structure: for each named sensor data set,
            # expose raw/vibration/tilt values per x/y/z axis.
            data = {
                name: {
                    'raw': {
                        'x': self._jsonify(data.raw('x')),
                        'y': self._jsonify(data.raw('y')),
                        'z': self._jsonify(data.raw('z'))
                    },
                    'vibration': {
                        'x': self._jsonify(data.vibration('x')),
                        'y': self._jsonify(data.vibration('y')),
                        'z': self._jsonify(data.vibration('z'))
                    },
                    'tilt': {
                        'x': self._jsonify(data.tilt('x')),
                        'y': self._jsonify(data.tilt('y')),
                        'z': self._jsonify(data.tilt('z'))
                    }
                }
                for name, data in measurement.data.items()
            }
            return data, 200
        else:
            # Measurement exists but its stored data could not be inflated.
            return None, 404
    else:
        return None, 404
def to_jd(year, week, day):
    '''Return Julian day count of given ISO year, week, and day'''
    # Count `week` whole weeks from the Sunday relative to 28 December of the
    # prior Gregorian year (28 Dec always lies in the last ISO week of its
    # year), then add the day-of-week offset. NOTE(review): relies on the
    # module's n_weeks/SUN semantics — confirm against their definitions.
    return day + n_weeks(SUN, gregorian.to_jd(year - 1, 12, 28), week)
def from_jd(jd):
    '''Return tuple of ISO (year, week, day) for Julian day'''
    year = gregorian.from_jd(jd)[0]
    day = jwday(jd) + 1          # ISO weekday: 1 = Monday .. 7 = Sunday
    dayofyear = ordinal.from_jd(jd)[1]
    # Standard ISO 8601 week-number formula.
    week = trunc((dayofyear - day + 10) / 7)

    # Reset year
    if week < 1:
        # Belongs to the last week of the previous ISO year.
        week = weeks_per_year(year - 1)
        year = year - 1

    # Check that year actually has 53 weeks
    elif week == 53 and weeks_per_year(year) != 53:
        # Rolls over into week 1 of the next ISO year.
        week = 1
        year = year + 1

    return year, week, day
def weeks_per_year(year):
    '''Number of ISO weeks in a year'''
    # 53 weeks: any year starting on Thursday and any leap year starting on Wednesday
    jan1 = jwday(gregorian.to_jd(year, 1, 1))

    if jan1 == THU or (jan1 == WED and isleap(year)):
        return 53
    else:
        return 52
def stsci(hdulist):
    """For STScI GEIS files, need to do extra steps.

    For WFPC2/FOC data, propagates identifying keywords from the primary
    header into every extension header (in place).
    """
    instrument = hdulist[0].header.get('INSTRUME', '')

    # Update extension header keywords
    if instrument in ("WFPC2", "FOC"):
        rootname = hdulist[0].header.get('ROOTNAME', '')
        filetype = hdulist[0].header.get('FILETYPE', '')
        for i in range(1, len(hdulist)):
            # Add name and extver attributes to match PyFITS data structure
            hdulist[i].name = filetype
            hdulist[i]._extver = i
            # Add extension keywords for this chip to extension
            hdulist[i].header['EXPNAME'] = (rootname, "9 character exposure identifier")
            hdulist[i].header['EXTVER'] = (i, "extension version number")
            hdulist[i].header['EXTNAME'] = (filetype, "extension name")
            hdulist[i].header['INHERIT'] = (True, "inherit the primary header")
            hdulist[i].header['ROOTNAME'] = (rootname, "rootname of the observation set")
def stsci2(hdulist, filename):
    """For STScI GEIS files, need to do extra steps.

    Records the output file name in the primary header of WFPC2/FOC data.
    """
    # Write output file name to the primary header
    primary_header = hdulist[0].header
    if primary_header.get('INSTRUME', '') in ("WFPC2", "FOC"):
        primary_header['FILENAME'] = filename
Input GEIS files "input" will be read and a HDUList object will be returned. The user can use the writeto method to write the HDUList object to a FITS file. def readgeis(input): """Input GEIS files "input" will be read and a HDUList object will be returned. The user can use the writeto method to write the HDUList object to a FITS file. """ global dat cardLen = fits.Card.length # input file(s) must be of the form *.??h and *.??d if input[-1] != 'h' or input[-4] != '.': raise "Illegal input GEIS file name %s" % input data_file = input[:-1]+'d' _os = sys.platform if _os[:5] == 'linux' or _os[:5] == 'win32' or _os[:5] == 'sunos' or _os[:3] == 'osf' or _os[:6] == 'darwin': bytes_per_line = cardLen+1 else: raise "Platform %s is not supported (yet)." % _os geis_fmt = {'REAL':'f', 'INTEGER':'i', 'LOGICAL':'i','CHARACTER':'S'} end_card = 'END'+' '* (cardLen-3) # open input file im = open(input) # Generate the primary HDU cards = [] while 1: line = im.read(bytes_per_line)[:cardLen] line = line[:8].upper() + line[8:] if line == end_card: break cards.append(fits.Card.fromstring(line)) phdr = fits.Header(cards) im.close() _naxis0 = phdr.get('NAXIS', 0) _naxis = [phdr['NAXIS'+str(j)] for j in range(1, _naxis0+1)] _naxis.insert(0, _naxis0) _bitpix = phdr['BITPIX'] _psize = phdr['PSIZE'] if phdr['DATATYPE'][:4] == 'REAL': _bitpix = -_bitpix if _naxis0 > 0: size = reduce(lambda x,y:x*y, _naxis[1:]) data_size = abs(_bitpix) * size // 8 else: data_size = 0 group_size = data_size + _psize // 8 # decode the group parameter definitions, # group parameters will become extension header groups = phdr['GROUPS'] gcount = phdr['GCOUNT'] pcount = phdr['PCOUNT'] formats = [] bools = [] floats = [] _range = range(1, pcount+1) key = [phdr['PTYPE'+str(j)] for j in _range] comm = [phdr.cards['PTYPE'+str(j)].comment for j in _range] # delete group parameter definition header keywords _list = ['PTYPE'+str(j) for j in _range] + \ ['PDTYPE'+str(j) for j in _range] + \ ['PSIZE'+str(j) for j in _range] + 
\ ['DATATYPE', 'PSIZE', 'GCOUNT', 'PCOUNT', 'BSCALE', 'BZERO'] # Construct record array formats for the group parameters # as interpreted from the Primary header file for i in range(1, pcount+1): ptype = key[i-1] pdtype = phdr['PDTYPE'+str(i)] star = pdtype.find('*') _type = pdtype[:star] _bytes = pdtype[star+1:] # collect boolean keywords since they need special attention later if _type == 'LOGICAL': bools.append(i) if pdtype == 'REAL*4': floats.append(i) fmt = geis_fmt[_type] + _bytes formats.append((ptype,fmt)) _shape = _naxis[1:] _shape.reverse() _code = fits.BITPIX2DTYPE[_bitpix] _bscale = phdr.get('BSCALE', 1) _bzero = phdr.get('BZERO', 0) if phdr['DATATYPE'][:10] == 'UNSIGNED*2': _uint16 = 1 _bzero = 32768 else: _uint16 = 0 # delete from the end, so it will not conflict with previous delete for i in range(len(phdr)-1, -1, -1): if phdr.cards[i].keyword in _list: del phdr[i] # clean up other primary header keywords phdr['SIMPLE'] = True phdr['BITPIX'] = 16 phdr['GROUPS'] = False _after = 'NAXIS' if _naxis0 > 0: _after += str(_naxis0) phdr.set('EXTEND', value=True, comment="FITS dataset may contain extensions", after=_after) phdr.set('NEXTEND', value=gcount, comment="Number of standard extensions") hdulist = fits.HDUList([fits.PrimaryHDU(header=phdr, data=None)]) # Use copy-on-write for all data types since byteswap may be needed # in some platforms. f1 = open(data_file, mode='rb') dat = f1.read() # dat = memmap(data_file, mode='c') hdulist.mmobject = dat errormsg = "" loc = 0 for k in range(gcount): ext_dat = numpy.fromstring(dat[loc:loc+data_size], dtype=_code) ext_dat = ext_dat.reshape(_shape) if _uint16: ext_dat += _bzero # Check to see whether there are any NaN's or infs which might indicate # a byte-swapping problem, such as being written out on little-endian # and being read in on big-endian or vice-versa. 
if _code.find('float') >= 0 and \ (numpy.any(numpy.isnan(ext_dat)) or numpy.any(numpy.isinf(ext_dat))): errormsg += "===================================\n" errormsg += "= WARNING: =\n" errormsg += "= Input image: =\n" errormsg += input+"[%d]\n"%(k+1) errormsg += "= had floating point data values =\n" errormsg += "= of NaN and/or Inf. =\n" errormsg += "===================================\n" elif _code.find('int') >= 0: # Check INT data for max values ext_dat_frac,ext_dat_exp = numpy.frexp(ext_dat) if ext_dat_exp.max() == int(_bitpix) - 1: # Potential problems with byteswapping errormsg += "===================================\n" errormsg += "= WARNING: =\n" errormsg += "= Input image: =\n" errormsg += input+"[%d]\n"%(k+1) errormsg += "= had integer data values =\n" errormsg += "= with maximum bitvalues. =\n" errormsg += "===================================\n" ext_hdu = fits.ImageHDU(data=ext_dat) rec = numpy.fromstring(dat[loc+data_size:loc+group_size], dtype=formats) loc += group_size # Create separate PyFITS Card objects for each entry in 'rec' for i in range(1, pcount+1): #val = rec.field(i-1)[0] val = rec[0][i-1] if val.dtype.kind == 'S': val = val.decode('ascii') if i in bools: if val: val = True else: val = False if i in floats: # use fromstring, format in Card is deprecated in pyfits 0.9 _str = '%-8s= %20.7G / %s' % (key[i-1], val, comm[i-1]) _card = fits.Card.fromstring(_str) else: _card = fits.Card(keyword=key[i-1], value=val, comment=comm[i-1]) ext_hdu.header.append(_card) # deal with bscale/bzero if (_bscale != 1 or _bzero != 0): ext_hdu.header['BSCALE'] = _bscale ext_hdu.header['BZERO'] = _bzero hdulist.append(ext_hdu) if errormsg != "": errormsg += "===================================\n" errormsg += "= This file may have been =\n" errormsg += "= written out on a platform =\n" errormsg += "= with a different byte-order. 
=\n" errormsg += "= =\n" errormsg += "= Please verify that the values =\n" errormsg += "= are correct or apply the =\n" errormsg += "= '.byteswap()' method. =\n" errormsg += "===================================\n" print(errormsg) f1.close() stsci(hdulist) return hdulist
Parse two input arguments and return two lists of file names def parse_path(f1, f2): """Parse two input arguments and return two lists of file names""" import glob # if second argument is missing or is a wild card, point it # to the current directory f2 = f2.strip() if f2 == '' or f2 == '*': f2 = './' # if the first argument is a directory, use all GEIS files if os.path.isdir(f1): f1 = os.path.join(f1, '*.??h') list1 = glob.glob(f1) list1 = [name for name in list1 if name[-1] == 'h' and name[-4] == '.'] # if the second argument is a directory, use file names in the # first argument to construct file names, i.e. # abc.xyh will be converted to abc_xyf.fits if os.path.isdir(f2): list2 = [] for file in list1: name = os.path.split(file)[-1] fitsname = name[:-4] + '_' + name[-3:-1] + 'f.fits' list2.append(os.path.join(f2, fitsname)) else: list2 = [s.strip() for s in f2.split(",")] if list1 == [] or list2 == []: err_msg = "" if list1 == []: err_msg += "Input files `{:s}` not usable/available. ".format(f1) if list2 == []: err_msg += "Input files `{:s}` not usable/available. ".format(f2) raise IOError(err_msg) else: return list1, list2
def parseinput(inputlist, outputname=None, atfile=None):
    """
    Recursively parse user input based upon the irafglob
    program and construct a list of files
    that need to be processed.

    This program addresses the following deficiencies of the irafglob
    program::

       parseinput can extract filenames from association tables

    Parameters
    ----------
    inputlist : string
        specification of input files using either wild-cards, @-file or
        comma-separated list of filenames

    outputname : string
        desired name for output product to be created from the input files

    atfile : object
        function to use in interpreting the @-file columns that gets
        passed to irafglob

    Returns
    -------
    files : list of strings
        names of output files to be processed

    newoutputname : string
        name of output file to be created.

    Raises
    ------
    ValueError
        If an association table found among the inputs cannot be read.

    See Also
    --------
    stsci.tools.irafglob
    """
    # Outputname returned to calling program.  The value of outputname
    # is only changed if it had a value of 'None' on input and an
    # association table supplies one.
    newoutputname = outputname

    # We can use irafglob to parse the input. If the input wasn't
    # an association table, it needs to be either a wildcard, '@' file,
    # or comma seperated list.
    files = irafglob(inputlist, atfile=atfile)

    # Replace any association table found among the inputs with its
    # member files.
    #
    # BUGFIX: iterate over a snapshot of `files`.  The loop body both
    # removes the table entry and extends the list; mutating a list
    # while iterating over it causes elements to be skipped.
    for fname in list(files):
        if checkASN(fname):
            # Files extracted from this association table.
            assoclist = []

            try:
                # Open the association table
                assocdict = readASNTable(fname, None, prodonly=False)
            except Exception:
                errorstr  = "###################################\n"
                errorstr += "#                                 #\n"
                errorstr += "# UNABLE TO READ ASSOCIATION FILE,#\n"
                errorstr += str(fname)+'\n'
                errorstr += "# DURING FILE PARSING.            #\n"
                errorstr += "#                                 #\n"
                errorstr += "# Please determine if the file is #\n"
                errorstr += "# in the current directory and    #\n"
                errorstr += "# that it has been properly       #\n"
                errorstr += "# formatted.                      #\n"
                errorstr += "#                                 #\n"
                errorstr += "# This error message is being     #\n"
                errorstr += "# generated from within the       #\n"
                errorstr += "# parseinput.py module.           #\n"
                errorstr += "#                                 #\n"
                errorstr += "###################################\n"
                raise ValueError(errorstr)

            # Extract the output name from the association table if None
            # was provided on input.
            if outputname is None:
                newoutputname = assocdict['output']

            # Loop over the association dictionary to extract the input
            # file names.
            for f in assocdict['order']:
                assoclist.append(fileutil.buildRootname(f))

            # Remove the name of the association table from the list of
            # files.
            files.remove(fname)
            # Append the list of filenames generated from the association
            # table to the master list of input files.
            files.extend(assoclist)

    # Return the list of the input files and the output name if provided
    # in an association.
    return files, newoutputname
Determine if the filename provided to the function belongs to an association. Parameters ---------- filename: string Returns ------- validASN : boolean value def checkASN(filename): """ Determine if the filename provided to the function belongs to an association. Parameters ---------- filename: string Returns ------- validASN : boolean value """ # Extract the file extn type: extnType = filename[filename.rfind('_')+1:filename.rfind('.')] # Determine if this extn name is valid for an assocation file if isValidAssocExtn(extnType): return True else: return False