def get_tagged(self, event):
    """Return a list of tagged objects for a schema"""

    self.log("Tagged objects request for", event.data, "from",
             event.user, lvl=debug)
    if event.data in self.tags:
        tagged = self._get_tagged(event.data)
        response = {
            'component': 'hfos.events.schemamanager',
            'action': 'get',
            'data': tagged
        }
        self.fireEvent(send(event.client.uuid, response))
    else:
        self.log("Unavailable schema requested!", lvl=warn)
Return a list of tagged objects for a schema
def set_isotopic_ratio(self, compound='', element='', list_ratio=[]):
    """Defines the new set of isotopic ratios of the compound/element and
    triggers the calculation to update the density.

    Parameters:
    ===========
    compound: string (default is ''). Name of compound
    element: string (default is ''). Name of element
    list_ratio: list (default is []). List of new stoichiometric ratios

    Raises:
    =======
    ValueError if compound does not exist
    ValueError if element does not exist
    ValueError if list_ratio does not have the right format
    """
    _stack = self.stack
    list_compounds = _stack.keys()
    if compound not in _stack.keys():
        list_compounds_joined = ', '.join(list_compounds)
        raise ValueError("Compound '{}' could not be found in {}".format(
            compound, list_compounds_joined))

    if element == '':
        # we assume that the element and compound names match
        element = compound

    list_element = _stack[compound].keys()
    if element not in list_element:
        list_element_joined = ', '.join(list_element)
        raise ValueError("Element '{}' should be any of those elements: {}".format(
            element, list_element_joined))

    old_list_ratio = _stack[compound][element]['isotopes']['list']
    if not (len(old_list_ratio) == len(list_ratio)):
        raise ValueError("New list of ratios ({} elements) does not match old "
                         "list size ({} elements)!".format(
                             len(list_ratio), len(old_list_ratio)))

    self.stack[compound][element]['isotopes']['isotopic_ratio'] = list_ratio
    self.__update_molar_mass(compound=compound, element=element)
    self.__update_density(compound=compound, element=element)

    # update entire stack
    self.__math_on_stack()
Defines the new set of isotopic ratios of the compound/element and triggers the calculation to update the density. Parameters: =========== compound: string (default is ''). Name of compound element: string (default is ''). Name of element list_ratio: list (default is []). List of new stoichiometric ratios Raises: ======= ValueError if compound does not exist ValueError if element does not exist ValueError if list_ratio does not have the right format
def calc_fwhm_gaussian(self, arr1d, medv=None, gauss_fn=None):
    """FWHM calculation on a 1D array by using least-squares fitting of
    a Gaussian function on the data.  arr1d is a 1D array cut in either
    X or Y direction on the object.
    """
    if gauss_fn is None:
        gauss_fn = self.gaussian

    N = len(arr1d)
    X = np.array(list(range(N)))
    Y = arr1d
    # Fitting works more reliably if we do the following
    # a. subtract sky background
    if medv is None:
        medv = get_median(Y)
    Y = Y - medv
    maxv = Y.max()
    # b. clamp to 0..max (of the sky subtracted field)
    Y = Y.clip(0, maxv)

    # Fit a gaussian
    p0 = [0, N - 1, maxv]              # Initial guess
    # Distance to the target function
    errfunc = lambda p, x, y: gauss_fn(x, p) - y  # noqa
    # Least square fit to the gaussian
    with self.lock:
        # NOTE: without this mutex, optimize.leastsq causes a fatal error
        # sometimes--it appears not to be thread safe.
        # The error is:
        # "SystemError: null argument to internal routine"
        # "Fatal Python error: GC object already tracked"
        p1, success = optimize.leastsq(errfunc, p0[:], args=(X, Y))

    if not success:
        raise IQCalcError("FWHM gaussian fitting failed")

    mu, sdev, maxv = p1
    self.logger.debug("mu=%f sdev=%f maxv=%f" % (mu, sdev, maxv))

    # Now that we have the sdev from fitting, we can calculate FWHM
    fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0)) * sdev
    # some routines choke on numpy values and need "pure" Python floats
    # e.g. when marshalling through a remote procedure interface
    fwhm = float(fwhm)
    mu = float(mu)
    sdev = float(sdev)
    maxv = float(maxv)

    res = Bunch.Bunch(fwhm=fwhm, mu=mu, sdev=sdev, maxv=maxv,
                      fit_fn=gauss_fn, fit_args=[mu, sdev, maxv])
    return res
FWHM calculation on a 1D array by using least-squares fitting of a Gaussian function on the data. arr1d is a 1D array cut in either X or Y direction on the object.
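A minimal, self-contained sketch of the same recipe, with a stand-in gaussian() in place of the class's gauss_fn (the data and initial guess are invented for illustration): least-squares fit with scipy.optimize.leastsq, then FWHM from the fitted sigma.

import numpy as np
from scipy import optimize

def gaussian(x, p):
    # p = [mu, sdev, maxv], matching the fit_args layout above
    mu, sdev, maxv = p
    return maxv * np.exp(-0.5 * ((x - mu) / sdev) ** 2)

X = np.arange(50, dtype=float)
Y = gaussian(X, [24.0, 3.0, 10.0]) + np.random.normal(0.0, 0.2, 50)

errfunc = lambda p, x, y: gaussian(x, p) - y
p1, ier = optimize.leastsq(errfunc, [0.0, 49.0, Y.max()], args=(X, Y))
mu, sdev, maxv = p1
fwhm = 2.0 * np.sqrt(2.0 * np.log(2.0)) * abs(sdev)  # ~7.06 for sdev ~ 3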
def convert_data_to_ndarray(self):
    """Converts the data from dataframe to ndarray format.

    Assumption: df-columns are ndarray-layers (3rd dim.)"""
    if self._data_structure != "DataFrame":
        raise Exception(f"Data is not a DataFrame but {self._data_structure}.")
    self._data = self._convert_to_ndarray(self._data)
    self._update_data_structure()
    return self
Converts the data from dataframe to ndarray format. Assumption: df-columns are ndarray-layers (3rd dim.)
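The underlying conversion in isolation (column names and values here are invented for the example):

import pandas as pd

df = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})
arr = df.to_numpy()        # shape (3, 2); column "a" is arr[:, 0]
assert arr.shape == (3, 2)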
def duplicate(self, host):  # pylint: disable=too-many-locals
    """For a given host, create every copy required by the duplicate_foreach property

    :param host: alignak host object
    :type host: alignak.objects.host.Host
    :return: list
    :rtype: list
    """
    duplicates = []

    # In macros, it's all in UPPER case
    prop = self.duplicate_foreach.strip().upper()
    if prop not in host.customs:
        # If I do not have the property, we bail out
        return duplicates

    # Get the list entry, and the exclusion one if there is one
    entry = host.customs[prop]
    # Look at the list of the keys we do NOT want; maybe
    # for _disks it will be _!disks
    not_entry = host.customs.get('_' + '!' + prop[1:], '').split(',')
    not_keys = strip_and_uniq(not_entry)

    default_value = getattr(self, 'default_value', '')
    # Transform the generator string to a list
    # Missing values are filled with the default value
    try:
        key_values = tuple(generate_key_value_sequences(entry, default_value))
    except KeyValueSyntaxError as exc:
        fmt_dict = {
            'prop': self.duplicate_foreach,
            'host': host.get_name(),
            'svc': self.service_description,
            'entry': entry,
            'exc': exc,
        }
        err = (
            "The custom property %(prop)r of the "
            "host %(host)r is not a valid entry for a service generator: %(exc)s, "
            "with entry=%(entry)r") % fmt_dict
        logger.warning(err)
        host.add_error(err)
        return duplicates

    for key_value in key_values:
        key = key_value['KEY']
        # Maybe this key is in the NOT list; if so, skip it
        if key in not_keys:
            continue
        new_s = self.copy()
        new_s.host_name = host.get_name()
        if self.is_tpl():  # if self is a template, the new one is not
            new_s.register = 1
        for key in key_value:
            if key == 'KEY':
                if hasattr(self, 'service_description'):
                    # We want to change all illegal chars to a _ sign.
                    # We can't use class.illegal_obj_char
                    # because in the "explode" phase, we do not have access to this data! :(
                    safe_key_value = re.sub(r'[' + "`~!$%^&*\"|'<>?,()=" + ']+', '_',
                                            key_value[key])
                    new_s.service_description = self.service_description.replace(
                        '$' + key + '$', safe_key_value
                    )
            # Here is a list of properties where we will expand the $KEY$ by the value
            _the_expandables = ['check_command', 'aggregation', 'event_handler']
            for prop in _the_expandables:
                if hasattr(self, prop):
                    # here we can replace VALUE, VALUE1, VALUE2, ...
                    setattr(new_s, prop,
                            getattr(new_s, prop).replace('$' + key + '$',
                                                         key_value[key]))
            if hasattr(self, 'service_dependencies'):
                for i, servicedep in enumerate(new_s.service_dependencies):
                    new_s.service_dependencies[i] = servicedep.replace(
                        '$' + key + '$', key_value[key]
                    )
        # And then add this new service to our list
        duplicates.append(new_s)
    return duplicates
For a given host, create every copy required by the duplicate_foreach property :param host: alignak host object :type host: alignak.objects.host.Host :return: list :rtype: list
def getLeader(self, vehID, dist=0.):
    """getLeader(string, double) -> (string, double)

    Return the leading vehicle id together with the distance. The distance
    is measured from the front + minGap to the back of the leader, so it
    does not include the minGap of the vehicle.
    The dist parameter defines the maximum lookahead, 0 calculates a
    lookahead from the brake gap. Note that the returned leader may be
    farther away than the given dist.
    """
    self._connection._beginMessage(
        tc.CMD_GET_VEHICLE_VARIABLE, tc.VAR_LEADER, vehID, 1 + 8)
    self._connection._string += struct.pack("!Bd", tc.TYPE_DOUBLE, dist)
    return _readLeader(self._connection._checkResult(
        tc.CMD_GET_VEHICLE_VARIABLE, tc.VAR_LEADER, vehID))
getLeader(string, double) -> (string, double) Return the leading vehicle id together with the distance. The distance is measured from the front + minGap to the back of the leader, so it does not include the minGap of the vehicle. The dist parameter defines the maximum lookahead, 0 calculates a lookahead from the brake gap. Note that the returned leader may be farther away than the given dist.
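What the "!Bd" payload above looks like on the wire: one unsigned byte for the TraCI type marker followed by a big-endian double. The marker value used here is an assumption for illustration, not taken from the source.

import struct

TYPE_DOUBLE = 0x0B  # assumed TraCI type marker value, for illustration only
payload = struct.pack("!Bd", TYPE_DOUBLE, 100.0)
assert len(payload) == 1 + 8  # matches the length passed to _beginMessage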
def allow(self, ctx, acls):
    '''Allow access to any ACL member that is equal to the user name.

    That is, some user u is considered a member of group u and no other.
    '''
    for acl in acls:
        if self._identity == acl:
            return True
    return False
Allow access to any ACL member that is equal to the user name. That is, some user u is considered a member of group u and no other.
def _check_tcpdump():
    """
    Return True if the tcpdump command can be started
    """
    with open(os.devnull, 'wb') as devnull:
        try:
            proc = subprocess.Popen([conf.prog.tcpdump, "--version"],
                                    stdout=devnull, stderr=subprocess.STDOUT)
        except OSError:
            return False

    if OPENBSD:
        # 'tcpdump --version' returns 1 on OpenBSD 6.4
        return proc.wait() == 1
    else:
        return proc.wait() == 0
Return True if the tcpdump command can be started
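The same availability-probe pattern, generalized: run "<tool> --version", swallow the output, and treat the exit status as the signal. The helper name and the tool it probes are examples, not part of the original module.

import os
import subprocess

def command_available(prog, ok_codes=(0,)):
    """Probe for a CLI tool by running '<prog> --version' and checking its exit code."""
    with open(os.devnull, "wb") as devnull:
        try:
            proc = subprocess.Popen([prog, "--version"],
                                    stdout=devnull, stderr=subprocess.STDOUT)
        except OSError:
            return False  # binary not found
        return proc.wait() in ok_codes

print(command_available("tcpdump", ok_codes=(0, 1)))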
def _do_synchronise_jobs(walltime, machines):
    """ This returns a common reservation date for all the jobs.

    This reservation date is really only a hint and will be supplied to
    each oar server. Without this *common* reservation_date, one oar
    server can decide to postpone the start of the job while the others
    are already running. But this doesn't prevent the start of a job on
    one site to drift (e.g. because the machines need to be restarted.)
    But this shouldn't exceed a few minutes.
    """

    offset = SYNCHRONISATION_OFFSET
    start = time.time() + offset
    _t = time.strptime(walltime, "%H:%M:%S")
    _walltime = _t.tm_hour * 3600 + _t.tm_min * 60 + _t.tm_sec

    # Compute the demand for each cluster
    demands = defaultdict(int)
    for machine in machines:
        cluster = machine["cluster"]
        demands[cluster] += machine["nodes"]

    # Early leave if only one cluster is there
    if len(list(demands.keys())) <= 1:
        logger.debug("Only one cluster detected: no synchronisation needed")
        return None

    clusters = clusters_sites_obj(list(demands.keys()))

    # Early leave if only one site is concerned
    sites = set(list(clusters.values()))
    if len(sites) <= 1:
        logger.debug("Only one site detected: no synchronisation needed")
        return None

    # Test the proposed reservation_date
    ok = True
    for cluster, nodes in demands.items():
        cluster_status = clusters[cluster].status.list()
        ok = ok and can_start_on_cluster(cluster_status.nodes, nodes,
                                         start, _walltime)
        if not ok:
            break
    if ok:
        # The proposed reservation_date fits
        logger.info("Reservation_date=%s (%s)" % (_date2h(start), sites))
        return start

    # No common reservation date could be found
    raise EnosG5kSynchronisationError(sites)
This returns a common reservation date for all the jobs. This reservation date is really only a hint and will be supplied to each oar server. Without this *common* reservation_date, one oar server can decide to postpone the start of the job while the others are already running. But this doesn't prevent the start of a job on one site to drift (e.g. because the machines need to be restarted.) But this shouldn't exceed a few minutes.
def _validate_message(self, message):
    """Validate XML response from iLO.

    This function validates the XML response to see if the exit status
    is 0 or not in the response. If the status is non-zero it raises an
    exception.
    """
    if message.tag != 'RIBCL':
        # the true case shall be unreachable for response
        # XML from iLO as all messages are tagged with RIBCL
        # but still raise an exception if any invalid
        # XML response is returned by iLO. Set status to some
        # arbitrary non-zero value.
        status = -1
        raise exception.IloClientInternalError(message, status)

    for child in message:
        if child.tag != 'RESPONSE':
            return message
        status = int(child.get('STATUS'), 16)
        msg = child.get('MESSAGE')
        if status == 0 and msg != 'No error':
            return msg
        if status != 0:
            if 'syntax error' in msg or 'Feature not supported' in msg:
                for cmd in BOOT_MODE_CMDS:
                    if cmd in msg:
                        platform = self.get_product_name()
                        msg = ("%(cmd)s is not supported on %(platform)s" %
                               {'cmd': cmd, 'platform': platform})
                        LOG.debug(self._("Got invalid response with "
                                         "message: '%(message)s'"),
                                  {'message': msg})
                        raise (exception.IloCommandNotSupportedError
                               (msg, status))
                else:
                    LOG.debug(self._("Got invalid response with "
                                     "message: '%(message)s'"),
                              {'message': msg})
                    raise exception.IloClientInternalError(msg, status)
            if (status in exception.IloLoginFailError.statuses or
                    msg in exception.IloLoginFailError.messages):
                LOG.debug(self._("Got invalid response with "
                                 "message: '%(message)s'"),
                          {'message': msg})
                raise exception.IloLoginFailError(msg, status)
            LOG.debug(self._("Got invalid response with "
                             "message: '%(message)s'"),
                      {'message': msg})
            raise exception.IloError(msg, status)
Validate XML response from iLO. This function validates the XML response to see if the exit status is 0 or not in the response. If the status is non-zero it raises an exception.
def clip_or_fit_solutions(self, pop, idx):
    """make sure that solutions fit to the sample distribution; this
    interface will probably change.

    In particular, the frequency of long vectors appearing in
    ``pop[idx] - self.mean`` is limited.
    """
    for k in idx:
        self.repair_genotype(pop[k])
make sure that solutions fit to the sample distribution; this interface will probably change. In particular, the frequency of long vectors appearing in pop[idx] - self.mean is limited.
async def deserialize(data: dict):
    """
    Builds a Proof object with defined attributes.
    Attributes are provided by a previous call to the serialize function.

    :param data:
    Example:
    name = "proof name"
    requested_attrs = [
        {"name": "age",
         "restrictions": [
             {"schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11",
              "schema_name": "Faber Student Info",
              "schema_version": "1.0",
              "schema_issuer_did": "6XFh8yBzrpJQmNyZzgoTqB",
              "issuer_did": "8XFh8yBzrpJQmNyZzgoTqB",
              "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766"},
             {"schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11",
              "schema_name": "BYU Student Info",
              "schema_version": "1.0",
              "schema_issuer_did": "5XFh8yBzrpJQmNyZzgoTqB",
              "issuer_did": "66Fh8yBzrpJQmNyZzgoTqB",
              "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766"}]},
        {"name": "name",
         "restrictions": [
             {"schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11",
              "schema_name": "Faber Student Info",
              "schema_version": "1.0",
              "schema_issuer_did": "6XFh8yBzrpJQmNyZzgoTqB",
              "issuer_did": "8XFh8yBzrpJQmNyZzgoTqB",
              "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766"},
             {"schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11",
              "schema_name": "BYU Student Info",
              "schema_version": "1.0",
              "schema_issuer_did": "5XFh8yBzrpJQmNyZzgoTqB",
              "issuer_did": "66Fh8yBzrpJQmNyZzgoTqB",
              "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766"}]}]
    proof = await Proof.create(source_id, name, requested_attrs)
    data = proof.serialize()
    proof2 = await Proof.deserialize(data)
    :return: Proof Object
    """
    return await Proof._deserialize("vcx_proof_deserialize",
                                    json.dumps(data),
                                    data.get('data').get('source_id'))
Builds a Proof object with defined attributes. Attributes are provided by a previous call to the serialize function. :param data: Example: name = "proof name" requested_attrs = [{"name": "age", "restrictions": [{"schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"Faber Student Info", "schema_version":"1.0", "schema_issuer_did":"6XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"8XFh8yBzrpJQmNyZzgoTqB", "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766" }, { "schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"BYU Student Info", "schema_version":"1.0", "schema_issuer_did":"5XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"66Fh8yBzrpJQmNyZzgoTqB", "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766" } ] }, { "name":"name", "restrictions": [ { "schema_id": "6XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"Faber Student Info", "schema_version":"1.0", "schema_issuer_did":"6XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"8XFh8yBzrpJQmNyZzgoTqB", "cred_def_id": "8XFh8yBzrpJQmNyZzgoTqB:3:CL:1766" }, { "schema_id": "5XFh8yBzrpJQmNyZzgoTqB:2:schema_name:0.0.11", "schema_name":"BYU Student Info", "schema_version":"1.0", "schema_issuer_did":"5XFh8yBzrpJQmNyZzgoTqB", "issuer_did":"66Fh8yBzrpJQmNyZzgoTqB", "cred_def_id": "66Fh8yBzrpJQmNyZzgoTqB:3:CL:1766"}]}] proof = await Proof.create(source_id, name, requested_attrs) data = proof.serialize() proof2 = await Proof.deserialize(data) :return: Proof Object
def start_heron_tools(masters, cl_args):
    '''
    Start Heron tracker and UI
    '''
    single_master = list(masters)[0]
    wait_for_master_to_start(single_master)

    cmd = "%s run %s >> /tmp/heron_tools_start.log 2>&1 &" \
          % (get_nomad_path(cl_args), get_heron_tools_job_file(cl_args))
    Log.info("Starting Heron Tools on %s" % single_master)

    if not is_self(single_master):
        cmd = ssh_remote_execute(cmd, single_master, cl_args)
    Log.debug(cmd)
    pid = subprocess.Popen(cmd,
                           shell=True,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)

    return_code = pid.wait()
    output = pid.communicate()
    Log.debug("return code: %s output: %s" % (return_code, output))
    if return_code != 0:
        Log.error("Failed to start Heron Tools on %s with error:\n%s"
                  % (single_master, output[1]))
        sys.exit(-1)

    wait_for_job_to_start(single_master, "heron-tools")
    Log.info("Done starting Heron Tools")
Start Heron tracker and UI
def _request(self, method, uri_relative, request_bytes, params,
             custom_headers):
    """
    :type method: str
    :type uri_relative: str
    :type request_bytes: bytes
    :type params: dict[str, str]
    :type custom_headers: dict[str, str]

    :return: BunqResponseRaw
    """
    uri_relative_with_params = self._append_params_to_uri(uri_relative,
                                                          params)
    if uri_relative not in self._URIS_NOT_REQUIRING_ACTIVE_SESSION:
        if self._api_context.ensure_session_active():
            from bunq.sdk.context import BunqContext

            BunqContext.update_api_context(self._api_context)

    all_headers = self._get_all_headers(
        method,
        uri_relative_with_params,
        request_bytes,
        custom_headers
    )

    response = requests.request(
        method,
        self._get_uri_full(uri_relative_with_params),
        data=request_bytes,
        headers=all_headers,
        proxies={self._FIELD_PROXY_HTTPS: self._api_context.proxy_url},
    )

    self._assert_response_success(response)

    if self._api_context.installation_context is not None:
        security.validate_response(
            self._api_context.installation_context.public_key_server,
            response.status_code,
            response.content,
            response.headers
        )

    return self._create_bunq_response_raw(response)
:type method: str :type uri_relative: str :type request_bytes: bytes :type params: dict[str, str] :type custom_headers: dict[str, str] :return: BunqResponseRaw
def learnObject(self,
                objectDescription,
                randomLocation=False,
                useNoise=False,
                noisyTrainingTime=1):
    """
    Train the network to recognize the specified object. Move the sensor to one
    of its features and activate a random location representation in the
    location layer. Move the sensor over the object, updating the location
    representation through path integration. At each point on the object, form
    reciprocal connections between the representation of the location and the
    representation of the sensory input.

    @param objectDescription (dict)
    For example:
    {"name": "Object 1",
     "features": [{"top": 0, "left": 0, "width": 10, "height": 10, "name": "A"},
                  {"top": 0, "left": 10, "width": 10, "height": 10, "name": "B"}]}

    @return locationsAreUnique (bool)
    True if this object was assigned a unique set of locations. False if a
    location on this object has the same location representation as another
    location somewhere else.
    """
    self.reset()
    self.column.activateRandomLocation()

    locationsAreUnique = True
    if randomLocation or useNoise:
        numIters = noisyTrainingTime
    else:
        numIters = 1
    for i in xrange(numIters):
        for iFeature, feature in enumerate(objectDescription["features"]):
            self._move(feature, randomLocation=randomLocation, useNoise=useNoise)
            featureSDR = self.features[feature["name"]]
            self._sense(featureSDR, learn=True, waitForSettle=False)

            locationRepresentation = (
                self.column.getSensoryAssociatedLocationRepresentation())
            self.locationRepresentations[
                (objectDescription["name"], iFeature)].append(
                    locationRepresentation)
            self.inputRepresentations[
                (objectDescription["name"], iFeature, feature["name"])] = (
                    self.column.L4.getWinnerCells())

            locationTuple = tuple(locationRepresentation)
            locationsAreUnique = (locationsAreUnique and
                                  locationTuple not in self.representationSet)

            self.representationSet.add(tuple(locationRepresentation))

    self.learnedObjects.append(objectDescription)
    return locationsAreUnique
Train the network to recognize the specified object. Move the sensor to one of its features and activate a random location representation in the location layer. Move the sensor over the object, updating the location representation through path integration. At each point on the object, form reciprocal connections between the representation of the location and the representation of the sensory input. @param objectDescription (dict) For example: {"name": "Object 1", "features": [{"top": 0, "left": 0, "width": 10, "height": 10, "name": "A"}, {"top": 0, "left": 10, "width": 10, "height": 10, "name": "B"}]} @return locationsAreUnique (bool) True if this object was assigned a unique set of locations. False if a location on this object has the same location representation as another location somewhere else.
def get_instance(self):
    """
    Returns the singleton instance. Upon its first call, it creates a new
    instance of the decorated class and calls its `__init__` method. On all
    subsequent calls, the already created instance is returned.
    """
    try:
        return self._instance
    except AttributeError:
        self._instance = self._decorated()
        return self._instance
Returns the singleton instance. Upon its first call, it creates a new instance of the decorated class and calls its `__init__` method. On all subsequent calls, the already created instance is returned.
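A plausible shape for the decorator this method belongs to; the surrounding class and the Config example are reconstructed from the common singleton-decorator recipe, not taken from the source.

class Singleton:
    """Decorator that hides a class behind a single lazily created instance."""

    def __init__(self, decorated):
        self._decorated = decorated

    def get_instance(self):
        try:
            return self._instance
        except AttributeError:
            self._instance = self._decorated()
            return self._instance

    def __call__(self):
        raise TypeError('Singletons must be accessed through get_instance().')

@Singleton
class Config:
    def __init__(self):
        self.loaded = True

assert Config.get_instance() is Config.get_instance()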
def _dK_dR(self, R):
    """Return numpy array of dK/dR from K1 up to and including Kn."""
    return -self._ns * self._N / R**2 / self._sin_alpha
Return numpy array of dK/dR from K1 up to and including Kn.
def run(self, file_list):
    """
    Runs pylint on the list of files and returns a dictionary:
    {<filename>: [list of pylint errors],
     'total': <int> - Total number of pylint messages,
     'errors': <int> - Number of pylint errors,
     'scores': (<filename>, score) - Individual score for each file.}

    :param file_list:
    :return:
    """
    data = {'total': 0, 'errors': 0, 'scores': []}
    for filename in file_list:
        path, fname = os.path.split(filename)
        if os.path.splitext(filename)[1] != '.py':
            # Don't run on non-python files.
            continue
        with cd_ctx(path):
            short_data = pylint_raw([fname, "--reports=n", "-f", "text"])
            full_data = pylint_raw([fname, "--reports=y", "-f", "text"])
            score_regex = re.search(r"Your code has been rated at (-?\d+\.\d+)",
                                    full_data)
            if score_regex:
                score = score_regex.groups()[0]
                data['scores'].append((filename, float(score)))
            pylint_data = short_data.splitlines()
            # Remove the module line that is at the top of each pylint report
            if len(pylint_data) > 0:
                pylint_data.pop(0)
            data[filename] = pylint_data
            for line in pylint_data[:]:
                if line.startswith('E'):
                    data['errors'] += 1
                # Ignore pylint fatal errors (problem w/ pylint, not the code generally).
                if line.startswith('F'):
                    data[filename].remove(line)
            data['total'] += len(data[filename])
    if len(data['scores']) > 0:
        data['average'] = (sum([score[1] for score in data['scores']])
                           / len(data['scores']))
    else:
        data['average'] = 9  # Default average; comes up when all files are new.
    print("Total: %s" % data['total'])
    print("Errors: %s" % data['errors'])
    print("Average score: %f" % data['average'])
    return data
Runs pylint on the list of files and returns a dictionary: {<filename>: [list of pylint errors], 'total': <int> - Total number of pylint messages, 'errors': <int> - Number of pylint errors, 'scores': (<filename>, score) - Individual score for each file.} :param file_list: :return:
def _interpolationFunctionFactory(self, spline_order=None, cval=None):
    """Returns a function F(x,y,z) that interpolates any values on the grid.

    _interpolationFunctionFactory(self,spline_order=3,cval=None) --> F

    *cval* is set to :meth:`Grid.grid.min`. *cval* cannot be chosen too
    large or too small or NaN because otherwise the spline interpolation
    breaks down near that region and produces wild oscillations.

    .. Note:: Only correct for equally spaced values (i.e. regular edges with
              constant delta).
    .. SeeAlso:: http://www.scipy.org/Cookbook/Interpolation
    """
    # for scipy >=0.9: should use scipy.interpolate.griddata
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.griddata.html#scipy.interpolate.griddata
    # (does it work for nD?)
    import scipy.ndimage

    if spline_order is None:
        # must be compatible with whatever
        # :func:`scipy.ndimage.spline_filter` takes.
        spline_order = self.interpolation_spline_order
    if cval is None:
        cval = self.interpolation_cval

    data = self.grid
    if cval is None:
        cval = data.min()
    try:
        # masked arrays, fill with min: should keep spline happy
        _data = data.filled(cval)
    except AttributeError:
        _data = data

    coeffs = scipy.ndimage.spline_filter(_data, order=spline_order)
    x0 = self.origin
    dx = self.delta

    def _transform(cnew, c0, dc):
        return (numpy.atleast_1d(cnew) - c0) / dc

    def interpolatedF(*coordinates):
        """B-spline function over the data grid(x,y,z).

        interpolatedF([x1,x2,...],[y1,y2,...],[z1,z2,...]) -> F[x1,y1,z1],F[x2,y2,z2],...

        Example usage for resampling::
          >>> XX,YY,ZZ = numpy.mgrid[40:75:0.5, 96:150:0.5, 20:50:0.5]
          >>> FF = _interpolationFunction(XX,YY,ZZ)
        """
        _coordinates = numpy.array(
            [_transform(coordinates[i], x0[i], dx[i])
             for i in range(len(coordinates))])
        # mode='wrap' would be ideal but is broken:
        # https://github.com/scipy/scipy/issues/1323
        return scipy.ndimage.map_coordinates(coeffs, _coordinates,
                                             prefilter=False,
                                             mode='nearest', cval=cval)
    return interpolatedF
Returns a function F(x,y,z) that interpolates any values on the grid. _interpolationFunctionFactory(self,spline_order=3,cval=None) --> F *cval* is set to :meth:`Grid.grid.min`. *cval* cannot be chosen too large or too small or NaN because otherwise the spline interpolation breaks down near that region and produces wild oscillations. .. Note:: Only correct for equally spaced values (i.e. regular edges with constant delta). .. SeeAlso:: http://www.scipy.org/Cookbook/Interpolation
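A minimal end-to-end example of the spline_filter/map_coordinates pairing used above, on a 1D grid with unit spacing (origin 0, delta 1); the data values are invented.

import numpy as np
import scipy.ndimage

data = np.sin(np.linspace(0.0, np.pi, 16))          # values on a unit-spaced grid
coeffs = scipy.ndimage.spline_filter(data, order=3)
# prefilter=False because the data were already converted to spline coefficients
vals = scipy.ndimage.map_coordinates(coeffs, [[2.5, 7.25]],
                                     prefilter=False, mode='nearest')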
def _group_and_publish_tasks_statistics(self, result):
    """This function groups statistics of the same tasks by adding them.
    It also adds an 'instances_count' statistic to get information about
    how many instances are running on the server.

    Args:
        result: result of the mesos query. List of dictionaries with
            'executor_id' and 'framework_id' as strings and 'statistics'
            as a dictionary of labeled numbers
    """
    for i in result:
        executor_id = i['executor_id']
        i['executor_id'] = executor_id[:executor_id.rfind('.')]
        i['statistics']['instances_count'] = 1

    r = {}
    for i in result:
        executor_id = i['executor_id']
        r[executor_id] = r.get(executor_id, {})
        r[executor_id]['framework_id'] = i['framework_id']
        r[executor_id]['statistics'] = r[executor_id].get('statistics', {})
        r[executor_id]['statistics'] = self._sum_statistics(
            i['statistics'], r[executor_id]['statistics'])

    self._add_cpu_usage(r)
    self._add_cpu_percent(r)
    self._add_mem_percent(r)
    self._publish(r)
This function groups statistics of the same tasks by adding them. It also adds an 'instances_count' statistic to get information about how many instances are running on the server. Args: result: result of the mesos query. List of dictionaries with 'executor_id' and 'framework_id' as strings and 'statistics' as a dictionary of labeled numbers
def restore_geometry_on_layout_change(self, value):
    """
    Setter for **self.__restore_geometry_on_layout_change** attribute.

    :param value: Attribute value.
    :type value: bool
    """
    if value is not None:
        assert type(value) is bool, \
            "'{0}' attribute: '{1}' type is not 'bool'!".format(
                "restore_geometry_on_layout_change", value)
    self.__restore_geometry_on_layout_change = value
Setter for **self.__restore_geometry_on_layout_change** attribute. :param value: Attribute value. :type value: bool
def wet_bulb_from_db_rh(db_temp, rh, b_press=101325):
    """Wet Bulb Temperature (C) at db_temp (C), Relative Humidity rh (%),
    and Pressure b_press (Pa).

    Note:
        [1] J. Sullivan and L. D. Sanders. "Method for obtaining wet-bulb
        temperatures by modifying the psychrometric formula." Center for
        Experiment Design and Data Analysis. NOAA - National Oceanic and
        Atmospheric Administration. http://www.srh.noaa.gov/epz/?n=wxcalc_rh
    """
    es = 6.112 * math.e ** ((17.67 * db_temp) / (db_temp + 243.5))
    e = (es * rh) / 100
    t_w = 0
    increase = 10.0
    previoussign = 1
    e_d = 1
    while math.fabs(e_d) > 0.005:
        e_wg = 6.112 * (math.e ** ((17.67 * t_w) / (t_w + 243.5)))
        eg = e_wg - (b_press / 100) * (db_temp - t_w) * 0.00066 * \
            (1 + (0.00155 * t_w))
        e_d = e - eg
        if e_d == 0:
            break
        # Shrink the step size tenfold each time the sign of the error
        # flips, then step towards the root.
        cursign = -1 if e_d < 0 else 1
        if cursign != previoussign:
            previoussign = cursign
            increase = increase / 10
        t_w = t_w + increase * previoussign
    return t_w
Wet Bulb Temperature (C) at db_temp (C), Relative Humidity rh (%), and Pressure b_press (Pa). Note: [1] J. Sullivan and L. D. Sanders. "Method for obtaining wet-bulb temperatures by modifying the psychrometric formula." Center for Experiment Design and Data Analysis. NOAA - National Oceanic and Atmospheric Administration. http://www.srh.noaa.gov/epz/?n=wxcalc_rh
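Worked numbers for the saturation-vapor-pressure term above (es = 6.112 * exp(17.67*T / (T + 243.5)), in hPa); the temperature and humidity are made up for the example.

import math

db_temp, rh = 25.0, 50.0
es = 6.112 * math.e ** ((17.67 * db_temp) / (db_temp + 243.5))  # ~31.7 hPa
e = es * rh / 100                                               # ~15.8 hPa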
def leaf_sections(h):
    """
    Returns a list of all sections that have no children.
    """
    leaves = []
    for section in h.allsec():
        sref = h.SectionRef(sec=section)
        # nchild returns a float... cast to bool
        if sref.nchild() < 0.9:
            leaves.append(section)
    return leaves
Returns a list of all sections that have no children.
def activate_user(self, user):
    """Activates a specified user. Returns `True` if a change was made.

    :param user: The user to activate
    """
    if not user.active:
        user.active = True
        return True
    return False
Activates a specified user. Returns `True` if a change was made. :param user: The user to activate
def apply_palette(img, palette, options):
    '''Apply the palette to the given image. The first step is to set all
    background pixels to the background color; then, nearest-neighbor
    matching is used to map each foreground color to the closest one in
    the palette.
    '''
    if not options.quiet:
        print('  applying palette...')

    bg_color = palette[0]
    fg_mask = get_fg_mask(bg_color, img, options)

    orig_shape = img.shape

    pixels = img.reshape((-1, 3))
    fg_mask = fg_mask.flatten()

    num_pixels = pixels.shape[0]
    labels = np.zeros(num_pixels, dtype=np.uint8)

    labels[fg_mask], _ = vq(pixels[fg_mask], palette)

    return labels.reshape(orig_shape[:-1])
Apply the palette to the given image. The first step is to set all background pixels to the background color; then, nearest-neighbor matching is used to map each foreground color to the closest one in the palette.
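The nearest-neighbour step in isolation: scipy.cluster.vq.vq returns, for each observation, the index of the closest palette entry. The colors here are invented.

import numpy as np
from scipy.cluster.vq import vq

palette = np.array([[255, 255, 255], [0, 0, 0], [200, 30, 30]], dtype=float)
pixels = np.array([[250, 250, 240], [10, 5, 0], [180, 40, 50]], dtype=float)
labels, _ = vq(pixels, palette)
assert labels.tolist() == [0, 1, 2]  # each pixel maps to its nearest palette entry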
def download_interim_for_gssha(main_directory,
                               start_datetime,
                               end_datetime,
                               leftlon=-180,
                               rightlon=180,
                               toplat=90,
                               bottomlat=-90,
                               precip_only=False):
    """
    Function to download ERA-Interim data for GSSHA

    .. note:: https://software.ecmwf.int/wiki/display/WEBAPI/Access+ECMWF+Public+Datasets

    Args:
        main_directory(:obj:`str`): Location of the output for the forecast data.
        start_datetime(:obj:`datetime.datetime`): Datetime for download start.
        end_datetime(:obj:`datetime.datetime`): Datetime for download end.
        leftlon(Optional[:obj:`float`]): Left bound for longitude. Default is -180.
        rightlon(Optional[:obj:`float`]): Right bound for longitude. Default is 180.
        toplat(Optional[:obj:`float`]): Top bound for latitude. Default is 90.
        bottomlat(Optional[:obj:`float`]): Bottom bound for latitude. Default is -90.
        precip_only(Optional[bool]): If True, will only download precipitation.

    Example::

        from datetime import datetime
        from gsshapy.grid.era_to_gssha import download_interim_for_gssha

        era_interim_folder = '/era_interim'
        leftlon = -95
        rightlon = -75
        toplat = 35
        bottomlat = 30
        download_interim_for_gssha(era_interim_folder,
                                   datetime(2016, 1, 2),
                                   datetime(2016, 1, 4),
                                   leftlon, rightlon, toplat, bottomlat)
    """
    # parameters: https://software.ecmwf.int/wiki/display/CKB/Details+of+ERA-Interim+parameters
    # import here to make sure it is not required to run
    from ecmwfapi import ECMWFDataServer
    server = ECMWFDataServer()

    try:
        mkdir(main_directory)
    except OSError:
        pass

    download_area = "{toplat}/{leftlon}/{bottomlat}/{rightlon}".format(
        toplat=toplat, leftlon=leftlon, bottomlat=bottomlat, rightlon=rightlon)
    download_datetime = start_datetime
    interim_request = {
        'dataset': "interim",
        # 'oper' specifies the high resolution daily data, as opposed to
        # monthly means, wave, eda edmm, etc.
        'stream': "oper",
        # Surface level, as opposed to pressure level (pl) or model level (ml)
        'levtype': "sfc",
        # The spatial resolution in ERA-Interim is 80 km globally on a
        # Gaussian grid. Here we use lat/long with 0.5 degrees.
        'grid': "0.5/0.5",
        'area': download_area,
        'format': 'netcdf',
    }
    while download_datetime <= end_datetime:
        interim_request['date'] = download_datetime.strftime("%Y-%m-%d")
        if not precip_only:
            download_file = path.join(
                main_directory,
                "erai_gssha_{0}_an.nc".format(download_datetime.strftime("%Y%m%d")))
            if not path.exists(download_file):
                # We want instantaneous parameters, which are archived as type
                # Analysis ('an') as opposed to forecast ('fc')
                interim_request['type'] = "an"
                # For parameter codes see the ECMWF parameter database at
                # http://apps.ecmwf.int/codes/grib/param-db
                interim_request['param'] = "2t/2d/sp/10u/10v/tcc"
                # step 0 is analysis, 3-12 is forecast
                interim_request['step'] = "0"
                # ERA-Interim provides 6-hourly analysis
                interim_request['time'] = "00/06/12/18"
                interim_request['target'] = download_file
                server.retrieve(interim_request)
            download_file = path.join(
                main_directory,
                "erai_gssha_{0}_1_fc.nc".format(download_datetime.strftime("%Y%m%d")))
            if not path.exists(download_file):
                interim_request['type'] = "fc"
                interim_request['param'] = "2t/2d/sp/10u/10v/tcc"
                interim_request['step'] = "3"
                interim_request['time'] = "00/06/12/18"
                interim_request['target'] = download_file
                server.retrieve(interim_request)
        download_file = path.join(
            main_directory,
            "erai_gssha_{0}_fc.nc".format(download_datetime.strftime("%Y%m%d")))
        if not path.exists(download_file):
            interim_request['type'] = "fc"
            interim_request['param'] = "tp/ssrd"
            interim_request['step'] = "3/6/9/12"
            interim_request['time'] = "00/12"
            interim_request['target'] = download_file
            server.retrieve(interim_request)
            # TODO: READ FILE AND MODIFY VALUES SO IT IS NOT INCREMENTAL
            # https://software.ecmwf.int/wiki/pages/viewpage.action?pageId=56658233
            # You need total precipitation for every 6 hours.
            # Daily total precipitation (tp) is only available with a forecast
            # base time 00:00 and 12:00, so to get tp for every 6 hours you
            # will need to extract (and for the second and fourth period calculate):
            # tp(00-06) = (time 00, step 6)
            # tp(06-12) = (time 00, step 12) minus (time 00, step 6)
            # tp(12-18) = (time 12, step 6)
            # tp(18-24) = (time 12, step 12) minus (time 12, step 6)
            # (Note the units for total precipitation is meters.)
            tmp_download_file = download_file + '_tmp'
            with xr.open_dataset(download_file) as xd:
                diff_xd = xd.diff('time')
                xd.tp[1:4] = diff_xd.tp[:3]
                xd.tp[5:] = diff_xd.tp[4:]
                xd.ssrd[1:4] = diff_xd.ssrd[:3]
                xd.ssrd[5:] = diff_xd.ssrd[4:]
                xd.to_netcdf(tmp_download_file)
            remove(download_file)
            rename(tmp_download_file, download_file)
        download_file = path.join(
            main_directory,
            "erai_gssha_{0}_0_fc.nc".format(download_datetime.strftime("%Y%m%d")))
        if download_datetime <= start_datetime and not path.exists(download_file):
            loc_download_date = (download_datetime - timedelta(1)).strftime("%Y-%m-%d")
            interim_request['type'] = "fc"
            interim_request['param'] = "tp/ssrd"
            interim_request['step'] = "9/12"
            interim_request['time'] = "12"
            interim_request['target'] = download_file
            interim_request['date'] = loc_download_date
            server.retrieve(interim_request)
            # convert to incremental (see above)
            tmp_download_file = download_file + '_tmp'
            with xr.open_dataset(download_file) as xd:
                inc_xd = xd.diff('time')
                inc_xd.to_netcdf(tmp_download_file)
            remove(download_file)
            rename(tmp_download_file, download_file)
        download_datetime += timedelta(1)
Function to download ERA-Interim data for GSSHA .. note:: https://software.ecmwf.int/wiki/display/WEBAPI/Access+ECMWF+Public+Datasets Args: main_directory(:obj:`str`): Location of the output for the forecast data. start_datetime(:obj:`datetime.datetime`): Datetime for download start. end_datetime(:obj:`datetime.datetime`): Datetime for download end. leftlon(Optional[:obj:`float`]): Left bound for longitude. Default is -180. rightlon(Optional[:obj:`float`]): Right bound for longitude. Default is 180. toplat(Optional[:obj:`float`]): Top bound for latitude. Default is 90. bottomlat(Optional[:obj:`float`]): Bottom bound for latitude. Default is -90. precip_only(Optional[bool]): If True, will only download precipitation. Example:: from datetime import datetime from gsshapy.grid.era_to_gssha import download_interim_for_gssha era_interim_folder = '/era_interim' leftlon = -95 rightlon = -75 toplat = 35 bottomlat = 30 download_interim_for_gssha(era_interim_folder, datetime(2016, 1, 2), datetime(2016, 1, 4), leftlon, rightlon, toplat, bottomlat)
def reporter(self):
    """
    Creates a report of the results
    """
    # Create the path in which the reports are stored
    make_path(self.reportpath)
    logging.info('Creating {} report'.format(self.analysistype))
    # Initialise the header and data strings
    header = 'Strain,Gene,PercentIdentity,Genus,FoldCoverage\n'
    data = ''
    with open(self.sixteens_report, 'w') as report:
        with open(os.path.join(self.reportpath,
                               self.analysistype + '_sequences.fa'), 'w') as sequences:
            for sample in self.runmetadata.samples:
                # Initialise
                sample[self.analysistype].sixteens_match = 'NA'
                sample[self.analysistype].species = 'NA'
                try:
                    # Select the best hit of all the full-length 16S genes mapped -
                    # for 16S use the hit with the fewest number of SNPs rather than
                    # the highest percent identity
                    sample[self.analysistype].besthit = sorted(
                        sample[self.analysistype].resultssnp.items(),
                        key=operator.itemgetter(1))[0][0]
                    # Parse the baited FASTA file to pull out the description of the hit
                    for record in SeqIO.parse(sample[self.analysistype].baitfile,
                                              'fasta'):
                        # If the best hit e.g. gi|631251361|ref|NR_112558.1| is present
                        # in the current record,
                        # gi|631251361|ref|NR_112558.1| Escherichia coli strain JCM 1649
                        # 16S ribosomal RNA ..., extract the match and the species
                        if sample[self.analysistype].besthit in record.id:
                            # Set the best match and species from the records
                            sample[self.analysistype].sixteens_match = \
                                record.description.split(' 16S')[0]
                            sample[self.analysistype].species = \
                                sample[self.analysistype].sixteens_match \
                                .split('|')[-1].split()[1]
                    # Add the sample name to the data string
                    data += sample.name + ','
                    # Find the record that matches the best hit, and extract the
                    # necessary values to be placed in the data string
                    for name, identity in sample[self.analysistype].results.items():
                        if name == sample[self.analysistype].besthit:
                            data += '{},{},{},{}\n'.format(
                                name, identity, sample[self.analysistype].genus,
                                sample[self.analysistype].avgdepth[name])
                            # Create a FASTA-formatted sequence output of the 16S sequence
                            record = SeqRecord(
                                Seq(sample[self.analysistype].sequences[name],
                                    IUPAC.unambiguous_dna),
                                id='{}_{}'.format(sample.name, '16S'),
                                description='')
                            SeqIO.write(record, sequences, 'fasta')
                except (AttributeError, IndexError):
                    data += '{}\n'.format(sample.name)
        # Write the results to the report
        report.write(header)
        report.write(data)
Creates a report of the results
def getClassAllSubs(self, aURI):
    """
    note: requires SPARQL 1.1
    2015-06-04: currently not used, inferred from above
    """
    aURI = aURI
    try:
        qres = self.rdfgraph.query(
            """SELECT DISTINCT ?x
               WHERE {
                   { ?x rdfs:subClassOf+ <%s> }
                   FILTER (!isBlank(?x))
               }
            """ % (aURI))
    except Exception:
        printDebug("... warning: the 'getClassAllSubs' query failed "
                   "(maybe missing SPARQL 1.1 support?)")
        qres = []
    return list(qres)
note: requires SPARQL 1.1 2015-06-04: currently not used, inferred from above
async def async_input(prompt):
    """
    Python's ``input()`` is blocking, which means the event loop we set
    above can't be running while we're blocking there. This method will
    let the loop run while we wait for input.
    """
    print(prompt, end='', flush=True)
    return (await loop.run_in_executor(None, sys.stdin.readline)).rstrip()
Python's ``input()`` is blocking, which means the event loop we set above can't be running while we're blocking there. This method will let the loop run while we wait for input.
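A self-contained sketch of how this coroutine would be driven; the module-level `loop` that the original file sets up is recreated here, and main() is an invented caller.

import asyncio
import sys

loop = asyncio.get_event_loop()

async def async_input(prompt):
    print(prompt, end='', flush=True)
    return (await loop.run_in_executor(None, sys.stdin.readline)).rstrip()

async def main():
    name = await async_input("name? ")
    print("hello,", name)

loop.run_until_complete(main())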
def loads(string):
    """Loads the filters dictionary given a string."""
    d = _loads(string)
    for k, v in d.items():
        FILTERS[dr.get_component(k) or k] = set(v)
Loads the filters dictionary given a string.
def get_location(conn, vm_):
    '''
    Return the location object to use
    '''
    locations = conn.list_locations()
    vm_location = config.get_cloud_config_value('location', vm_, __opts__)
    if not six.PY3:
        vm_location = vm_location.encode(
            'ascii', 'salt-cloud-force-ascii'
        )

    for img in locations:
        if isinstance(img.id, six.string_types) and not six.PY3:
            img_id = img.id.encode('ascii', 'salt-cloud-force-ascii')
        else:
            img_id = str(img.id)  # future lint: disable=blacklisted-function

        if isinstance(img.name, six.string_types) and not six.PY3:
            img_name = img.name.encode('ascii', 'salt-cloud-force-ascii')
        else:
            img_name = str(img.name)  # future lint: disable=blacklisted-function

        if vm_location and vm_location in (img_id, img_name):
            return img

    raise SaltCloudNotFound(
        'The specified location, \'{0}\', could not be found.'.format(
            vm_location
        )
    )
Return the location object to use
def find_one(self, id_, raw=True, recovery_name=True):
    """Find one record.

    Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve

    :param id_: record id_
    :param raw: Default True, set True if you want the data in raw format.
        Otherwise, html format
    :param recovery_name: Default True, set True if you want the field name
        instead of the field key

    **Chinese documentation** Returns a single record.
    """
    url = "https://api.knackhq.com/v1/objects/%s/records/%s" % (
        self.key, id_)
    res = self.get(url)
    if raw:
        try:
            res = self.get_raw_values(res, recovery_name=recovery_name)
        except Exception:
            pass
    else:
        try:
            res = self.get_html_values(res, recovery_name=recovery_name)
        except Exception:
            pass
    return res
Find one record. Ref: http://helpdesk.knackhq.com/support/solutions/articles/5000446111-api-reference-root-access#retrieve :param id_: record id_ :param raw: Default True, set True if you want the data in raw format. Otherwise, html format :param recovery_name: Default True, set True if you want the field name instead of the field key **Chinese documentation** Returns a single record.
def to_message(self, keywords=None, show_header=True):
    """Format keywords as a message object.

    .. versionadded:: 3.2

    .. versionchanged:: 3.3 - default keywords to None

    The message object can then be rendered to html, plain text etc.

    :param keywords: Keywords to be converted to a message. Optional. If
        not passed then we will attempt to get keywords from self.layer
        if it is not None.
    :type keywords: dict

    :param show_header: Flag indicating if InaSAFE logo etc. should be
        added above the keywords table. Default is True.
    :type show_header: bool

    :returns: A safe message object containing a table.
    :rtype: safe.messaging.Message
    """
    if keywords is None and self.layer is not None:
        keywords = self.read_keywords(self.layer)

    # This order was determined in issue #2313
    preferred_order = [
        'title',
        'layer_purpose',
        'exposure',
        'hazard',
        'hazard_category',
        'layer_geometry',
        'layer_mode',
        'classification',
        'exposure_unit',
        'continuous_hazard_unit',
        'value_map',   # attribute values
        'thresholds',  # attribute values
        'value_maps',  # attribute values
        'inasafe_fields',
        'inasafe_default_values',
        'resample',
        'source',
        'url',
        'scale',
        'license',
        'date',
        'extra_keywords',
        'keyword_version'
    ]  # everything else in arbitrary order

    report = m.Message()
    if show_header:
        logo_element = m.Brand()
        report.add(logo_element)
        report.add(m.Heading(tr(
            'Layer keywords:'), **styles.BLUE_LEVEL_4_STYLE))
        report.add(m.Text(tr(
            'The following keywords are defined for the active layer:')))

    table = m.Table(style_class='table table-condensed table-striped')
    # First render out the preferred order keywords
    for keyword in preferred_order:
        if keyword in keywords:
            value = keywords[keyword]
            row = self._keyword_to_row(keyword, value)
            keywords.pop(keyword)
            table.add(row)

    # now render out any remaining keywords in arbitrary order
    for keyword in keywords:
        value = keywords[keyword]
        row = self._keyword_to_row(keyword, value)
        table.add(row)

    # If the keywords class was instantiated with a layer object
    # we can add some context info not stored in the keywords themselves
    # but that is still useful to see...
    if self.layer:
        # First the CRS
        keyword = tr('Reference system')
        value = self.layer.crs().authid()
        row = self._keyword_to_row(keyword, value)
        table.add(row)
        # Next the data source
        keyword = tr('Layer source')
        value = self.layer.publicSource()
        # Hide password
        row = self._keyword_to_row(keyword, value, wrap_slash=True)
        table.add(row)

    # Finalise the report
    report.add(table)
    return report
Format keywords as a message object. .. versionadded:: 3.2 .. versionchanged:: 3.3 - default keywords to None The message object can then be rendered to html, plain text etc. :param keywords: Keywords to be converted to a message. Optional. If not passed then we will attempt to get keywords from self.layer if it is not None. :type keywords: dict :param show_header: Flag indicating if InaSAFE logo etc. should be added above the keywords table. Default is True. :type show_header: bool :returns: A safe message object containing a table. :rtype: safe.messaging.Message
def _to_dict(self):
    """Return a json dictionary representing this model."""
    _dict = {}
    if hasattr(self, 'classifier_id') and self.classifier_id is not None:
        _dict['classifier_id'] = self.classifier_id
    if hasattr(self, 'url') and self.url is not None:
        _dict['url'] = self.url
    if hasattr(self, 'collection') and self.collection is not None:
        _dict['collection'] = [x._to_dict() for x in self.collection]
    return _dict
Return a json dictionary representing this model.
def load_wmt_en_fr_dataset(path='data'):
    """Load WMT'15 English-to-French translation dataset.

    It will download the data from the WMT'15 Website (10^9-French-English
    corpus), and the 2013 news test from the same site as development set.
    Returns the directories of training data and test data.

    Parameters
    ----------
    path : str
        The path that the data is downloaded to, defaults is ``data/wmt_en_fr/``.

    References
    ----------
    - Code modified from /tensorflow/models/rnn/translation/data_utils.py

    Notes
    -----
    Usually, it will take a long time to download this dataset.
    """
    path = os.path.join(path, 'wmt_en_fr')
    # URLs for WMT data.
    _WMT_ENFR_TRAIN_URL = "http://www.statmt.org/wmt10/"
    _WMT_ENFR_DEV_URL = "http://www.statmt.org/wmt15/"

    def gunzip_file(gz_path, new_path):
        """Unzips from gz_path into new_path."""
        logging.info("Unpacking %s to %s" % (gz_path, new_path))
        with gzip.open(gz_path, "rb") as gz_file:
            with open(new_path, "wb") as new_file:
                for line in gz_file:
                    new_file.write(line)

    def get_wmt_enfr_train_set(path):
        """Download the WMT en-fr training corpus to directory unless it's there."""
        filename = "training-giga-fren.tar"
        maybe_download_and_extract(filename, path, _WMT_ENFR_TRAIN_URL,
                                   extract=True)
        train_path = os.path.join(path, "giga-fren.release2.fixed")
        gunzip_file(train_path + ".fr.gz", train_path + ".fr")
        gunzip_file(train_path + ".en.gz", train_path + ".en")
        return train_path

    def get_wmt_enfr_dev_set(path):
        """Download the WMT en-fr dev corpus to directory unless it's there."""
        filename = "dev-v2.tgz"
        dev_file = maybe_download_and_extract(filename, path, _WMT_ENFR_DEV_URL,
                                              extract=False)
        dev_name = "newstest2013"
        dev_path = os.path.join(path, "newstest2013")
        if not (gfile.Exists(dev_path + ".fr") and gfile.Exists(dev_path + ".en")):
            logging.info("Extracting tgz file %s" % dev_file)
            with tarfile.open(dev_file, "r:gz") as dev_tar:
                fr_dev_file = dev_tar.getmember("dev/" + dev_name + ".fr")
                en_dev_file = dev_tar.getmember("dev/" + dev_name + ".en")
                fr_dev_file.name = dev_name + ".fr"  # Extract without "dev/" prefix.
                en_dev_file.name = dev_name + ".en"
                dev_tar.extract(fr_dev_file, path)
                dev_tar.extract(en_dev_file, path)
        return dev_path

    logging.info("Load or Download WMT English-to-French translation > {}".format(path))
    train_path = get_wmt_enfr_train_set(path)
    dev_path = get_wmt_enfr_dev_set(path)
    return train_path, dev_path
Load WMT'15 English-to-French translation dataset. It will download the data from the WMT'15 Website (10^9-French-English corpus), and the 2013 news test from the same site as development set. Returns the directories of training data and test data. Parameters ---------- path : str The path that the data is downloaded to, defaults is ``data/wmt_en_fr/``. References ---------- - Code modified from /tensorflow/models/rnn/translation/data_utils.py Notes ----- Usually, it will take a long time to download this dataset.
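The gunzip_file helper above can be written more compactly with shutil, streaming the decompression without loading the file into memory; the paths here are placeholders.

import gzip
import shutil

def gunzip(gz_path, new_path):
    # stream-decompress a .gz file into a plain file
    with gzip.open(gz_path, "rb") as src, open(new_path, "wb") as dst:
        shutil.copyfileobj(src, dst)

gunzip("corpus.en.gz", "corpus.en")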
def envelope(self, header, body):
    """
    Build the B{<Envelope/>} for a SOAP outbound message.

    @param header: The SOAP message B{header}.
    @type header: L{Element}
    @param body: The SOAP message B{body}.
    @type body: L{Element}
    @return: The SOAP envelope containing the body and header.
    @rtype: L{Element}
    """
    env = Element("Envelope", ns=envns)
    env.addPrefix(Namespace.xsins[0], Namespace.xsins[1])
    env.append(header)
    env.append(body)
    return env
Build the B{<Envelope/>} for a SOAP outbound message. @param header: The SOAP message B{header}. @type header: L{Element} @param body: The SOAP message B{body}. @type body: L{Element} @return: The SOAP envelope containing the body and header. @rtype: L{Element}
def read_config(self, filename):
    """
    Returns data found in the config file (as dict), or raises an
    exception if the file is not found
    """
    if not os.path.exists(filename):
        raise Exception("Configuration file cannot be found: %s" % filename)
    with io.open(filename, encoding='UTF-8') as stream:
        return yaml.safe_load(stream)
Returns data found in the config file (as dict), or raises an exception if the file is not found
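Round-trip illustration of the loader above; the file name and keys are invented for the example.

import io
import yaml

with io.open("settings.yml", "w", encoding="UTF-8") as fh:
    fh.write("host: example.org\nport: 8080\n")

with io.open("settings.yml", encoding="UTF-8") as stream:
    cfg = yaml.safe_load(stream)

assert cfg == {"host": "example.org", "port": 8080}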
def save_uca(self, rootpath, raw=False, as_int=False):
    """ Saves the upstream contributing area to a file
    """
    self.save_array(self.uca, None, 'uca', rootpath, raw, as_int=as_int)
Saves the upstream contributing area to a file
def add(self, *args):
    """
    This method adds buttons to the keyboard while not exceeding row_width.

    E.g. ReplyKeyboardMarkup#add("A", "B", "C") yields the json result
    {keyboard: [["A"], ["B"], ["C"]]} when row_width is set to 1.
    When row_width is set to 2, the following is the result of this function:
    {keyboard: [["A", "B"], ["C"]]}
    See https://core.telegram.org/bots/api#replykeyboardmarkup

    :param args: KeyboardButton objects to append to the keyboard
    """
    i = 1
    row = []
    for button in args:
        row.append(button.to_dic())
        if i % self.row_width == 0:
            self.keyboard.append(row)
            row = []
        i += 1
    if len(row) > 0:
        self.keyboard.append(row)
This method adds buttons to the keyboard while not exceeding row_width. E.g. ReplyKeyboardMarkup#add("A", "B", "C") yields the json result {keyboard: [["A"], ["B"], ["C"]]} when row_width is set to 1. When row_width is set to 2, the following is the result of this function: {keyboard: [["A", "B"], ["C"]]} See https://core.telegram.org/bots/api#replykeyboardmarkup :param args: KeyboardButton objects to append to the keyboard
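The row-filling rule above, reduced to plain data so it can be checked in isolation; the helper name is invented.

def chunk_rows(buttons, row_width):
    keyboard, row = [], []
    for i, button in enumerate(buttons, start=1):
        row.append(button)
        if i % row_width == 0:
            keyboard.append(row)
            row = []
    if row:
        keyboard.append(row)  # trailing partial row
    return keyboard

assert chunk_rows(["A", "B", "C"], 2) == [["A", "B"], ["C"]]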
def addfield(self, pkt, s, i):
    """
    There is a hack with the _ExtensionsField.i2len. It works only because
    we expect _ExtensionsField.i2m to return a string of the same size (if
    not of the same value) upon successive calls (e.g. through i2len here,
    then i2m when directly building the _ExtensionsField).

    XXX A proper way to do this would be to keep the extensions built from
    the i2len call here, instead of rebuilding them later on.
    """
    if i is None:
        if self.length_of is not None:
            fld, fval = pkt.getfield_and_val(self.length_of)

            tmp = pkt.tls_session.frozen
            pkt.tls_session.frozen = True
            f = fld.i2len(pkt, fval)
            pkt.tls_session.frozen = tmp

            i = self.adjust(pkt, f)
            if i == 0:  # for correct build if no ext and not explicitly 0
                return s
    return s + struct.pack(self.fmt, i)
There is a hack with the _ExtensionsField.i2len. It works only because we expect _ExtensionsField.i2m to return a string of the same size (if not of the same value) upon successive calls (e.g. through i2len here, then i2m when directly building the _ExtensionsField). XXX A proper way to do this would be to keep the extensions built from the i2len call here, instead of rebuilding them later on.
async def track_event(event, state, service_name):
    """
    Store state of events in memory

    :param event: Event object
    :param state: EventState object
    :param service_name: Name of the service
    """
    redis = await aioredis.create_redis(
        (EVENT_TRACKING_HOST, 6379), loop=loop)
    now = datetime.utcnow()
    event_id = event.event_id

    tracking_data = json.dumps({
        "event_id": event_id,
        "timestamp": str(now),
        "state": state
    })

    await redis.rpush(service_name, tracking_data)
    redis.close()
    await redis.wait_closed()
Store state of events in memory :param event: Event object :param state: EventState object :param service_name: Name of the service
def quantile(y, k=4):
    """
    Calculates the quantiles for an array

    Parameters
    ----------
    y : array
        (n,1), values to classify
    k : int
        number of quantiles

    Returns
    -------
    q : array
        (n,1), quantile values

    Examples
    --------
    >>> import numpy as np
    >>> import mapclassify as mc
    >>> x = np.arange(1000)
    >>> mc.classifiers.quantile(x)
    array([249.75, 499.5 , 749.25, 999.  ])
    >>> mc.classifiers.quantile(x, k=3)
    array([333., 666., 999.])

    Note that if there are enough ties that the quantile values repeat, we
    collapse to pseudo quantiles in which case the number of classes will be
    less than k

    >>> x = [1.0] * 100
    >>> x.extend([3.0] * 40)
    >>> len(x)
    140
    >>> y = np.array(x)
    >>> mc.classifiers.quantile(y)
    array([1., 3.])
    """
    w = 100. / k
    p = np.arange(w, 100 + w, w)
    if p[-1] > 100.0:
        p[-1] = 100.0
    q = np.array([stats.scoreatpercentile(y, pct) for pct in p])
    q = np.unique(q)
    k_q = len(q)
    if k_q < k:
        Warn('Warning: Not enough unique values in array to form k classes',
             UserWarning)
        Warn('Warning: setting k to %d' % k_q, UserWarning)
    return q
Calculates the quantiles for an array Parameters ---------- y : array (n,1), values to classify k : int number of quantiles Returns ------- q : array (n,1), quantile values Examples -------- >>> import numpy as np >>> import mapclassify as mc >>> x = np.arange(1000) >>> mc.classifiers.quantile(x) array([249.75, 499.5 , 749.25, 999. ]) >>> mc.classifiers.quantile(x, k = 3) array([333., 666., 999.]) Note that if there are enough ties that the quantile values repeat, we collapse to pseudo quantiles in which case the number of classes will be less than k >>> x = [1.0] * 100 >>> x.extend([3.0] * 40) >>> len(x) 140 >>> y = np.array(x) >>> mc.classifiers.quantile(y) array([1., 3.])
def set_ccc(ctx, management_key, pin):
    """
    Generate and set a CCC on the YubiKey.
    """
    controller = ctx.obj['controller']
    _ensure_authenticated(ctx, controller, pin, management_key)
    controller.update_ccc()
Generate and set a CCC on the YubiKey.
def working_yesterday(self, date_from=None, date_format=None):
    """
    Return yesterday's date, from now or from a supplied date, counting
    only working days: Monday thus becomes Saturday, and Saturday becomes
    Friday.

    :param date_from: reference date
    :return: datetime
    """
    # yesterday's date, skipping only over Sunday (days_range covers
    # Monday to Saturday)
    return self.delta(days=-1, date_from=date_from, date_format=date_format,
                      days_range=[1, 2, 3, 4, 5, 6])
Return yesterday's date, from now or from a supplied date, counting only working days: Monday thus becomes Saturday, and Saturday becomes Friday. :param date_from: reference date :return: datetime
def __RetrieveContent(host, port, adapter, version, path, keyFile, certFile,
                      thumbprint, sslContext,
                      connectionPoolTimeout=CONNECTION_POOL_IDLE_TIMEOUT_SEC):
    """
    Retrieve service instance for connection.

    @param host: Which host to connect to.
    @type  host: string
    @param port: Port
    @type  port: int
    @param adapter: Adapter
    @type  adapter: string
    @param version: Version
    @type  version: string
    @param path: Path
    @type  path: string
    @param keyFile: ssl key file path
    @type  keyFile: string
    @param certFile: ssl cert file path
    @type  certFile: string
    @param connectionPoolTimeout: Timeout in secs for idle connections to close,
        specify negative numbers for never closing the connections
    @type  connectionPoolTimeout: int
    """
    # XXX remove the adapter and service arguments once dependent code is fixed
    if adapter != "SOAP":
        raise ValueError(adapter)

    # Create the SOAP stub adapter
    stub = SoapStubAdapter(host, port, version=version, path=path,
                           certKeyFile=keyFile, certFile=certFile,
                           thumbprint=thumbprint, sslContext=sslContext,
                           connectionPoolTimeout=connectionPoolTimeout)

    # Get Service instance
    si = vim.ServiceInstance("ServiceInstance", stub)
    content = None
    try:
        content = si.RetrieveContent()
    except vmodl.MethodFault:
        raise
    except Exception as e:
        # NOTE (hartsock): preserve the traceback for diagnostics
        # pulling and preserving the traceback makes diagnosing connection
        # failures easier since the fault will also include where inside the
        # library the fault occurred. Without the traceback we have no idea
        # why the connection failed beyond the message string.
        (type, value, traceback) = sys.exc_info()
        if traceback:
            fault = vim.fault.HostConnectFault(msg=str(e))
            reraise(vim.fault.HostConnectFault, fault, traceback)
        else:
            raise vim.fault.HostConnectFault(msg=str(e))

    return content, si, stub
Retrieve service instance for connection. @param host: Which host to connect to. @type host: string @param port: Port @type port: int @param adapter: Adapter @type adapter: string @param version: Version @type version: string @param path: Path @type path: string @param keyFile: ssl key file path @type keyFile: string @param certFile: ssl cert file path @type certFile: string @param connectionPoolTimeout: Timeout in secs for idle connections to close, specify negative numbers for never closing the connections @type connectionPoolTimeout: int
def run(self, root): """Searches for <div class="math"> that are children in <p> tags and corrects the invalid HTML that results""" math_tag_class = self.pelican_mathjax_extension.getConfig('math_tag_class') for parent in root: div_math = [] children = list(parent) for div in parent.findall('div'): if div.get('class') == math_tag_class: div_math.append(children.index(div)) # Do not process further if no displayed math has been found if not div_math: continue insert_idx = list(root).index(parent) self.correct_html(root, children, div_math, insert_idx, parent.text) root.remove(parent) # Parent must be removed last for correct insertion index return root
Searches for <div class="math"> that are children in <p> tags and corrects the invalid HTML that results
async def invoke(self, *args, **kwargs):
        r"""|coro|

        Calls a command with the arguments given.

        This is useful if you want to just call the callback that a
        :class:`.Command` holds internally.

        Note
        ------
        You do not pass in the context as it is done for you.

        Warning
        ---------
        The first parameter passed **must** be the command being invoked.

        Parameters
        -----------
        command: :class:`.Command`
            A command or subclass of a command that is going to be called.
        \*args
            The arguments to use.
        \*\*kwargs
            The keyword arguments to use.
        """
        try:
            command = args[0]
        except IndexError:
            raise TypeError('Missing command to invoke.') from None

        arguments = []
        if command.cog is not None:
            arguments.append(command.cog)

        arguments.append(self)
        arguments.extend(args[1:])

        ret = await command.callback(*arguments, **kwargs)
        return ret
r"""|coro| Calls a command with the arguments given. This is useful if you want to just call the callback that a :class:`.Command` holds internally. Note ------ You do not pass in the context as it is done for you. Warning --------- The first parameter passed **must** be the command being invoked. Parameters ----------- command: :class:`.Command` A command or subclass of a command that is going to be called. \*args The arguments to to use. \*\*kwargs The keyword arguments to use.
def show_data(self, item): """ show data key-value in ListCtrl for tree item """ child, cookie = self.mainview_tree.GetFirstChild(item) child_list = [] while child.IsOk(): child_list.append(child) child, cookie = self.mainview_tree.GetNextChild(item, cookie) lc = self.nodeview_lc lc.DeleteAllItems() for i, child in enumerate(child_list): text = self.mainview_tree.GetItemText(child) try: k, v = [s.strip() for s in text.split(':')] except ValueError: k, v = text, '...' idx = lc.InsertItem(MAXNROW, v) lc.SetItem(idx, 1, k)
show data key-value in ListCtrl for tree item
def fit_model(ts, sc=None):
    """
    Fits a GARCH(1, 1) model to the given time series.

    Parameters
    ----------
    ts: the time series to which we want to fit a GARCH model as a Numpy array
    sc: the SparkContext, required

    Returns a GARCH model
    """
    assert sc is not None, "Missing SparkContext"

    jvm = sc._jvm
    jmodel = jvm.com.cloudera.sparkts.models.GARCH.fitModel(_py2java(sc, Vectors.dense(ts)))
    return GARCHModel(jmodel=jmodel, sc=sc)
Fits a GARCH(1, 1) model to the given time series.

    Parameters
    ----------
    ts: the time series to which we want to fit a GARCH model as a Numpy array
    sc: the SparkContext, required

    Returns a GARCH model
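A hedged usage sketch (assumes PySpark is installed and the spark-ts JAR is on the JVM classpath; the return series is synthetic):

import numpy as np
from pyspark import SparkContext

sc = SparkContext(appName="garch-demo")  # assumes a local Spark installation
returns = np.random.normal(0.0, 0.01, 500)  # synthetic return series
model = fit_model(returns, sc=sc)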
def update_vlan(self, name, vid, vni):
        """Adds a new vlan to vni mapping for the interface

        EosVersion:
            4.13.7M

        Args:
            name (str): The name of the interface to configure
            vid (str, int): The vlan id to map to the vni
            vni (str, int): The vni value to use

        Returns:
            True if the command completes successfully
        """
        cmd = 'vxlan vlan %s vni %s' % (vid, vni)
        return self.configure_interface(name, cmd)
Adds a new vlan to vni mapping for the interface

        EosVersion:
            4.13.7M

        Args:
            name (str): The name of the interface to configure
            vid (str, int): The vlan id to map to the vni
            vni (str, int): The vni value to use

        Returns:
            True if the command completes successfully
def plot(cg):
    """
    Plot the call graph using matplotlib
    For larger graphs, this should not be used, as it is very slow
    and you will likely not be able to discern anything on it.

    :param cg: A networkx call graph to plot
    """
    from androguard.core.analysis.analysis import ExternalMethod
    import matplotlib.pyplot as plt
    import networkx as nx
    pos = nx.spring_layout(cg)

    internal = []
    external = []

    for n in cg.node:
        if isinstance(n, ExternalMethod):
            external.append(n)
        else:
            internal.append(n)

    nx.draw_networkx_nodes(cg, pos=pos, node_color='r', nodelist=internal)
    nx.draw_networkx_nodes(cg, pos=pos, node_color='b', nodelist=external)
    nx.draw_networkx_edges(cg, pos, arrows=True)
    nx.draw_networkx_labels(cg, pos=pos,
                            labels={x: "{} {}".format(x.get_class_name(), x.get_name())
                                    for x in cg.edge})
    plt.draw()
    plt.show()
Plot the call graph using matplotlib
    For larger graphs, this should not be used, as it is very slow
    and you will likely not be able to discern anything on it.

    :param cg: A networkx call graph to plot
def _on_timeout(self, _attempts=0):
        """
        Called when the request associated with this ResponseFuture times out.

        This function may reschedule itself. The ``_attempts`` parameter tracks
        the number of times this has happened. This parameter should only be
        set in those cases where ``_on_timeout`` reschedules itself.
        """
        # PYTHON-853: for short timeouts, we sometimes race with our __init__
        if self._connection is None and _attempts < 3:
            self._timer = self.session.cluster.connection_class.create_timer(
                0.01,
                partial(self._on_timeout, _attempts=_attempts + 1)
            )
            return

        if self._connection is not None:
            try:
                self._connection._requests.pop(self._req_id)
            # This prevents the race condition of the
            # event loop thread just receiving the waited message
            # If it arrives after this, it will be ignored
            except KeyError:
                return

            pool = self.session._pools.get(self._current_host)
            if pool and not pool.is_shutdown:
                with self._connection.lock:
                    self._connection.request_ids.append(self._req_id)

                pool.return_connection(self._connection)

        errors = self._errors
        if not errors:
            if self.is_schema_agreed:
                key = str(self._current_host.endpoint) if self._current_host else 'no host queried before timeout'
                errors = {key: "Client request timeout. See Session.execute[_async](timeout)"}
            else:
                connection = self.session.cluster.control_connection._connection
                host = str(connection.endpoint) if connection else 'unknown'
                errors = {host: "Request timed out while waiting for schema agreement. See Session.execute[_async](timeout) and Cluster.max_schema_agreement_wait."}

        self._set_final_exception(OperationTimedOut(errors, self._current_host))
Called when the request associated with this ResponseFuture times out.

        This function may reschedule itself. The ``_attempts`` parameter tracks
        the number of times this has happened. This parameter should only be
        set in those cases where ``_on_timeout`` reschedules itself.
def _get_paths(): """Generate paths to test data. Done in a function to protect namespace a bit.""" import os base_path = os.path.dirname(os.path.abspath(__file__)) test_data_dir = os.path.join(base_path, 'tests', 'data', 'Plate01') test_data_file = os.path.join(test_data_dir, 'RFP_Well_A3.fcs') return test_data_dir, test_data_file
Generate paths to test data. Done in a function to protect namespace a bit.
def disqus_sso_script(context): """ Provides a generic context variable which adds single-sign-on support to DISQUS if ``COMMENTS_DISQUS_API_PUBLIC_KEY`` and ``COMMENTS_DISQUS_API_SECRET_KEY`` are specified. """ settings = context["settings"] public_key = getattr(settings, "COMMENTS_DISQUS_API_PUBLIC_KEY", "") secret_key = getattr(settings, "COMMENTS_DISQUS_API_SECRET_KEY", "") user = context["request"].user if public_key and secret_key and user.is_authenticated(): context["public_key"] = public_key context["sso_data"] = _get_disqus_sso(user, public_key, secret_key) return context
Provides a generic context variable which adds single-sign-on support to DISQUS if ``COMMENTS_DISQUS_API_PUBLIC_KEY`` and ``COMMENTS_DISQUS_API_SECRET_KEY`` are specified.
def resetPassword(self, email=True):
        """
        Resets a user's password for an account. The password will be randomly
        generated and emailed by the system.

        Input:
           email - boolean indicating that a password email will be sent to the
                   user's profile email address. The default is True.
        """
        url = self.root + "/reset"
        params = {
            "f" : "json",
            "email" : email
        }
        return self._post(url=url,
                          param_dict=params,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
Resets a user's password for an account. The password will be randomly
        generated and emailed by the system.

        Input:
           email - boolean indicating that a password email will be sent to the
                   user's profile email address. The default is True.
def file_needs_update(target_file, source_file):
    """Checks if target_file is missing or differs from source_file

    :param target_file: File target for a copy action
    :param source_file: File to be copied
    :return: True if target_file does not exist or differs from source_file, else False
    :rtype: bool
    """
    return not os.path.isfile(target_file) or \
        get_md5_file_hash(target_file) != get_md5_file_hash(source_file)
Checks if target_file is missing or differs from source_file

    :param target_file: File target for a copy action
    :param source_file: File to be copied
    :return: True if target_file does not exist or differs from source_file, else False
    :rtype: bool
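A typical caller copies only when needed. For a self-contained sketch, the module's get_md5_file_hash helper is approximated here with hashlib (the real helper may differ, e.g. by hashing in chunks):

import hashlib
import shutil

def get_md5_file_hash(path):
    # sketch of the assumed helper: MD5 over the raw file bytes
    with open(path, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest()

if file_needs_update('build/config.yaml', 'src/config.yaml'):
    shutil.copy2('src/config.yaml', 'build/config.yaml')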
def build(self, pre=None, shortest=False): """Build the ``Ref`` instance by fetching the rule from the GramFuzzer instance and building it :param list pre: The prerequisites list :param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated. """ global REF_LEVEL REF_LEVEL += 1 try: if pre is None: pre = [] #print("{:04d} - {} - {}:{}".format(REF_LEVEL, shortest, self.cat, self.refname)) definition = self.fuzzer.get_ref(self.cat, self.refname) res = utils.val( definition, pre, shortest=(shortest or REF_LEVEL >= self.max_recursion) ) return res # this needs to happen no matter what finally: REF_LEVEL -= 1
Build the ``Ref`` instance by fetching the rule from the GramFuzzer instance and building it :param list pre: The prerequisites list :param bool shortest: Whether or not the shortest reference-chain (most minimal) version of the field should be generated.
def to_dict(self, fields=_all_fields, labels=None):
        """
        Encode the node as a dictionary suitable for JSON serialization.

        Args:
            fields: if given, this is a whitelist of fields to include
                on nodes (`daughters` and `form` are always shown)
            labels: optional label annotations to embed in the
                derivation dict; the value is a list of lists matching
                the structure of the derivation (e.g.,
                `["S" ["NP" ["NNS" ["Dogs"]]] ["VP" ["VBZ" ["bark"]]]]`)
        Returns:
            dict: the dictionary representation of the structure
        """
        fields = set(fields)
        diff = fields.difference(_all_fields)
        if diff:
            raise ValueError(
                'Invalid field(s): {}'.format(', '.join(diff))
            )
        if isinstance(labels, Sequence):
            labels = _map_labels(self, labels)
        elif labels is None:
            labels = {}
        return _to_dict(self, fields, labels)
Encode the node as a dictionary suitable for JSON serialization. Args: fields: if given, this is a whitelist of fields to include on nodes (`daughters` and `form` are always shown) labels: optional label annotations to embed in the derivation dict; the value is a list of lists matching the structure of the derivation (e.g., `["S" ["NP" ["NNS" ["Dogs"]]] ["VP" ["VBZ" ["bark"]]]]`) Returns: dict: the dictionary representation of the structure
def split_unquoted_newlines(stmt): """Split a string on all unquoted newlines. Unlike str.splitlines(), this will ignore CR/LF/CR+LF if the requisite character is inside of a string.""" text = text_type(stmt) lines = SPLIT_REGEX.split(text) outputlines = [''] for line in lines: if not line: continue elif LINE_MATCH.match(line): outputlines.append('') else: outputlines[-1] += line return outputlines
Split a string on all unquoted newlines. Unlike str.splitlines(), this will ignore CR/LF/CR+LF if the requisite character is inside of a string.
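This helper comes from sqlparse's internals; SPLIT_REGEX and LINE_MATCH are module globals assumed to tokenize bare CR/LF/CR+LF. The expected behavior is roughly:

sql = "select 'a\nb' as col\nfrom t"
print(split_unquoted_newlines(sql))
# expected: ["select 'a\nb' as col", 'from t'] -- the newline inside quotes survives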
def load_backends(self): """ Loads all the backends setup in settings.py. """ for name, backend_settings in settings.storage.iteritems(): backend_path = backend_settings['backend'] backend_module, backend_cls = backend_path.rsplit('.', 1) backend_module = import_module(backend_module) # Create an instance of the configured backend. backend_constructor = getattr(backend_module, backend_cls) self.backends[name] = backend_constructor(name, self.namespaces, **backend_settings)
Loads all the backends setup in settings.py.
def pattern_to_regex(cls, pattern):
    """
    Convert the pattern into a regular expression.

    *pattern* (:class:`unicode` or :class:`bytes`) is the pattern to
    convert into a regular expression.

    Returns the uncompiled regular expression (:class:`unicode`, :class:`bytes`,
    or :data:`None`), and whether matched files should be included
    (:data:`True`), excluded (:data:`False`), or if it is a
    null-operation (:data:`None`).
    """
    if isinstance(pattern, unicode):
        return_type = unicode
    elif isinstance(pattern, bytes):
        return_type = bytes
        pattern = pattern.decode(_BYTES_ENCODING)
    else:
        raise TypeError("pattern:{!r} is not a unicode or byte string.".format(pattern))

    pattern = pattern.strip()

    if pattern.startswith('#'):
        # A pattern starting with a hash ('#') serves as a comment
        # (neither includes nor excludes files). Escape the hash with a
        # back-slash to match a literal hash (i.e., '\#').
        regex = None
        include = None

    elif pattern == '/':
        # EDGE CASE: According to `git check-ignore` (v2.4.1), a single
        # '/' does not match any file.
        regex = None
        include = None

    elif pattern:
        if pattern.startswith('!'):
            # A pattern starting with an exclamation mark ('!') negates the
            # pattern (exclude instead of include). Escape the exclamation
            # mark with a back-slash to match a literal exclamation mark
            # (i.e., '\!').
            include = False
            # Remove leading exclamation mark.
            pattern = pattern[1:]
        else:
            include = True

        if pattern.startswith('\\'):
            # Remove leading back-slash escape for escaped hash ('#') or
            # exclamation mark ('!').
            pattern = pattern[1:]

        # Split pattern into segments.
        pattern_segs = pattern.split('/')

        # Normalize pattern to make processing easier.

        if not pattern_segs[0]:
            # A pattern beginning with a slash ('/') will only match paths
            # directly on the root directory instead of any descendant
            # paths. So, remove empty first segment to make pattern relative
            # to root.
            del pattern_segs[0]

        elif len(pattern_segs) == 1 or (len(pattern_segs) == 2 and not pattern_segs[1]):
            # A single pattern without a beginning slash ('/') will match
            # any descendant path. This is equivalent to "**/{pattern}". So,
            # prepend with double-asterisks to make pattern relative to
            # root.
            # EDGE CASE: This also holds for a single pattern with a
            # trailing slash (e.g. dir/).
            if pattern_segs[0] != '**':
                pattern_segs.insert(0, '**')

        else:
            # EDGE CASE: A pattern without a beginning slash ('/') but
            # contains at least one prepended directory (e.g.
            # "dir/{pattern}") should not match "**/dir/{pattern}",
            # according to `git check-ignore` (v2.4.1).
            pass

        if not pattern_segs[-1] and len(pattern_segs) > 1:
            # A pattern ending with a slash ('/') will match all descendant
            # paths if it is a directory but not if it is a regular file.
            # This is equivalent to "{pattern}/**". So, set last segment to
            # double asterisks to include all descendants.
            pattern_segs[-1] = '**'

        # Build regular expression from pattern.
        output = ['^']
        need_slash = False
        end = len(pattern_segs) - 1
        for i, seg in enumerate(pattern_segs):
            if seg == '**':
                if i == 0 and i == end:
                    # A pattern consisting solely of double-asterisks ('**')
                    # will match every path.
                    output.append('.+')
                elif i == 0:
                    # A normalized pattern beginning with double-asterisks
                    # ('**') will match any leading path segments.
                    output.append('(?:.+/)?')
                    need_slash = False
                elif i == end:
                    # A normalized pattern ending with double-asterisks ('**')
                    # will match any trailing path segments.
                    output.append('/.*')
                else:
                    # A pattern with inner double-asterisks ('**') will match
                    # multiple (or zero) inner path segments.
output.append('(?:/.+)?') need_slash = True elif seg == '*': # Match single path segment. if need_slash: output.append('/') output.append('[^/]+') need_slash = True else: # Match segment glob pattern. if need_slash: output.append('/') output.append(cls._translate_segment_glob(seg)) if i == end and include is True: # A pattern ending without a slash ('/') will match a file # or a directory (with paths underneath it). E.g., "foo" # matches "foo", "foo/bar", "foo/bar/baz", etc. # EDGE CASE: However, this does not hold for exclusion cases # according to `git check-ignore` (v2.4.1). output.append('(?:/.*)?') need_slash = True output.append('$') regex = ''.join(output) else: # A blank pattern is a null-operation (neither includes nor # excludes files). regex = None include = None if regex is not None and return_type is bytes: regex = regex.encode(_BYTES_ENCODING) return regex, include
Convert the pattern into a regular expression. *pattern* (:class:`unicode` or :class:`bytes`) is the pattern to convert into a regular expression. Returns the uncompiled regular expression (:class:`unicode`, :class:`bytes`, or :data:`None`), and whether matched files should be included (:data:`True`), excluded (:data:`False`), or if it is a null-operation (:data:`None`).
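A hedged usage sketch, assuming this classmethod lives on pathspec's GitWildMatchPattern (the class name and import path are assumptions; the exact regex text can vary between versions):

import re
from pathspec.patterns import GitWildMatchPattern  # assumed import path

regex, include = GitWildMatchPattern.pattern_to_regex('*.py')
assert include is True
matcher = re.compile(regex)
print(bool(matcher.match('pkg/mod.py')))   # True: '*.py' matches at any depth
print(bool(matcher.match('docs/readme')))  # False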
def get_list(self, id, name=None): ''' Get a list Returns: List: The list with the given `id` ''' return self.create_list(dict(id=id, name=name))
Get a list Returns: List: The list with the given `id`
def preprocessing_declaration(job, config): """ Declare jobs related to preprocessing :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Argparse Namespace object containing argument inputs """ if config.preprocessing: job.fileStore.logToMaster('Ran preprocessing: ' + config.uuid) disk = '1G' if config.ci_test else '20G' mem = '2G' if config.ci_test else '10G' processed_normal = job.wrapJobFn(run_gatk_preprocessing, config.normal_bam, config.normal_bai, config.reference, config.dict, config.fai, config.phase, config.mills, config.dbsnp, mem, cores=1, memory=mem, disk=disk) processed_tumor = job.wrapJobFn(run_gatk_preprocessing, config.tumor_bam, config.tumor_bai, config.reference, config.dict, config.fai, config.phase, config.mills, config.dbsnp, mem, cores=1, memory=mem, disk=disk) static_workflow = job.wrapJobFn(static_workflow_declaration, config, processed_normal.rv(0), processed_normal.rv(1), processed_tumor.rv(0), processed_tumor.rv(1)) job.addChild(processed_normal) job.addChild(processed_tumor) job.addFollowOn(static_workflow) else: job.addFollowOnJobFn(static_workflow_declaration, config, config.normal_bam, config.normal_bai, config.tumor_bam, config.tumor_bai)
Declare jobs related to preprocessing :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Argparse Namespace object containing argument inputs
def add_transformers(line):
    '''Extract the transformer names from a line of code of the form
       from __experimental__ import transformer1 [,...]
       and add them to the globally known dict
    '''
    assert FROM_EXPERIMENTAL.match(line)

    line = FROM_EXPERIMENTAL.sub(' ', line)
    # we now have: " transformer1 [,...]"
    line = line.split("#")[0]    # remove any end of line comments
    # and insert each transformer as an item in a list
    for trans in line.replace(' ', '').split(','):
        import_transformer(trans)
Extract the transformer names from a line of code of the form
       from __experimental__ import transformer1 [,...]
       and add them to the globally known dict
async def send(self, message: Union[str, bytes], binary: bool=False, compress: Optional[int]=None) -> None: """Send a frame over the websocket with message as its payload.""" if isinstance(message, str): message = message.encode('utf-8') if binary: await self._send_frame(message, WSMsgType.BINARY, compress) else: await self._send_frame(message, WSMsgType.TEXT, compress)
Send a frame over the websocket with message as its payload.
def initialize(self, training_info, model, environment, device): """ Initialize policy gradient from reinforcer settings """ if self.trust_region: self.average_model = self.model_factory.instantiate(action_space=environment.action_space).to(device) self.average_model.load_state_dict(model.state_dict())
Initialize policy gradient from reinforcer settings
def _prepare_sample(data, run_folder): """Extract passed keywords from input LIMS information. """ want = set(["description", "files", "genome_build", "name", "analysis", "upload", "algorithm"]) out = {} for k, v in data.items(): if k in want: out[k] = _relative_paths(v, run_folder) if "algorithm" not in out: analysis, algorithm = _select_default_algorithm(out.get("analysis")) out["algorithm"] = algorithm out["analysis"] = analysis description = "%s-%s" % (out["name"], clean_name(out["description"])) out["name"] = [out["name"], description] out["description"] = description return out
Extract passed keywords from input LIMS information.
def check_privatenet(self):
        """
        Check if privatenet is running, and if container is same as the current Chains/privnet database.

        Raises:
            PrivnetConnectionError: if the private net couldn't be reached or the nonce does not match
        """
        rpc_settings.setup(self.RPC_LIST)
        client = RPCClient()

        try:
            version = client.get_version()
        except NEORPCException:
            raise PrivnetConnectionError("Error: private network container doesn't seem to be running, or RPC is not enabled.")

        print("Privatenet useragent '%s', nonce: %s" % (version["useragent"], version["nonce"]))

        # Now check if nonce is the same as in the chain path
        nonce_container = str(version["nonce"])
        neopy_chain_meta_filename = os.path.join(self.chain_leveldb_path, ".privnet-nonce")
        if os.path.isfile(neopy_chain_meta_filename):
            with open(neopy_chain_meta_filename, "r") as f:
                nonce_chain = f.read()
            if nonce_chain != nonce_container:
                raise PrivnetConnectionError(
                    "Chain database in Chains/privnet is for a different private network than the current container. "
                    "Consider deleting the Chain directory with 'rm -rf %s*'." % self.chain_leveldb_path
                )
        else:
            # When the Chains/privnet folder is removed, we need to create the directory
            if not os.path.isdir(self.chain_leveldb_path):
                os.mkdir(self.chain_leveldb_path)

            # Write the nonce to the meta file
            with open(neopy_chain_meta_filename, "w") as f:
                f.write(nonce_container)
Check if privatenet is running, and if container is same as the current Chains/privnet database. Raises: PrivnetConnectionError: if the private net couldn't be reached or the nonce does not match
def pilot_PLL(xr,fq,fs,loop_type,Bn,zeta):
    """
    theta, phi_error = pilot_PLL(xr,fq,fs,loop_type,Bn,zeta)

    Mark Wickert, April 2014
    """
    T = 1/float(fs)
    # Set the VCO gain in Hz/V
    Kv = 1.0
    # Design a lowpass filter to remove the double freq term
    Norder = 5
    b_lp,a_lp = signal.butter(Norder,2*(fq/2.)/float(fs))
    fstate = np.zeros(Norder) # LPF state vector

    Kv = 2*np.pi*Kv # convert Kv in Hz/v to rad/s/v

    if loop_type == 1:
        # First-order loop parameters
        fn = Bn
        Kt = 2*np.pi*fn # loop natural frequency in rad/s
    elif loop_type == 2:
        # Second-order loop parameters
        fn = 1/(2*np.pi)*2*Bn/(zeta + 1/(4*zeta)) # given Bn in Hz
        Kt = 4*np.pi*zeta*fn # loop natural frequency in rad/s
        a = np.pi*fn/zeta
    else:
        raise ValueError('Loop type must be 1 or 2')

    # Initialize integration approximation filters
    filt_in_last = 0
    filt_out_last = 0
    vco_in_last = 0
    vco_out = 0
    vco_out_last = 0

    # Initialize working and final output vectors
    n = np.arange(0,len(xr))
    theta = np.zeros(len(xr))
    ev = np.zeros(len(xr))
    phi_error = np.zeros(len(xr))
    # Normalize total power in an attempt to make the 19kHz sinusoid
    # component have amplitude ~1.
    #xr = xr/(2/3*std(xr));
    # Begin the simulation loop
    for kk in range(len(n)):
        # Sinusoidal phase detector (simple multiplier)
        phi_error[kk] = 2*xr[kk]*np.sin(vco_out)
        # LPF to remove double frequency term
        phi_error[kk],fstate = signal.lfilter(b_lp,a_lp,np.array([phi_error[kk]]),zi=fstate)
        pd_out = phi_error[kk]
        #pd_out = 0
        # Loop gain
        gain_out = Kt/Kv*pd_out # apply VCO gain at VCO
        # Loop filter
        if loop_type == 2:
            filt_in = a*gain_out
            filt_out = filt_out_last + T/2.*(filt_in + filt_in_last)
            filt_in_last = filt_in
            filt_out_last = filt_out
            filt_out = filt_out + gain_out
        else:
            filt_out = gain_out
        # VCO
        vco_in = filt_out + fq/(Kv/(2*np.pi)) # bias to quiescent freq.
        vco_out = vco_out_last + T/2.*(vco_in + vco_in_last)
        vco_in_last = vco_in
        vco_out_last = vco_out
        vco_out = Kv*vco_out # apply Kv
        # Measured loop signals
        ev[kk] = filt_out
        theta[kk] = np.mod(vco_out,2*np.pi) # the VCO phase mod 2pi

    return theta,phi_error
theta, phi_error = pilot_PLL(xr,fq,fs,loop_type,Bn,zeta) Mark Wickert, April 2014
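A minimal tracking sketch (assumes numpy as np and scipy.signal as signal, as the function itself does; the pilot frequency and loop constants are illustrative):

import numpy as np

fs = 48000  # sample rate in Hz
t = np.arange(int(0.2 * fs)) / fs
xr = np.cos(2 * np.pi * 19000 * t + 0.7)  # 19 kHz pilot with a phase offset
theta, phi_error = pilot_PLL(xr, 19000, fs, loop_type=2, Bn=50.0, zeta=0.707)
# once the loop pulls in, phi_error should settle near zero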
def create_context_menu(self, event, shape): ''' Parameters ---------- event : gtk.gdk.Event GTK mouse click event. shape : str Electrode shape identifier (e.g., `"electrode028"`). Returns ------- gtk.Menu Context menu. .. versionchanged:: 0.13 - Deprecate hard-coded commands (e.g., clear electrodes, clear routes). - Add anonymous global commands section at head of menu (i.e., commands not specific to an electrode or route). - Add "Electrode" and "Route(s)" sub-menus. ''' routes = self.df_routes.loc[self.df_routes.electrode_i == shape, 'route_i'].astype(int).unique().tolist() def _connect_callback(menu_item, command_signal, group, command, data): callback_called = threading.Event() def _callback(signal, widget, *args): if callback_called.is_set(): return callback_called.set() _L().debug('`%s`: %s %s %s', signal, group, command, data) gtk.idle_add(self.emit, command_signal, group, command, data) menu_item.connect('activate', ft.partial(_callback, 'activate')) menu_item.connect('button-press-event', ft.partial(_callback, 'button-press-event')) if group is not None: menu_item.set_tooltip_text(group) menu = gtk.Menu() # Add menu items/groups for registered global commands. if self.global_commands: data = {'event': event.copy()} command_signal = 'global-command' for group, commands in self.global_commands.iteritems(): for command, title in commands.iteritems(): menu_item_j = gtk.MenuItem(title) menu.append(menu_item_j) _connect_callback(menu_item_j, command_signal, group, command, data) # Add menu items/groups for registered electrode commands. if self.electrode_commands: separator = gtk.SeparatorMenuItem() menu.append(separator) # Add electrode sub-menu. menu_e = gtk.Menu() menu_head_e = gtk.MenuItem('_Electrode') menu_head_e.set_submenu(menu_e) menu_head_e.set_use_underline(True) menu.append(menu_head_e) command_signal = 'electrode-command' data = {'electrode_id': shape, 'event': event.copy()} for group, commands in self.electrode_commands.iteritems(): for command, title in commands.iteritems(): menu_item_j = gtk.MenuItem(title) menu_e.append(menu_item_j) _connect_callback(menu_item_j, command_signal, group, command, data) # Add menu items/groups for registered route commands. if routes and self.route_commands: # TODO: Refactor electrode/route command menu code to reduce code # duplication (i.e., DRY). separator = gtk.SeparatorMenuItem() menu.append(separator) # Add route sub-menu. menu_r = gtk.Menu() menu_head_r = gtk.MenuItem('_Route(s)') menu_head_r.set_submenu(menu_r) menu_head_r.set_use_underline(True) menu.append(menu_head_r) command_signal = 'route-command' data = {'route_ids': routes, 'event': event.copy()} for group, commands in self.route_commands.iteritems(): for command, title in commands.iteritems(): menu_item_j = gtk.MenuItem(title) menu_r.append(menu_item_j) _connect_callback(menu_item_j, command_signal, group, command, data) menu.show_all() return menu
Parameters ---------- event : gtk.gdk.Event GTK mouse click event. shape : str Electrode shape identifier (e.g., `"electrode028"`). Returns ------- gtk.Menu Context menu. .. versionchanged:: 0.13 - Deprecate hard-coded commands (e.g., clear electrodes, clear routes). - Add anonymous global commands section at head of menu (i.e., commands not specific to an electrode or route). - Add "Electrode" and "Route(s)" sub-menus.
def delete_fastqs(job, patient_dict): """ Delete the fastqs from the job Store once their purpose has been achieved (i.e. after all mapping steps) :param dict patient_dict: Dict of list of input fastqs """ for key in patient_dict.keys(): if 'fastq' not in key: continue job.fileStore.logToMaster('Deleting "%s:%s" ' % (patient_dict['patient_id'], key) + 'from the filestore.') job.fileStore.deleteGlobalFile(patient_dict[key]) return None
Delete the fastqs from the job Store once their purpose has been achieved (i.e. after all mapping steps) :param dict patient_dict: Dict of list of input fastqs
async def update_template_context(self, context: dict) -> None: """Update the provided template context. This adds additional context from the various template context processors. Arguments: context: The context to update (mutate). """ processors = self.template_context_processors[None] if has_request_context(): blueprint = _request_ctx_stack.top.request.blueprint if blueprint is not None and blueprint in self.template_context_processors: processors = chain(processors, self.template_context_processors[blueprint]) # type: ignore # noqa extra_context: dict = {} for processor in processors: extra_context.update(await processor()) original = context.copy() context.update(extra_context) context.update(original)
Update the provided template context. This adds additional context from the various template context processors. Arguments: context: The context to update (mutate).
def tangent_approx(f: SYM, x: SYM, a: SYM = None, assert_linear: bool = False) -> SYM:
    """
    Create a tangent approximation of a non-linear function f(x) about point a
    using a block lower triangular solver

    0 = f(x) = f(a) + J*x   # taylor series about a (if f(x) linear in x, then globally valid)
    J*x = -f(a)             # solve for x
    x = -J^{-1}f(a)         # but inverse is slow, so we use solve

    where J = df/dx
    """
    # find f(a)
    if a is None:
        a = ca.DM.zeros(x.numel(), 1)
    f_a = ca.substitute(f, x, a)  # f(a)
    J = ca.jacobian(f, x)
    if assert_linear and ca.depends_on(J, x):
        raise AssertionError('not linear')
    # solve is smart enough to convert to blt if necessary
    return ca.solve(J, -f_a)
Create a tangent approximation of a non-linear function f(x) about point a using a block lower triangular solver 0 = f(x) = f(a) + J*x # taylor series about a (if f(x) linear in x, then globally valid) J*x = -f(a) # solve for x x = -J^{-1}f(a) # but inverse is slow, so we use solve where J = df/dx
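For a linear system the helper reduces to a plain linear solve; a minimal sketch with CasADi (using the same ca alias as this module):

import casadi as ca

x = ca.SX.sym('x', 2)
A = ca.DM([[2.0, 1.0], [1.0, 3.0]])
b = ca.DM([1.0, 2.0])
f = ca.mtimes(A, x) - b  # f(0) = -b, J = A
sol = tangent_approx(f, x, assert_linear=True)
print(sol)  # the solution of A*x = b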
def parse(self): """ parse data """ url = self.config.get('url') self.cnml = CNMLParser(url) self.parsed_data = self.cnml.getNodes()
parse data
def get_qtls_from_mapqtl_data(matrix, threshold, inputfile):
    """Extract the QTLs found by MapQTL reading its file.
    This assumes that there is only one QTL per linkage group.

    :arg matrix, the MapQTL file read in memory
    :arg threshold, threshold used to determine if a given LOD value is
        reflective of the presence of a QTL.
    :arg inputfile, name of the inputfile in which the QTLs have been
        found
    """
    trait_name = inputfile.split(')_', 1)[1].split('.mqo')[0]
    qtls = []
    qtl = None
    for entry in matrix[1:]:
        if qtl is None:
            qtl = entry
        if qtl[1] != entry[1]:
            if float(qtl[4]) > float(threshold):
                qtl[0] = trait_name
                qtls.append(qtl)
            qtl = entry
        if entry[4] == '':  # pragma: no cover
            entry[4] = 0
        if qtl[4] == '':  # pragma: no cover
            qtl[4] = 0
        if float(entry[4]) > float(qtl[4]):
            qtl = entry
    if float(qtl[4]) > float(threshold):
        qtl[0] = trait_name
        if qtl not in qtls:
            qtls.append(qtl)
    return qtls
Extract the QTLs found by MapQTL reading its file.
    This assumes that there is only one QTL per linkage group.

    :arg matrix, the MapQTL file read in memory
    :arg threshold, threshold used to determine if a given LOD value is
        reflective of the presence of a QTL.
    :arg inputfile, name of the inputfile in which the QTLs have been
        found
def get_importable_modules(folder): """Find all module files in the given folder that end with '.py' and don't start with an underscore. @return module names @rtype: iterator of string """ for fname in os.listdir(folder): if fname.endswith('.py') and not fname.startswith('_'): yield fname[:-3]
Find all module files in the given folder that end with '.py' and don't start with an underscore. @return module names @rtype: iterator of string
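For example, to enumerate plugin modules living next to the current file (a sketch):

import os

here = os.path.dirname(os.path.abspath(__file__))
for module_name in get_importable_modules(here):
    print(module_name)  # e.g. 'plugin_a' for a file named plugin_a.py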
def _cleanup_label(label):
    """
    Reformat the ALL CAPS OMIM labels to something more pleasant to read.
    This will:
    1.  remove the abbreviation suffixes
    2.  convert the roman numerals to integer numbers
    3.  make the text title case,
        except for supplied conjunctions/prepositions/articles
    :param label:
    :return:
    """
    conjunctions = ['and', 'but', 'yet', 'for', 'nor', 'so']
    little_preps = [
        'at', 'by', 'in', 'of', 'on', 'to', 'up', 'as', 'it', 'or']
    articles = ['a', 'an', 'the']

    # remove the abbreviation
    lbl = label.split(r';')[0]

    fixedwords = []
    i = 0
    for wrd in lbl.split():
        i += 1
        # convert the roman numerals to numbers,
        # but assume that the first word is not
        # a roman numeral (this permits things like "X inactivation")
        if i > 1 and re.match(romanNumeralPattern, wrd):
            n = fromRoman(wrd)
            # make the assumption that the number of syndromes is <100
            # this allows me to retain "SYNDROME C"
            # and not convert it to "SYNDROME 100"
            if 0 < n < 100:
                # get the non-roman suffix, if present.
                # for example, IIIB or IVA
                suffix = wrd.replace(toRoman(n), '', 1)
                fixed = ''.join((str(n), suffix))
                wrd = fixed
        # capitalize first letter
        wrd = wrd.title()
        # replace interior conjunctions, prepositions,
        # and articles with lowercase
        if wrd.lower() in (conjunctions+little_preps+articles) and i != 1:
            wrd = wrd.lower()
        fixedwords.append(wrd)
    lbl = ' '.join(fixedwords)
    # print (label, '-->', lbl)
    return lbl
Reformat the ALL CAPS OMIM labels to something more pleasant to read.
    This will:
    1.  remove the abbreviation suffixes
    2.  convert the roman numerals to integer numbers
    3.  make the text title case,
        except for supplied conjunctions/prepositions/articles
    :param label:
    :return:
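Assuming romanNumeralPattern, fromRoman and toRoman behave as in the roman package (the label below is illustrative), the rewrite looks roughly like:

print(_cleanup_label('MARFAN SYNDROME, TYPE II; MFS2'))
# expected: 'Marfan Syndrome, Type 2' -- suffix dropped, roman numeral converted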
def prefix_dict_keys(d: Dict[str, Any], prefix: str) -> Dict[str, Any]:
    """
    Returns a dictionary that's a copy of ``d`` but with ``prefix``
    prepended to its keys.
    """
    result = {}  # type: Dict[str, Any]
    for k, v in d.items():
        result[prefix + k] = v
    return result
Returns a dictionary that's a copy of ``d`` but with ``prefix``
    prepended to its keys.
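For example, namespacing a couple of hyperparameter keys:

print(prefix_dict_keys({'rate': 0.1, 'depth': 3}, 'model_'))
# {'model_rate': 0.1, 'model_depth': 3}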
def delete(self): """ Remove this file """ self.room.check_owner() self.conn.make_call("deleteFiles", [self.fid])
Remove this file
def get(self, request, *args, **kwargs): """ Do the login and password protection. """ response = super(EntryProtectionMixin, self).get( request, *args, **kwargs) if self.object.login_required and not request.user.is_authenticated: return self.login() if (self.object.password and self.object.password != self.request.session.get(self.session_key % self.object.pk)): return self.password() return response
Do the login and password protection.
def makeNetwork(self):
        """Makes graph object from .gdf loaded data"""
        if "weight" in self.data_friendships.keys():
            self.G = G = x.DiGraph()
        else:
            self.G = G = x.Graph()
        F = self.data_friends
        for friendn in range(self.n_friends):
            if "posts" in F.keys():
                G.add_node(F["name"][friendn],
                           label=F["label"][friendn],
                           posts=F["posts"][friendn])
            elif "agerank" in F.keys():
                G.add_node(F["name"][friendn],
                           label=F["label"][friendn],
                           gender=F["sex"][friendn],
                           locale=F["locale"][friendn],
                           agerank=F["agerank"][friendn])
            else:
                G.add_node(F["name"][friendn],
                           label=F["label"][friendn],
                           gender=F["sex"][friendn],
                           locale=F["locale"][friendn])
        F = self.data_friendships
        for friendshipn in range(self.n_friendships):
            if "weight" in F.keys():
                G.add_edge(F["node1"][friendshipn], F["node2"][friendshipn],
                           weight=F["weight"][friendshipn])
            else:
                G.add_edge(F["node1"][friendshipn], F["node2"][friendshipn])
Makes graph object from .gdf loaded data
def get_or_create_head(root): """Ensures that `root` contains a <head> element and returns it. """ head = _create_cssselector("head")(root) if not head: head = etree.Element("head") body = _create_cssselector("body")(root)[0] body.getparent().insert(0, head) return head else: return head[0]
Ensures that `root` contains a <head> element and returns it.
def search(self, text, lookup=None): '''Returns a new :class:`Query` for :attr:`Manager.model` with a full text search value.''' return self.query().search(text, lookup=lookup)
Returns a new :class:`Query` for :attr:`Manager.model` with a full text search value.
def revoke_token(self, token, headers=None, **kwargs): """ Revoke an access token """ self._check_configuration("site", "revoke_uri") url = "%s%s" % (self.site, quote(self.revoke_url)) data = {'token': token} data.update(kwargs) return self._make_request(url, data=data, headers=headers)
Revoke an access token
def __call_api(self, path, params=None, api_url=FORECAST_URL):
        """
        Call the datapoint api using the requests module
        """
        if not params:
            params = dict()
        payload = {'key': self.api_key}
        payload.update(params)
        url = "%s/%s" % (api_url, path)

        # Add a timeout to the request.
        # The value of 1 second is based on attempting 100 connections to
        # datapoint and taking ten times the mean connection time (rounded up).
        # Could expose to users in the functions which need to call the api.
        #req = requests.get(url, params=payload, timeout=1)
        # The wrapper function __retry_session returns a requests.Session
        # object. This has a .get() function like requests.get(), so the
        # usage doesn't change here.
        sess = self.__retry_session()
        req = sess.get(url, params=payload, timeout=1)

        try:
            data = req.json()
        except ValueError:
            raise APIException("DataPoint has not returned any data, this could be due to an incorrect API key")
        self.call_response = data
        if req.status_code != 200:
            msg = [data[m] for m in ("message", "error_message", "status") \
                   if m in data][0]
            raise Exception(msg)
        return data
Call the datapoint api using the requests module
def plot_3d_dist(Z, X, Y, N=1000, AxisOffset=0, Angle=-40, LowLim=None, HighLim=None, show_fig=True):
    """
    Plots Z, X and Y as a 3d scatter plot with heatmaps of each axis pair.

    Parameters
    ----------
    Z : ndarray
        Array of Z positions with time
    X : ndarray
        Array of X positions with time
    Y : ndarray
        Array of Y positions with time
    N : optional, int
        Number of time points to plot (Defaults to 1000)
    AxisOffset : optional, double
        Offset to add to each axis from the data - used to get a better view
        of the heat maps (Defaults to 0)
    Angle : optional, int
        Azimuthal viewing angle in degrees passed to view_init
        (Defaults to -40)
    LowLim : optional, double
        Lower limit of x, y and z axis
    HighLim : optional, double
        Upper limit of x, y and z axis
    show_fig : optional, bool
        Whether to show the produced figure before returning

    Returns
    -------
    fig : matplotlib.figure.Figure object
        The figure object created
    ax : matplotlib.axes.Axes object
        The subplot object created
    """
    angle = Angle
    fig = _plt.figure(figsize=(8, 6))
    ax = fig.add_subplot(111, projection='3d')

    y = Z[0:N]
    x = X[0:N]
    z = Y[0:N]

    ax.scatter(x, y, z, alpha=0.3)

    xlim = ax.get_xlim()
    ylim = ax.get_ylim()
    zlim = ax.get_zlim()
    if LowLim is not None:
        lowLim = LowLim - AxisOffset
    else:
        lowLim = min([xlim[0], ylim[0], zlim[0]]) - AxisOffset
    if HighLim is not None:
        highLim = HighLim + AxisOffset
    else:
        highLim = max([xlim[1], ylim[1], zlim[1]]) + AxisOffset
    ax.set_xlim([lowLim, highLim])
    ax.set_ylim([lowLim, highLim])
    ax.set_zlim([lowLim, highLim])
    ax.set_xlabel("x")
    ax.set_ylabel("z")
    ax.set_zlabel("y")
    ax.view_init(30, angle)

    h, yedges, zedges = _np.histogram2d(y, z, bins=50)
    h = h.transpose()
    normalized_map = _plt.cm.Blues(h/h.max())
    yy, zz = _np.meshgrid(yedges, zedges)
    xpos = lowLim # Plane of histogram
    xflat = _np.full_like(yy, xpos)
    p = ax.plot_surface(xflat, yy, zz, facecolors=normalized_map, rstride=1, cstride=1, shade=False)

    h, xedges, zedges = _np.histogram2d(x, z, bins=50)
    h = h.transpose()
    normalized_map = _plt.cm.Blues(h/h.max())
    xx, zz = _np.meshgrid(xedges, zedges)
    ypos = highLim # Plane of histogram
    yflat = _np.full_like(xx, ypos)
    p = ax.plot_surface(xx, yflat, zz, facecolors=normalized_map, rstride=1, cstride=1, shade=False)

    h, yedges, xedges = _np.histogram2d(y, x, bins=50)
    h = h.transpose()
    normalized_map = _plt.cm.Blues(h/h.max())
    yy, xx = _np.meshgrid(yedges, xedges)
    zpos = lowLim # Plane of histogram
    zflat = _np.full_like(yy, zpos)
    p = ax.plot_surface(xx, yy, zflat, facecolors=normalized_map, rstride=1, cstride=1, shade=False)
    if show_fig:
        _plt.show()
    return fig, ax
Plots Z, X and Y as a 3d scatter plot with heatmaps of each axis pair.

    Parameters
    ----------
    Z : ndarray
        Array of Z positions with time
    X : ndarray
        Array of X positions with time
    Y : ndarray
        Array of Y positions with time
    N : optional, int
        Number of time points to plot (Defaults to 1000)
    AxisOffset : optional, double
        Offset to add to each axis from the data - used to get a better view
        of the heat maps (Defaults to 0)
    Angle : optional, int
        Azimuthal viewing angle in degrees passed to view_init
        (Defaults to -40)
    LowLim : optional, double
        Lower limit of x, y and z axis
    HighLim : optional, double
        Upper limit of x, y and z axis
    show_fig : optional, bool
        Whether to show the produced figure before returning

    Returns
    -------
    fig : matplotlib.figure.Figure object
        The figure object created
    ax : matplotlib.axes.Axes object
        The subplot object created
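A quick demo with synthetic positions (a sketch; assumes the module's _np/_plt imports and the mplot3d toolkit are available):

import numpy as np

rng = np.random.default_rng(0)
Z = rng.normal(0.0, 1.0, 5000)
X = rng.normal(0.0, 0.5, 5000)
Y = rng.normal(0.0, 2.0, 5000)
fig, ax = plot_3d_dist(Z, X, Y, N=1000, AxisOffset=0.5, show_fig=False)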
def trade_signals_handler(self, signals): ''' Process buy and sell signals from the simulation ''' alloc = {} if signals['buy'] or signals['sell']: # Compute the optimal portfolio allocation, # Using user defined function try: alloc, e_ret, e_risk = self.optimize( self.date, signals['buy'], signals['sell'], self._optimizer_parameters) except Exception, error: raise PortfolioOptimizationFailed( reason=error, date=self.date, data=signals) return _remove_useless_orders(alloc)
Process buy and sell signals from the simulation
def _kwarg(self, kwargs, kwname, default=None):
        """
        Resolves keyword arguments from constructor or :doc:`config`.

        .. note::

            The keyword arguments take this order of precedence:

            1. Arguments passed to constructor through the
               :func:`authomatic.login`.
            2. Provider specific arguments from :doc:`config`.
            3. Arguments from :doc:`config` set in the ``__defaults__`` key.
            4. The value from :data:`default` argument.

        :param dict kwargs:
            Keyword arguments dictionary.
        :param str kwname:
            Name of the desired keyword argument.
        """
        return kwargs.get(kwname) or \
            self.settings.config.get(self.name, {}).get(kwname) or \
            self.settings.config.get('__defaults__', {}).get(kwname) or \
            default
Resolves keyword arguments from constructor or :doc:`config`.

        .. note::

            The keyword arguments take this order of precedence:

            1. Arguments passed to constructor through the
               :func:`authomatic.login`.
            2. Provider specific arguments from :doc:`config`.
            3. Arguments from :doc:`config` set in the ``__defaults__`` key.
            4. The value from :data:`default` argument.

        :param dict kwargs:
            Keyword arguments dictionary.
        :param str kwname:
            Name of the desired keyword argument.
def to_csv(weekmatrices, filename, digits=5):
    """
    Exports a list of week-matrices to a specified filename in the CSV format.

    Parameters
    ----------
    weekmatrices : list
        The week-matrices to export.
    filename : string
        Path for the exported CSV file.
    digits : int, optional
        Number of decimal places kept when formatting float values
        (defaults to 5).
    """
    with open(filename, 'w') as f:
        w = csv.writer(f, lineterminator='\n')
        w.writerow(['year_week', 'channel', 'weekday', 'section', 'value'])

        def make_repr(item):
            if item is None:
                return None
            elif isinstance(item, float):
                return repr(round(item, digits))
            else:
                return str(item)

        for row in weekmatrices:
            w.writerow([make_repr(item) for item in row])
Exports a list of week-matrices to a specified filename in the CSV format.

    Parameters
    ----------
    weekmatrices : list
        The week-matrices to export.
    filename : string
        Path for the exported CSV file.
    digits : int, optional
        Number of decimal places kept when formatting float values
        (defaults to 5).
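Each week-matrix row is a flat 5-tuple matching the header; the values below are illustrative:

rows = [
    ('2014-01', 'call', 0, 2, 0.4166667),  # year_week, channel, weekday, section, value
    ('2014-01', 'text', 0, 2, None),       # None becomes an empty CSV field
]
to_csv(rows, 'weekmatrices.csv', digits=3)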
def plot_clock_diagrams(self, colormap="summer"):
        """Plotting clock diagrams - one or more rings around residue name and id (and chain id).
        The rings show the fraction of simulation time this residue has spent in the
        vicinity of the ligand - characterised by distance.
        """
        cmap = plt.get_cmap(colormap)
        for res in self.topology_data.dict_of_plotted_res:
            colors = [cmap(i) for i in numpy.linspace(0, 1, len(self.topology_data.dict_of_plotted_res[res]))]
            traj_colors_ = {traj: colors[i] for i, traj in enumerate(self.topology_data.dict_of_plotted_res[res])}
            plt.figure(figsize=(2.25, 2.25))
            ring_number = [sum(1 for x in v if x) for k, v in self.topology_data.dict_of_plotted_res.items()][0]
            self.topology_data.ring_number = ring_number
            rings = []
            # When only a few rings to plot they can be thicker
            if ring_number < 2:
                width = 0.3
            else:
                width = 0.2
            for ring in range(0, ring_number):
                ring, _ = plt.pie([self.topology_data.dict_of_plotted_res[res][ring],
                                   1 - self.topology_data.dict_of_plotted_res[res][ring]],
                                  radius=0.9 + width * (ring + 1),
                                  startangle=90,
                                  colors=[colors[ring], "white"],
                                  counterclock=False)
                rings = rings + ring
            plt.setp(rings, width=width)
            if len(self.topology_data.universe.protein.segments) <= 1:
                # Settings with domain
                plt.text(-0.0, -0.62, res[0] + "\n" + res[1], ha='center', size=32, fontweight='bold')
            else:
                plt.text(-0.0, -0.72, res[0] + "\n" + res[1] + "\n" + res[2], ha='center', size=25, fontweight='bold')
            pylab.savefig(res[1] + res[2] + ".svg", dpi=300, transparent=True)
Plotting clock diagrams - one or more rings around residue name and id (and chain id).
        The rings show the fraction of simulation time this residue has spent in the
        vicinity of the ligand - characterised by distance.
def get_attribute_values(self, att_name):
        """
        Returns the values of attribute "att_name" of CPE Name.
        By default, only one element in each part.

        :param string att_name: Attribute name to get
        :returns: List of attribute values
        :rtype: list
        :exception: ValueError - invalid attribute name
        """
        lc = []
        if not CPEComponent.is_valid_attribute(att_name):
            errmsg = "Invalid attribute name: {0}".format(att_name)
            raise ValueError(errmsg)

        for pk in CPE.CPE_PART_KEYS:
            elements = self.get(pk)
            for elem in elements:
                comp = elem.get(att_name)

                if (isinstance(comp, CPEComponentEmpty) or
                        isinstance(comp, CPEComponentUndefined)):
                    value = CPEComponent1_1.VALUE_EMPTY
                else:
                    value = comp.get_value()

                lc.append(value)
        return lc
Returns the values of attribute "att_name" of CPE Name.
        By default, only one element in each part.

        :param string att_name: Attribute name to get
        :returns: List of attribute values
        :rtype: list
        :exception: ValueError - invalid attribute name
def list_vms_sub(access_token, subscription_id): '''List VMs in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of a list of VM model views. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/providers/Microsoft.Compute/virtualMachines', '?api-version=', COMP_API]) return do_get_next(endpoint, access_token)
List VMs in a subscription. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. Returns: HTTP response. JSON body of a list of VM model views.
def send_async(self, transaction, headers=None): """Submit a transaction to the Federation with the mode `async`. Args: transaction (dict): the transaction to be sent to the Federation node(s). headers (dict): Optional headers to pass to the request. Returns: dict: The transaction sent to the Federation node(s). """ return self.transport.forward_request( method='POST', path=self.path, json=transaction, params={'mode': 'async'}, headers=headers)
Submit a transaction to the Federation with the mode `async`. Args: transaction (dict): the transaction to be sent to the Federation node(s). headers (dict): Optional headers to pass to the request. Returns: dict: The transaction sent to the Federation node(s).
def collection_keys(coll, sep='.'): """Get a list of all (including nested) keys in a collection. Examines the first document in the collection. :param sep: Separator for nested keys :return: List of str """ def _keys(x, pre=''): for k in x: yield (pre + k) if isinstance(x[k], dict): for nested in _keys(x[k], pre + k + sep): yield nested return list(_keys(coll.find_one()))
Get a list of all (including nested) keys in a collection. Examines the first document in the collection. :param sep: Separator for nested keys :return: List of str
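A hedged example against a local MongoDB (key order follows the first document; _id is added by the server):

from pymongo import MongoClient

coll = MongoClient()['demo']['events']
coll.insert_one({'user': 'a', 'meta': {'ip': '127.0.0.1', 'geo': {'cc': 'US'}}})
print(collection_keys(coll))
# e.g. ['_id', 'user', 'meta', 'meta.ip', 'meta.geo', 'meta.geo.cc']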
def extract(args): """ %prog extract [--options] ace_file Extract contigs from ace file and if necessary reformat header with a pipe(|) separated list of constituent reads. """ p = OptionParser(extract.__doc__) p.add_option("--format", default=False, action="store_true", help="enable flag to reformat header into a symbol separated list of constituent reads "+ \ "[default: %default]") p.add_option("--singlets", default=False, action="store_true", help="ask the program to look in the singlets file (should be in the same folder) for " +\ "unused reads and put them in the resultant fasta file [default: %default]") p.set_sep(sep="|", help="Separator used to list the reads in the FASTA header") opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) acefile, = args ace = Ace.read(must_open(acefile)) logging.debug('Loaded ace file {0}'.format(acefile)) fastafile = acefile.rsplit(".", 1)[0] + ".fasta" fw = open(fastafile, "w") for c in ace.contigs: id = c.name if opts.format: id = opts.sep.join([read.name for read in c.af]) seqrec = SeqRecord(Seq(c.sequence), id=id, description="") SeqIO.write([seqrec], fw, "fasta") if opts.singlets: singletsfile = acefile.rsplit(".", 1)[0] + ".singlets" if os.path.getsize(singletsfile) > 0: fp = SeqIO.parse(must_open(singletsfile), "fasta") for rec in fp: SeqIO.write(rec, fw, "fasta") fw.close() logging.debug('Wrote contigs to fasta file {0}'.format(fastafile))
%prog extract [--options] ace_file Extract contigs from ace file and if necessary reformat header with a pipe(|) separated list of constituent reads.
def list_storage_accounts_rg(access_token, subscription_id, rgname): '''List the storage accounts in the specified resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response. JSON body list of storage accounts. ''' endpoint = ''.join([get_rm_endpoint(), '/subscriptions/', subscription_id, '/resourcegroups/', rgname, '/providers/Microsoft.Storage/storageAccounts', '?api-version=', STORAGE_API]) return do_get(endpoint, access_token)
List the storage accounts in the specified resource group. Args: access_token (str): A valid Azure authentication token. subscription_id (str): Azure subscription id. rgname (str): Azure resource group name. Returns: HTTP response. JSON body list of storage accounts.
def display_monthly_returns(self): """ Display a table containing monthly returns and ytd returns for every year in range. """ data = [['Year', 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec', 'YTD']] for k in self.return_table.index: r = self.return_table.loc[k].values data.append([k] + [fmtpn(x) for x in r]) print(tabulate(data, headers='firstrow'))
Display a table containing monthly returns and ytd returns for every year in range.
def _get_rest_doc(self, request, start_response): """Sends back HTTP response with API directory. This calls start_response and returns the response body. It will return the discovery doc for the requested api/version. Args: request: An ApiRequest, the transformed request sent to the Discovery API. start_response: A function with semantics defined in PEP-333. Returns: A string, the response body. """ api = request.body_json['api'] version = request.body_json['version'] generator = discovery_generator.DiscoveryGenerator(request=request) services = [s for s in self._backend.api_services if s.api_info.name == api and s.api_info.api_version == version] doc = generator.pretty_print_config_to_json(services) if not doc: error_msg = ('Failed to convert .api to discovery doc for ' 'version %s of api %s') % (version, api) _logger.error('%s', error_msg) return util.send_wsgi_error_response(error_msg, start_response) return self._send_success_response(doc, start_response)
Sends back HTTP response with API directory. This calls start_response and returns the response body. It will return the discovery doc for the requested api/version. Args: request: An ApiRequest, the transformed request sent to the Discovery API. start_response: A function with semantics defined in PEP-333. Returns: A string, the response body.
def _run_hooks(self, name, module): """ Run all hooks for a module. """ hooks = self.post_load_hooks.pop(name, []) for hook in hooks: hook(module)
Run all hooks for a module.
def RemoveObject(self, identifier): """Removes a cached object based on the identifier. This method ignores the cache value reference count. Args: identifier (str): VFS object identifier. Raises: KeyError: if the VFS object is not found in the cache. """ if identifier not in self._values: raise KeyError('Missing cached object for identifier: {0:s}'.format( identifier)) del self._values[identifier]
Removes a cached object based on the identifier. This method ignores the cache value reference count. Args: identifier (str): VFS object identifier. Raises: KeyError: if the VFS object is not found in the cache.