Columns: positive (string, lengths 100 to 30.3k) · anchor (string, lengths 1 to 15k)
def forge_relationship(self, left_id, left_type, right_id, right_type, rel_type='Related To', rel_date=None, rel_confidence='high', rel_reason=''): """ Forges a relationship between two TLOs. Args: left_id: The CRITs ID of the first indicator left_type: The CRITs TLO type of the first indicator right_id: The CRITs ID of the second indicator right_type: The CRITs TLO type of the second indicator rel_type: The relationship type ("Related To", etc) rel_date: datetime.datetime object for the date of the relationship. If left blank, it will be datetime.datetime.now() rel_confidence: The relationship confidence (high, medium, low) rel_reason: Reason for the relationship. Returns: True if the relationship was created. False otherwise. """ if not rel_date: rel_date = datetime.datetime.now() type_trans = self._type_translation(left_type) submit_url = '{}/{}/{}/'.format(self.url, type_trans, left_id) params = { 'api_key': self.api_key, 'username': self.username, } data = { 'action': 'forge_relationship', 'right_type': right_type, 'right_id': right_id, 'rel_type': rel_type, 'rel_date': rel_date, 'rel_confidence': rel_confidence, 'rel_reason': rel_reason } r = requests.patch(submit_url, params=params, data=data, proxies=self.proxies, verify=self.verify) if r.status_code == 200: log.debug('Relationship built successfully: {0} <-> ' '{1}'.format(left_id, right_id)) return True else: log.error('Error with status code {0} and message {1} between ' 'these indicators: {2} <-> ' '{3}'.format(r.status_code, r.text, left_id, right_id)) return False
Forges a relationship between two TLOs. Args: left_id: The CRITs ID of the first indicator left_type: The CRITs TLO type of the first indicator right_id: The CRITs ID of the second indicator right_type: The CRITs TLO type of the second indicator rel_type: The relationship type ("Related To", etc) rel_date: datetime.datetime object for the date of the relationship. If left blank, it will be datetime.datetime.now() rel_confidence: The relationship confidence (high, medium, low) rel_reason: Reason for the relationship. Returns: True if the relationship was created. False otherwise.
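A hedged usage sketch for the method above; the client class name, endpoint URL, and TLO IDs below are invented for illustration and not taken from the source.

# Hypothetical usage; `CritsClient` and its constructor arguments are assumptions.
client = CritsClient(url='https://crits.example.org/api/v1',
                     api_key='<api-key>', username='analyst')
ok = client.forge_relationship('57f4c1aa', 'Indicator',
                               '58aa90bb', 'Sample',
                               rel_confidence='medium',
                               rel_reason='shared C2 infrastructure')
# ok is True when the PATCH returned HTTP 200, False otherwise.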
def get_constraint(self, twig=None, **kwargs): """ Filter in the 'constraint' context :parameter str constraint: name of the constraint (optional) :parameter **kwargs: any other tags to do the filter (except constraint or context) :return: :class:`phoebe.parameters.parameters.ParameterSet` """ if twig is not None: kwargs['twig'] = twig kwargs['context'] = 'constraint' return self.get(**kwargs)
Filter in the 'constraint' context :parameter str constraint: name of the constraint (optional) :parameter **kwargs: any other tags to do the filter (except constraint or context) :return: :class:`phoebe.parameters.parameters.ParameterSet`
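A minimal usage sketch, assuming a PHOEBE bundle `b`; the twig name 'asini' is illustrative only.

ps = b.get_constraint(twig='asini')   # hypothetical twig; returns a ParameterSet
all_constraints = b.get_constraint()  # every parameter tagged context='constraint'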
def _Rzderiv(self,R,z,phi=0.,t=0.): """ NAME: _Rzderiv PURPOSE: evaluate the mixed radial, vertical derivative for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the mixed radial, vertical derivative """ if not self.isNonAxi: phi= 0. x,y,z= self._compute_xyz(R,phi,z,t) phixza= self._2ndderiv_xyz(x,y,z,0,2) phiyza= self._2ndderiv_xyz(x,y,z,1,2) ang = self._omegab*t + self._pa c, s = np.cos(ang), np.sin(ang) phixz = c*phixza + s*phiyza phiyz = -s*phixza + c*phiyza return np.cos(phi)*phixz + np.sin(phi)*phiyz
NAME: _Rzderiv PURPOSE: evaluate the mixed radial, vertical derivative for this potential INPUT: R - Galactocentric cylindrical radius z - vertical height phi - azimuth t - time OUTPUT: the mixed radial, vertical derivative
def make_job_details(self, row_idx): """Create a `JobDetails` from an `astropy.table.row.Row` """ row = self._table[row_idx] job_details = JobDetails.create_from_row(row) job_details.get_file_paths(self._file_archive, self._table_id_array) self._cache[job_details.fullkey] = job_details return job_details
Create a `JobDetails` from an `astropy.table.row.Row`
def dir(): """Return the list of patched function names. Used for patching functions imported from the module. """ dir = [ 'abspath', 'dirname', 'exists', 'expanduser', 'getatime', 'getctime', 'getmtime', 'getsize', 'isabs', 'isdir', 'isfile', 'islink', 'ismount', 'join', 'lexists', 'normcase', 'normpath', 'realpath', 'relpath', 'split', 'splitdrive' ] if IS_PY2: dir.append('walk') if sys.platform != 'win32' or not IS_PY2: dir.append('samefile') return dir
Return the list of patched function names. Used for patching functions imported from the module.
def start(cls, ev=None): """ Start the query to aleph by ISSN. """ ViewController.log_view.add("Beginning AlephReader request..") ViewController.issnbox_error.reset() issn = ViewController.issn.strip() # make sure that `issn` was filled if not issn: ViewController.issnbox_error.show("ISSN nebylo vyplněno!") ViewController.log_view.add("No ISSN! Aborting.") return ViewController.issnbox_error.hide() ViewController.issn_progressbar.reset() ViewController.issn_progressbar.show(50) ViewController.log_view.add("For ISSN `%s`." % issn) make_request( url=join(settings.API_PATH, "aleph/records_by_issn"), data={'issn': issn}, on_complete=cls.on_complete )
Start the query to aleph by ISSN.
def npoints_between(lon1, lat1, depth1, lon2, lat2, depth2, npoints): """ Find a list of specified number of points between two given ones that are equally spaced along the great circle arc connecting given points. :param float lon1, lat1, depth1: Coordinates of a point to start from. The first point in a resulting list has these coordinates. :param float lon2, lat2, depth2: Coordinates of a point to finish at. The last point in a resulting list has these coordinates. :param npoints: Integer number of points to return. First and last points count, so if there have to be two intervals, ``npoints`` should be 3. :returns: Tuple of three 1d numpy arrays: longitudes, latitudes and depths of resulting points respectively. Finds distance between two reference points and calls :func:`npoints_towards`. """ hdist = geodetic_distance(lon1, lat1, lon2, lat2) vdist = depth2 - depth1 rlons, rlats, rdepths = npoints_towards( lon1, lat1, depth1, azimuth(lon1, lat1, lon2, lat2), hdist, vdist, npoints ) # the last point should be left intact rlons[-1] = lon2 rlats[-1] = lat2 rdepths[-1] = depth2 return rlons, rlats, rdepths
Find a list of specified number of points between two given ones that are equally spaced along the great circle arc connecting given points. :param float lon1, lat1, depth1: Coordinates of a point to start from. The first point in a resulting list has these coordinates. :param float lon2, lat2, depth2: Coordinates of a point to finish at. The last point in a resulting list has these coordinates. :param npoints: Integer number of points to return. First and last points count, so if there have to be two intervals, ``npoints`` should be 3. :returns: Tuple of three 1d numpy arrays: longitudes, latitudes and depths of resulting points respectively. Finds distance between two reference points and calls :func:`npoints_towards`.
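A short usage sketch, assuming the surrounding geodetic helpers are importable; the coordinates are arbitrary.

# Three equally spaced points along the arc from (0, 0, 0 km) to (0, 1, 10 km).
lons, lats, depths = npoints_between(0.0, 0.0, 0.0,
                                     0.0, 1.0, 10.0,
                                     npoints=3)
# The endpoints match the inputs exactly (they are reassigned at the end),
# and the middle point sits halfway along the great-circle arc at depth 5.0.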
def predict(self, encoder_outputs, encoder_decoder_attention_bias): """Return predicted sequence.""" batch_size = tf.shape(encoder_outputs)[0] input_length = tf.shape(encoder_outputs)[1] max_decode_length = input_length + self.params.extra_decode_length symbols_to_logits_fn = self._get_symbols_to_logits_fn(max_decode_length) # Create initial set of IDs that will be passed into symbols_to_logits_fn. initial_ids = tf.zeros([batch_size], dtype=tf.int32) # Create cache storing decoder attention values for each layer. cache = { "layer_%d" % layer: { "k": tf.zeros([batch_size, 0, self.params.hidden_size]), "v": tf.zeros([batch_size, 0, self.params.hidden_size]), } for layer in range(self.params.num_hidden_layers)} # Add encoder output and attention bias to the cache. cache["encoder_outputs"] = encoder_outputs cache["encoder_decoder_attention_bias"] = encoder_decoder_attention_bias # Use beam search to find the top beam_size sequences and scores. mlperf_log.transformer_print( key=mlperf_log.MODEL_HP_SEQ_BEAM_SEARCH, value={ "vocab_size": self.params.vocab_size, "beam_size": self.params.beam_size, "alpha": self.params.alpha, "extra_decode_length": self.params.extra_decode_length}) decoded_ids, scores = beam_search.sequence_beam_search( symbols_to_logits_fn=symbols_to_logits_fn, initial_ids=initial_ids, initial_cache=cache, vocab_size=self.params.vocab_size, beam_size=self.params.beam_size, alpha=self.params.alpha, max_decode_length=max_decode_length, eos_id=EOS_ID) # Get the top sequence for each batch element top_decoded_ids = decoded_ids[:, 0, 1:] top_scores = scores[:, 0] return {"outputs": top_decoded_ids, "scores": top_scores}
Return predicted sequence.
def jira_role(name, rawtext, text, lineno, inliner, options=None, content=None, oxford_comma=True): """Sphinx role for referencing a JIRA ticket. Examples:: :jira:`DM-6181` -> DM-6181 :jira:`DM-6180,DM-6181` -> DM-6180 and DM-6181 :jira:`DM-6180,DM-6181,DM-6182` -> DM-6180, DM-6181, and DM-6182 """ options = options or {} content = content or [] config = inliner.document.settings.env.app.config ticket_ids = [each.strip() for each in utils.unescape(text).split(',')] n_tickets = len(ticket_ids) if oxford_comma: sep_factory = _oxford_comma_separator else: sep_factory = _comma_separator node_list = [] for i, ticket_id in enumerate(ticket_ids): node = _make_ticket_node(ticket_id, config, options=options) node_list.append(node) sep_text = sep_factory(i, n_tickets) if sep_text is not None: sep = nodes.raw(text=sep_text, format='html') node_list.append(sep) return node_list, []
Sphinx role for referencing a JIRA ticket. Examples:: :jira:`DM-6181` -> DM-6181 :jira:`DM-6180,DM-6181` -> DM-6180 and DM-6181 :jira:`DM-6180,DM-6181,DM-6182` -> DM-6180, DM-6181, and DM-6182
def validate_additional_properties(self, valid_response, response): """Validates additional properties. In additional properties, we only need to compare the values of the dict, not the keys Args: valid_response: An example response (for example generated in _get_example_from_properties(self, spec)) Type is DICT response: The actual dict coming from the response Type is DICT Returns: A boolean - whether the actual response validates against the given example """ assert isinstance(valid_response, dict) assert isinstance(response, dict) # the type of the value of the first key/value in valid_response is our # expected type - if it is a dict or list, we must go deeper first_value = valid_response[list(valid_response)[0]] # dict if isinstance(first_value, dict): # try to find a definition for that first value definition = None definition_name = self.get_dict_definition(first_value) if definition_name is None: definition = self._definition_from_example(first_value) definition_name = 'self generated' for item in response.values(): if not self.validate_definition(definition_name, item, definition=definition): return False return True # TODO: list if isinstance(first_value, list): raise Exception("Not implemented yet") # simple types # all values must be of that type in both valid and actual response try: assert all(isinstance(y, type(first_value)) for _, y in response.items()) assert all(isinstance(y, type(first_value)) for _, y in valid_response.items()) return True except Exception: return False
Validates additional properties. In additional properties, we only need to compare the values of the dict, not the keys Args: valid_response: An example response (for example generated in _get_example_from_properties(self, spec)) Type is DICT response: The actual dict coming from the response Type is DICT Returns: A boolean - whether the actual response validates against the given example
def choice_doinst(self): """View doinst.sh file """ if "doinst.sh" in self.sbo_files.split(): doinst_sh = ReadSBo(self.sbo_url).doinst("doinst.sh") fill = self.fill_pager(doinst_sh) self.pager(doinst_sh + fill)
View doinst.sh file
def modify(self, sp=None, ip_port=None, ip_address=None, netmask=None, v6_prefix_length=None, gateway=None, vlan_id=None): """ Modifies a replication interface. :param sp: same as the one in `create` method. :param ip_port: same as the one in `create` method. :param ip_address: same as the one in `create` method. :param netmask: same as the one in `create` method. :param v6_prefix_length: same as the one in `create` method. :param gateway: same as the one in `create` method. :param vlan_id: same as the one in `create` method. """ req_body = self._cli.make_body(sp=sp, ipPort=ip_port, ipAddress=ip_address, netmask=netmask, v6PrefixLength=v6_prefix_length, gateway=gateway, vlanId=vlan_id) resp = self.action('modify', **req_body) resp.raise_if_err() return resp
Modifies a replication interface. :param sp: same as the one in `create` method. :param ip_port: same as the one in `create` method. :param ip_address: same as the one in `create` method. :param netmask: same as the one in `create` method. :param v6_prefix_length: same as the one in `create` method. :param gateway: same as the one in `create` method. :param vlan_id: same as the one in `create` method.
def run_output(self): """Output finalized data""" for f in logdissect.output.__formats__: ouroutput = self.output_modules[f] ouroutput.write_output(self.data_set['finalized_data'], args=self.args) del(ouroutput) # Output to terminal if silent mode is not set: if not self.args.silentmode: if self.args.verbosemode: print('\n==== ++++ ==== Output: ==== ++++ ====\n') for line in self.data_set['finalized_data']['entries']: print(line['raw_text'])
Output finalized data
def from_incomplete_data(cls, vertices, normals=(), texcoords=(), **kwargs): """Return a Mesh with (vertices, normals, texcoords) as arrays, in that order. Useful for when you want a standardized array location format across different amounts of info in each mesh.""" normals = normals if hasattr(normals, '__iter__') and len(normals) else vertutils.calculate_normals(vertices) texcoords = texcoords if hasattr(texcoords, '__iter__') and len(texcoords) else np.zeros((vertices.shape[0], 2), dtype=np.float32) return cls(arrays=(vertices, normals, texcoords), **kwargs)
Return a Mesh with (vertices, normals, texcoords) as arrays, in that order. Useful for when you want a standardized array location format across different amounts of info in each mesh.
def consecutive_ones_property(sets, universe=None): """ Check the consecutive ones property. :param list sets: is a list of subsets of the ground set. :param groundset: is the set of all elements, by default it is the union of the given sets :returns: returns a list of the ordered ground set where every given set is consecutive, or None if there is no solution. :complexity: O(len(groundset) * len(sets)) :disclaimer: an optimal implementation would have complexity O(len(groundset) + len(sets) + sum(map(len,sets))), and there are more recent easier algorithms for this problem. """ if universe is None: universe = set() for S in sets: universe |= set(S) tree = PQ_tree(universe) try: for S in sets: tree.reduce(S) return tree.border() except IsNotC1P: return None
Check the consecutive ones property. :param list sets: is a list of subsets of the ground set. :param groundset: is the set of all elements, by default it is the union of the given sets :returns: returns a list of the ordered ground set where every given set is consecutive, or None if there is no solution. :complexity: O(len(groundset) * len(sets)) :disclaimer: an optimal implementation would have complexity O(len(groundset) + len(sets) + sum(map(len,sets))), and there are more recent easier algorithms for this problem.
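A minimal sketch of the happy path; the sets below are chosen so that a valid ordering exists.

# {1,2}, {2,3}, {3,4} are all consecutive in the ordering [1, 2, 3, 4],
# so an ordered list is returned; an incompatible family yields None instead.
order = consecutive_ones_property([{1, 2}, {2, 3}, {3, 4}])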
def get_activating_subs(self): """Extract INDRA ActiveForm Statements based on a mutation from BEL. The SPARQL pattern used to extract ActiveForms due to mutations look for a ProteinAbundance as a subject which has a child encoding the amino acid substitution. The object of the statement is an ActivityType of the same ProteinAbundance, which is either increased or decreased. Examples: proteinAbundance(HGNC:NRAS,substitution(Q,61,K)) directlyIncreases gtpBoundActivity(proteinAbundance(HGNC:NRAS)) proteinAbundance(HGNC:TP53,substitution(F,134,I)) directlyDecreases transcriptionalActivity(proteinAbundance(HGNC:TP53)) """ q_mods = prefixes + """ SELECT ?enzyme_name ?sub_label ?act_type ?rel ?stmt ?subject WHERE { ?stmt a belvoc:Statement . ?stmt belvoc:hasRelationship ?rel . ?stmt belvoc:hasSubject ?subject . ?stmt belvoc:hasObject ?object . ?subject a belvoc:ProteinAbundance . ?subject belvoc:hasConcept ?enzyme_name . ?subject belvoc:hasChild ?sub_expr . ?sub_expr rdfs:label ?sub_label . ?object a belvoc:AbundanceActivity . ?object belvoc:hasActivityType ?act_type . ?object belvoc:hasChild ?enzyme . ?enzyme a belvoc:ProteinAbundance . ?enzyme belvoc:hasConcept ?enzyme_name . } """ # Now make the PySB for the phosphorylation res_mods = self.g.query(q_mods) for stmt in res_mods: evidence = self._get_evidence(stmt[4]) # Parse out the elements of the query enz = self._get_agent(stmt[0], stmt[5]) sub_expr = term_from_uri(stmt[1]) act_type = term_from_uri(stmt[2]).lower() # Parse the WT and substituted residues from the node label. # Strangely, the RDF for substituted residue doesn't break the # terms of the BEL expression down into their meaning, as happens # for modified protein abundances. Instead, the substitution # just comes back as a string, e.g., "sub(V,600,E)". This code # parses the arguments back out using a regular expression. match = re.match('sub\(([A-Z]),([0-9]*),([A-Z])\)', sub_expr) if match: matches = match.groups() wt_residue = matches[0] position = matches[1] sub_residue = matches[2] else: logger.warning("Could not parse substitution expression %s" % sub_expr) continue mc = MutCondition(position, wt_residue, sub_residue) enz.mutations = [mc] rel = strip_statement(stmt[3]) if rel == 'DirectlyDecreases': is_active = False else: is_active = True stmt_str = strip_statement(stmt[4]) # Mark this as a converted statement self.converted_direct_stmts.append(stmt_str) st = ActiveForm(enz, act_type, is_active, evidence) self.statements.append(st)
Extract INDRA ActiveForm Statements based on a mutation from BEL. The SPARQL pattern used to extract ActiveForms due to mutations look for a ProteinAbundance as a subject which has a child encoding the amino acid substitution. The object of the statement is an ActivityType of the same ProteinAbundance, which is either increased or decreased. Examples: proteinAbundance(HGNC:NRAS,substitution(Q,61,K)) directlyIncreases gtpBoundActivity(proteinAbundance(HGNC:NRAS)) proteinAbundance(HGNC:TP53,substitution(F,134,I)) directlyDecreases transcriptionalActivity(proteinAbundance(HGNC:TP53))
def bind(self, destination='', source='', routing_key='', arguments=None): """Bind an Exchange. :param str destination: Exchange name :param str source: Exchange to bind to :param str routing_key: The routing key to use :param dict arguments: Bind key/value arguments :raises AMQPInvalidArgument: Invalid Parameters :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :rtype: dict """ if not compatibility.is_string(destination): raise AMQPInvalidArgument('destination should be a string') elif not compatibility.is_string(source): raise AMQPInvalidArgument('source should be a string') elif not compatibility.is_string(routing_key): raise AMQPInvalidArgument('routing_key should be a string') elif arguments is not None and not isinstance(arguments, dict): raise AMQPInvalidArgument('arguments should be a dict or None') bind_frame = pamqp_exchange.Bind(destination=destination, source=source, routing_key=routing_key, arguments=arguments) return self._channel.rpc_request(bind_frame)
Bind an Exchange. :param str destination: Exchange name :param str source: Exchange to bind to :param str routing_key: The routing key to use :param dict arguments: Bind key/value arguments :raises AMQPInvalidArgument: Invalid Parameters :raises AMQPChannelError: Raises if the channel encountered an error. :raises AMQPConnectionError: Raises if the connection encountered an error. :rtype: dict
def getVariants(self, referenceName, startPosition, endPosition, callSetIds=[]): """ Returns an iterator over the specified variants. The parameters correspond to the attributes of a GASearchVariantsRequest object. """ if callSetIds is None: callSetIds = self._callSetIds else: for callSetId in callSetIds: if callSetId not in self._callSetIds: raise exceptions.CallSetNotInVariantSetException( callSetId, self.getId()) for record in self.getPysamVariants( referenceName, startPosition, endPosition): yield self.convertVariant(record, callSetIds)
Returns an iterator over the specified variants. The parameters correspond to the attributes of a GASearchVariantsRequest object.
def zoom(params, factor): """ Applies a zoom on the current parameters. Computes the top-left plane-space coordinates from the Mandelbrot-space coordinates. :param params: Current application parameters. :param factor: Zoom factor by which the zoom ratio is divided (bigger factor, more zoom) """ params.zoom /= factor n_x = params.mb_cx / params.zoom n_y = params.mb_cy / params.zoom params.plane_x0 = int((n_x + 1.0) * params.plane_w / (2.0 * params.plane_ratio)) - params.plane_w // 2 params.plane_y0 = int((n_y + 1.0) * params.plane_h / 2.0) - params.plane_h // 2
Applies a zoom on the current parameters. Computes the top-left plane-space coordinates from the Mandelbrot-space coordinates. :param params: Current application parameters. :param factor: Zoom factor by which the zoom ratio is divided (bigger factor, more zoom)
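A worked sketch with a stand-in parameter object; the field names come from the function body, while the concrete values are assumptions for illustration.

from types import SimpleNamespace

params = SimpleNamespace(zoom=0.5, mb_cx=-0.75, mb_cy=0.1,
                         plane_w=800, plane_h=600, plane_ratio=800 / 600,
                         plane_x0=0, plane_y0=0)
zoom(params, 2)   # params.zoom halves to 0.25
# plane_x0 and plane_y0 are then recomputed so the view stays centred
# on (mb_cx, mb_cy) in Mandelbrot space at the new zoom level.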
def pretty_polyfit_plot(x, y, deg=1, xlabel=None, ylabel=None, **kwargs): """ Convenience method to plot data with trend lines based on polynomial fit. Args: x: Sequence of x data. y: Sequence of y data. deg (int): Degree of polynomial. Defaults to 1. xlabel (str): Label for x-axis. ylabel (str): Label for y-axis. \\*\\*kwargs: Keyword args passed to pretty_plot. Returns: matplotlib.pyplot object. """ plt = pretty_plot(**kwargs) pp = np.polyfit(x, y, deg) xp = np.linspace(min(x), max(x), 200) plt.plot(xp, np.polyval(pp, xp), 'k--', x, y, 'o') if xlabel: plt.xlabel(xlabel) if ylabel: plt.ylabel(ylabel) return plt
Convenience method to plot data with trend lines based on polynomial fit. Args: x: Sequence of x data. y: Sequence of y data. deg (int): Degree of polynomial. Defaults to 1. xlabel (str): Label for x-axis. ylabel (str): Label for y-axis. \\*\\*kwargs: Keyword args passed to pretty_plot. Returns: matplotlib.pyplot object.
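A usage sketch, assuming pymatgen's `pretty_plot` is importable alongside this helper.

import numpy as np

x = np.linspace(0, 10, 20)
y = 3 * x + 1 + np.random.normal(scale=0.5, size=20)   # noisy linear data
plt = pretty_polyfit_plot(x, y, deg=1, xlabel="x", ylabel="y")
plt.show()   # scatter of the data plus a dashed degree-1 trend line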
def retrieve(pdb_id, cache_dir = None, acceptable_sequence_percentage_match = 70.0, require_uniprot_residue_mapping = True, bio_cache = None): '''Creates a PDBML object by using a cached copy of the files if they exist or by retrieving the files from the RCSB. bio_cache should be a klab.bio.cache.py::BioCache object and is used to avoid reading/downloading cached files repeatedly. ''' pdb_contents = None xml_contents = None pdb_id = pdb_id.upper() l_pdb_id = pdb_id.lower() if len(pdb_id) != 4 or not pdb_id.isalnum(): raise Exception("Bad PDB identifier '%s'." % pdb_id) if bio_cache: pdb_contents = bio_cache.get_pdb_contents(pdb_id) xml_contents = bio_cache.get_sifts_xml_contents(pdb_id) if cache_dir: if not pdb_contents: # Check to see whether we have a cached copy of the PDB file filename = os.path.join(cache_dir, "%s.pdb" % pdb_id) if os.path.exists(filename): pdb_contents = read_file(filename) if not xml_contents: # Check to see whether we have a cached copy of the XML file filename = os.path.join(cache_dir, "%s.sifts.xml.gz" % l_pdb_id) if os.path.exists(filename): xml_contents = read_file(filename) # Get any missing files from the RCSB and create cached copies if appropriate if not pdb_contents: pdb_contents = rcsb.retrieve_pdb(pdb_id) if cache_dir: write_file(os.path.join(cache_dir, "%s.pdb" % pdb_id), pdb_contents) if not xml_contents: try: xml_contents = retrieve_xml(pdb_id, silent = False) if cache_dir: write_file(os.path.join(cache_dir, "%s.sifts.xml.gz" % l_pdb_id), xml_contents) except FTPException550: raise MissingSIFTSRecord('The file "%s.sifts.xml.gz" could not be found on the EBI FTP server.' % l_pdb_id) # Return the object handler = SIFTS(xml_contents, pdb_contents, acceptable_sequence_percentage_match = acceptable_sequence_percentage_match, cache_dir = cache_dir, require_uniprot_residue_mapping = require_uniprot_residue_mapping, bio_cache = bio_cache, pdb_id = pdb_id) xml.sax.parseString(xml_contents, handler) return handler
Creates a PDBML object by using a cached copy of the files if they exist or by retrieving the files from the RCSB. bio_cache should be a klab.bio.cache.py::BioCache object and is used to avoid reading/downloading cached files repeatedly.
def get_type(var): """ Gets types accounting for numpy Ignore: import utool as ut import pandas as pd var = np.array(['a', 'b', 'c']) ut.get_type(var) var = pd.Index(['a', 'b', 'c']) ut.get_type(var) """ if HAVE_NUMPY and isinstance(var, np.ndarray): if _WIN32: # This is a weird system specific error # https://github.com/numpy/numpy/issues/3667 type_ = var.dtype else: type_ = var.dtype.type elif HAVE_PANDAS and isinstance(var, pd.Index): if _WIN32: type_ = var.dtype else: type_ = var.dtype.type else: type_ = type(var) return type_
Gets types accounting for numpy Ignore: import utool as ut import pandas as pd var = np.array(['a', 'b', 'c']) ut.get_type(var) var = pd.Index(['a', 'b', 'c']) ut.get_type(var)
def static_transition(timestamp, contract_dates, transition, holidays=None, validate_inputs=True): """ An implementation of *get_weights* parameter in roller(). Return weights to tradeable instruments for a given date based on a transition DataFrame which indicates how to roll through the roll period. Parameters ---------- timestamp: pandas.Timestamp The timestamp to return instrument weights for contract_dates: pandas.Series Series with index of tradeable contract names and pandas.Timestamps representing the last date of the roll as values, sorted by values. Index must be unique and values must be strictly monotonic. transition: pandas.DataFrame A DataFrame with a index of integers representing business day offsets from the last roll date and a column which is a MultiIndex where the top level is generic instruments and the second level is ['front', 'back'] which refer to the front month contract and the back month contract of the roll. Note that for different generics, e.g. CL1, CL2, the front and back month contract during a roll would refer to different underlying instruments. The values represent the fraction of the roll on each day during the roll period. The first row of the transition period should be completely allocated to the front contract and the last row should be completely allocated to the back contract. holidays: array_like of datetime64[D] Holidays to exclude when calculating business day offsets from the last roll date. See numpy.busday_count. validate_inputs: Boolean Whether or not to validate ordering of contract_dates and transition. **Caution** this is provided for speed however if this is set to False and inputs are not defined properly algorithm may return incorrect data. Returns ------- A list of tuples consisting of the generic instrument name, the tradeable contract as a string, the weight on this contract as a float and the date as a pandas.Timestamp. Examples -------- >>> import pandas as pd >>> import mapping.mappings as mappings >>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']]) >>> idx = [-2, -1, 0] >>> transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5], ... [0.0, 1.0, 0.0, 1.0]], ... index=idx, columns=cols) >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'), ... pd.Timestamp('2016-11-21'), ... pd.Timestamp('2016-12-20')], ... index=['CLX16', 'CLZ16', 'CLF17']) >>> ts = pd.Timestamp('2016-10-19') >>> wts = mappings.static_transition(ts, contract_dates, transition) """ if validate_inputs: # required for MultiIndex slicing _check_static(transition.sort_index(axis=1)) # the algorithm below will return invalid results if contract_dates is # not as expected so better to fail explicitly _check_contract_dates(contract_dates) if not holidays: holidays = [] # further speedup can be obtained using contract_dates.loc[timestamp:] # but this requires swapping contract_dates index and values after_contract_dates = contract_dates.loc[contract_dates >= timestamp] contracts = after_contract_dates.index front_expiry_dt = after_contract_dates.iloc[0] days_to_expiry = np.busday_count(front_expiry_dt.date(), timestamp.date(), holidays=holidays) name2num = dict(zip(transition.columns.levels[0], range(len(transition.columns.levels[0])))) if days_to_expiry in transition.index: weights_iter = transition.loc[days_to_expiry].iteritems() # roll hasn't started yet elif days_to_expiry < transition.index.min(): # provides significant speedup over transition.iloc[0].iteritems() vals = transition.values[0] weights_iter = zip(transition.columns.tolist(), vals) # roll is finished else: vals = transition.values[-1] weights_iter = zip(transition.columns.tolist(), vals) cwts = [] for idx_tuple, weighting in weights_iter: gen_name, position = idx_tuple if weighting != 0: if position == "front": cntrct_idx = name2num[gen_name] elif position == "back": cntrct_idx = name2num[gen_name] + 1 try: cntrct_name = contracts[cntrct_idx] except IndexError as e: raise type(e)(("index {0} is out of bounds in\n{1}\nas of {2} " "resulting from {3} mapping") .format(cntrct_idx, after_contract_dates, timestamp, idx_tuple) ).with_traceback(sys.exc_info()[2]) cwts.append((gen_name, cntrct_name, weighting, timestamp)) return cwts
An implementation of *get_weights* parameter in roller(). Return weights to tradeable instruments for a given date based on a transition DataFrame which indicates how to roll through the roll period. Parameters ---------- timestamp: pandas.Timestamp The timestamp to return instrument weights for contract_dates: pandas.Series Series with index of tradeable contract names and pandas.Timestamps representing the last date of the roll as values, sorted by values. Index must be unique and values must be strictly monotonic. transition: pandas.DataFrame A DataFrame with a index of integers representing business day offsets from the last roll date and a column which is a MultiIndex where the top level is generic instruments and the second level is ['front', 'back'] which refer to the front month contract and the back month contract of the roll. Note that for different generics, e.g. CL1, CL2, the front and back month contract during a roll would refer to different underlying instruments. The values represent the fraction of the roll on each day during the roll period. The first row of the transition period should be completely allocated to the front contract and the last row should be completely allocated to the back contract. holidays: array_like of datetime64[D] Holidays to exclude when calculating business day offsets from the last roll date. See numpy.busday_count. validate_inputs: Boolean Whether or not to validate ordering of contract_dates and transition. **Caution** this is provided for speed however if this is set to False and inputs are not defined properly algorithm may return incorrect data. Returns ------- A list of tuples consisting of the generic instrument name, the tradeable contract as a string, the weight on this contract as a float and the date as a pandas.Timestamp. Examples -------- >>> import pandas as pd >>> import mapping.mappings as mappings >>> cols = pd.MultiIndex.from_product([["CL1", "CL2"], ['front', 'back']]) >>> idx = [-2, -1, 0] >>> transition = pd.DataFrame([[1.0, 0.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5], ... [0.0, 1.0, 0.0, 1.0]], ... index=idx, columns=cols) >>> contract_dates = pd.Series([pd.Timestamp('2016-10-20'), ... pd.Timestamp('2016-11-21'), ... pd.Timestamp('2016-12-20')], ... index=['CLX16', 'CLZ16', 'CLF17']) >>> ts = pd.Timestamp('2016-10-19') >>> wts = mappings.static_transition(ts, contract_dates, transition)
def loglikelihood(self, x, previous=False): """return log-likelihood of `x` regarding the current sample distribution""" # testing of original fct: MC integrate must be one: mean(p(x_i)) * volume(where x_i are uniformly sampled) # for i in xrange(3): print mean([cma.likelihood(20*r-10, dim * [0], None, 3) for r in rand(10000,dim)]) * 20**dim # TODO: test this!! # c=cma.fmin... # c[3]['cma'].loglikelihood(...) if previous and hasattr(self, 'lastiter'): sigma = self.lastiter.sigma Crootinv = self.lastiter._Crootinv xmean = self.lastiter.mean D = self.lastiter.D elif previous and self.countiter > 1: raise _Error('no previous distribution parameters stored, check options importance_mixing') else: sigma = self.sigma Crootinv = self._Crootinv xmean = self.mean D = self.D dx = array(x) - xmean # array(x) - array(m) n = self.N logs2pi = n * log(2 * np.pi) / 2. logdetC = 2 * sum(log(D)) dx = np.dot(Crootinv, dx) res = -sum(dx**2) / sigma**2 / 2 - logs2pi - logdetC / 2 - n * log(sigma) if 1 < 3: # testing s2pi = (2 * np.pi)**(n / 2.) detC = np.prod(D)**2 res2 = -sum(dx**2) / sigma**2 / 2 - log(s2pi * abs(detC)**0.5 * sigma**n) assert res - 1e-8 < res2 < res + 1e-8 return res
return log-likelihood of `x` regarding the current sample distribution
def encoded_to_array(encoded): """ Turn a dictionary with base64 encoded strings back into a numpy array. Parameters ------------ encoded : dict Has keys: dtype: string of dtype shape: int tuple of shape base64: base64 encoded string of flat array binary: decode result coming from numpy.tostring Returns ---------- array: numpy array """ if not isinstance(encoded, dict): if is_sequence(encoded): as_array = np.asanyarray(encoded) return as_array else: raise ValueError('Unable to extract numpy array from input') encoded = decode_keys(encoded) dtype = np.dtype(encoded['dtype']) if 'base64' in encoded: array = np.frombuffer(base64.b64decode(encoded['base64']), dtype) elif 'binary' in encoded: array = np.frombuffer(encoded['binary'], dtype=dtype) if 'shape' in encoded: array = array.reshape(encoded['shape']) return array
Turn a dictionary with base64 encoded strings back into a numpy array. Parameters ------------ encoded : dict Has keys: dtype: string of dtype shape: int tuple of shape base64: base64 encoded string of flat array binary: decode result coming from numpy.tostring Returns ---------- array: numpy array
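A round-trip sketch using the 'base64' branch described above; the encoding side shown here is an assumption that mirrors what the decoder expects.

import base64
import numpy as np

original = np.arange(6, dtype=np.float64)
encoded = {'dtype': str(original.dtype),
           'shape': (2, 3),
           'base64': base64.b64encode(original.tobytes())}
restored = encoded_to_array(encoded)   # 2x3 float64 array equal to original.reshape(2, 3)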
def _set_rbridge_id(self, v, load=False): """ Setter method for rbridge_id, mapped from YANG variable /rbridge_id (list) If this variable is read-only (config: false) in the source YANG file, then _set_rbridge_id is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_rbridge_id() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=YANGListType("rbridge_id",rbridge_id.rbridge_id, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='rbridge-id', extensions={u'tailf-common': {u'info': u'Rbridge Id for Node Specific configuration', u'callpoint': u'vcsnodespecificcallpoint', u'sort-priority': u'RUNNCFG_LEVEL_RBRIDGE', u'cli-suppress-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}), is_container='list', yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Rbridge Id for Node Specific configuration', u'callpoint': u'vcsnodespecificcallpoint', u'sort-priority': u'RUNNCFG_LEVEL_RBRIDGE', u'cli-suppress-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-rbridge', defining_module='brocade-rbridge', yang_type='list', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """rbridge_id must be of a type compatible with list""", 'defined-type': "list", 'generated-type': """YANGDynClass(base=YANGListType("rbridge_id",rbridge_id.rbridge_id, yang_name="rbridge-id", rest_name="rbridge-id", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='rbridge-id', extensions={u'tailf-common': {u'info': u'Rbridge Id for Node Specific configuration', u'callpoint': u'vcsnodespecificcallpoint', u'sort-priority': u'RUNNCFG_LEVEL_RBRIDGE', u'cli-suppress-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}), is_container='list', yang_name="rbridge-id", rest_name="rbridge-id", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Rbridge Id for Node Specific configuration', u'callpoint': u'vcsnodespecificcallpoint', u'sort-priority': u'RUNNCFG_LEVEL_RBRIDGE', u'cli-suppress-no': None, u'display-when': u'/vcsmode/vcs-mode = "true"'}}, namespace='urn:brocade.com:mgmt:brocade-rbridge', defining_module='brocade-rbridge', yang_type='list', is_config=True)""", }) self.__rbridge_id = t if hasattr(self, '_set'): self._set()
Setter method for rbridge_id, mapped from YANG variable /rbridge_id (list) If this variable is read-only (config: false) in the source YANG file, then _set_rbridge_id is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_rbridge_id() directly.
def items(self): """ Get list of download items. """ if self.matcher: for item in self._fetch_items(): if self.matcher.match(item): yield item else: for item in self._fetch_items(): yield item
Get list of download items.
def get_tunnel_info_output_tunnel_dest_ip(self, **kwargs): """Auto Generated Code """ config = ET.Element("config") get_tunnel_info = ET.Element("get_tunnel_info") config = get_tunnel_info output = ET.SubElement(get_tunnel_info, "output") tunnel = ET.SubElement(output, "tunnel") dest_ip = ET.SubElement(tunnel, "dest-ip") dest_ip.text = kwargs.pop('dest_ip') callback = kwargs.pop('callback', self._callback) return callback(config)
Auto Generated Code
def hacking_has_only_comments(physical_line, filename, lines, line_number): """Check for empty files with only comments H104 empty file with only comments """ if line_number == 1 and all(map(EMPTY_LINE_RE.match, lines)): return (0, "H104: File contains nothing but comments")
Check for empty files with only comments H104 empty file with only comments
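An illustrative sketch; in the real checker EMPTY_LINE_RE is defined at module level, so a plausible stand-in pattern is assumed here.

import re

EMPTY_LINE_RE = re.compile(r"^\s*(#.*)?$")   # assumed: blank or comment-only lines
lines = ["# copyright header\n", "# nothing but comments\n"]
hacking_has_only_comments(lines[0], "stub.py", lines, 1)
# -> (0, "H104: File contains nothing but comments")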
def format(format_string, cast=lambda x: x): """ A pre-called helper to supply a modern string format (the kind with {} instead of %s), so that it can apply to each value in the column as it is rendered. This can be useful for string padding like leading zeroes, or rounding floating point numbers to a certain number of decimal places, etc. If given, the ``cast`` argument should be a mapping function that coerces the input to whatever type is required for the string formatting to work. Trying to push string data into a float format will raise an exception, for example, so the ``float`` type itself could be given as the ``cast`` function. Examples:: # Perform some 0 padding item_number = columns.FloatColumn("Item No.", sources=['number'], processor=format("{:03d}")) # Force a string column value to coerce to float and round to 2 decimal places rating = columns.TextColumn("Rating", sources=['avg_rating'], processor=format("{:.2f}", cast=float)) """ def helper(instance, *args, **kwargs): value = kwargs.get('default_value') if value is None: value = instance value = cast(value) return format_string.format(value, obj=instance) return helper
A pre-called helper to supply a modern string format (the kind with {} instead of %s), so that it can apply to each value in the column as it is rendered. This can be useful for string padding like leading zeroes, or rounding floating point numbers to a certain number of decimal places, etc. If given, the ``cast`` argument should be a mapping function that coerces the input to whatever type is required for the string formatting to work. Trying to push string data into a float format will raise an exception, for example, so the ``float`` type itself could be given as the ``cast`` function. Examples:: # Perform some 0 padding item_number = columns.FloatColumn("Item No.", sources=['number'], processor=format("{:03d}")) # Force a string column value to coerce to float and round to 2 decimal places rating = columns.TextColumn("Rating", sources=['avg_rating'], processor=format("{:.2f}", cast=float))
def plot_row_coordinates(self, X, ax=None, figsize=(6, 6), x_component=0, y_component=1, labels=None, color_labels=None, ellipse_outline=False, ellipse_fill=True, show_points=True, **kwargs): """Plot the row principal coordinates.""" utils.validation.check_is_fitted(self, 's_') if ax is None: fig, ax = plt.subplots(figsize=figsize) # Add style ax = plot.stylize_axis(ax) # Make sure X is a DataFrame if not isinstance(X, pd.DataFrame): X = pd.DataFrame(X) # Retrieve principal coordinates coordinates = self.row_coordinates(X) x = coordinates[x_component].astype(np.float) y = coordinates[y_component].astype(np.float) # Plot if color_labels is None: ax.scatter(x, y, **kwargs) else: for color_label in sorted(list(set(color_labels))): mask = np.array(color_labels) == color_label color = ax._get_lines.get_next_color() # Plot points if show_points: ax.scatter(x[mask], y[mask], color=color, **kwargs, label=color_label) # Plot ellipse if (ellipse_outline or ellipse_fill): x_mean, y_mean, width, height, angle = plot.build_ellipse(x[mask], y[mask]) ax.add_patch(mpl.patches.Ellipse( (x_mean, y_mean), width, height, angle=angle, linewidth=2 if ellipse_outline else 0, color=color, fill=ellipse_fill, alpha=0.2 + (0.3 if not show_points else 0) if ellipse_fill else 1 )) # Add labels if labels is not None: for i, label in enumerate(labels): ax.annotate(label, (x[i], y[i])) # Legend ax.legend() # Text ax.set_title('Row principal coordinates') ei = self.explained_inertia_ ax.set_xlabel('Component {} ({:.2f}% inertia)'.format(x_component, 100 * ei[x_component])) ax.set_ylabel('Component {} ({:.2f}% inertia)'.format(y_component, 100 * ei[y_component])) return ax
Plot the row principal coordinates.
def text(self, paths, wholetext=False, lineSep=None): """ Loads text files and returns a :class:`DataFrame` whose schema starts with a string column named "value", and followed by partitioned columns if there are any. The text files must be encoded as UTF-8. By default, each line in the text file is a new row in the resulting DataFrame. :param paths: string, or list of strings, for input path(s). :param wholetext: if true, read each file from input path(s) as a single row. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. >>> df = spark.read.text('python/test_support/sql/text-test.txt') >>> df.collect() [Row(value=u'hello'), Row(value=u'this')] >>> df = spark.read.text('python/test_support/sql/text-test.txt', wholetext=True) >>> df.collect() [Row(value=u'hello\\nthis')] """ self._set_opts(wholetext=wholetext, lineSep=lineSep) if isinstance(paths, basestring): paths = [paths] return self._df(self._jreader.text(self._spark._sc._jvm.PythonUtils.toSeq(paths)))
Loads text files and returns a :class:`DataFrame` whose schema starts with a string column named "value", and followed by partitioned columns if there are any. The text files must be encoded as UTF-8. By default, each line in the text file is a new row in the resulting DataFrame. :param paths: string, or list of strings, for input path(s). :param wholetext: if true, read each file from input path(s) as a single row. :param lineSep: defines the line separator that should be used for parsing. If None is set, it covers all ``\\r``, ``\\r\\n`` and ``\\n``. >>> df = spark.read.text('python/test_support/sql/text-test.txt') >>> df.collect() [Row(value=u'hello'), Row(value=u'this')] >>> df = spark.read.text('python/test_support/sql/text-test.txt', wholetext=True) >>> df.collect() [Row(value=u'hello\\nthis')]
def attend_on_question(self, query: torch.Tensor, encoder_outputs: torch.Tensor, encoder_output_mask: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: """ Given a query (which is typically the decoder hidden state), compute an attention over the output of the question encoder, and return a weighted sum of the question representations given this attention. We also return the attention weights themselves. This is a simple computation, but we have it as a separate method so that the ``forward`` method on the main parser module can call it on the initial hidden state, to simplify the logic in ``take_step``. """ # (group_size, question_length) question_attention_weights = self._input_attention(query, encoder_outputs, encoder_output_mask) # (group_size, encoder_output_dim) attended_question = util.weighted_sum(encoder_outputs, question_attention_weights) return attended_question, question_attention_weights
Given a query (which is typically the decoder hidden state), compute an attention over the output of the question encoder, and return a weighted sum of the question representations given this attention. We also return the attention weights themselves. This is a simple computation, but we have it as a separate method so that the ``forward`` method on the main parser module can call it on the initial hidden state, to simplify the logic in ``take_step``.
def at(*args, **kwargs): # pylint: disable=C0103 ''' Add a job to the queue. The 'timespec' follows the format documented in the at(1) manpage. CLI Example: .. code-block:: bash salt '*' at.at <timespec> <cmd> [tag=<tag>] [runas=<user>] salt '*' at.at 12:05am '/sbin/reboot' tag=reboot salt '*' at.at '3:05am +3 days' 'bin/myscript' tag=nightly runas=jim ''' if len(args) < 2: return {'jobs': []} # Shim to produce output similar to what __virtual__() should do # but __salt__ isn't available in __virtual__() binary = salt.utils.path.which('at') if not binary: return '\'at.at\' is not available.' if 'tag' in kwargs: stdin = '### SALT: {0}\n{1}'.format(kwargs['tag'], ' '.join(args[1:])) else: stdin = ' '.join(args[1:]) cmd = [binary, args[0]] cmd_kwargs = {'stdin': stdin, 'python_shell': False} if 'runas' in kwargs: cmd_kwargs['runas'] = kwargs['runas'] output = __salt__['cmd.run'](cmd, **cmd_kwargs) if output is None: return '\'at.at\' is not available.' if output.endswith('Garbled time'): return {'jobs': [], 'error': 'invalid timespec'} if output.startswith('warning: commands'): output = output.splitlines()[1] if output.startswith('commands will be executed'): output = output.splitlines()[1] output = output.split()[1] if __grains__['os'] in BSD: return atq(six.text_type(output)) else: return atq(int(output))
Add a job to the queue. The 'timespec' follows the format documented in the at(1) manpage. CLI Example: .. code-block:: bash salt '*' at.at <timespec> <cmd> [tag=<tag>] [runas=<user>] salt '*' at.at 12:05am '/sbin/reboot' tag=reboot salt '*' at.at '3:05am +3 days' 'bin/myscript' tag=nightly runas=jim
def emit_rmic_classes(target, source, env): """Create and return lists of Java RMI stub and skeleton class files to be created from a set of class files. """ class_suffix = env.get('JAVACLASSSUFFIX', '.class') classdir = env.get('JAVACLASSDIR') if not classdir: try: s = source[0] except IndexError: classdir = '.' else: try: classdir = s.attributes.java_classdir except AttributeError: classdir = '.' classdir = env.Dir(classdir).rdir() if str(classdir) == '.': c_ = None else: c_ = str(classdir) + os.sep slist = [] for src in source: try: classname = src.attributes.java_classname except AttributeError: classname = str(src) if c_ and classname[:len(c_)] == c_: classname = classname[len(c_):] if class_suffix and classname[-len(class_suffix):] == class_suffix: classname = classname[:-len(class_suffix)] s = src.rfile() s.attributes.java_classdir = classdir s.attributes.java_classname = classname slist.append(s) stub_suffixes = ['_Stub'] if env.get('JAVAVERSION') == '1.4': stub_suffixes.append('_Skel') tlist = [] for s in source: for suff in stub_suffixes: fname = s.attributes.java_classname.replace('.', os.sep) + \ suff + class_suffix t = target[0].File(fname) t.attributes.java_lookupdir = target[0] tlist.append(t) return tlist, source
Create and return lists of Java RMI stub and skeleton class files to be created from a set of class files.
def set_item(key, value): """Write JSON content from value argument to cached file and return""" CACHED_KEY_FILE = os.path.join(CURRENT_DIR, key) with open(CACHED_KEY_FILE, "wb") as cache_file: cache_file.write(json.dumps({"_": value}).encode('UTF-8')) return value
Write JSON content from value argument to cached file and return
def Shift(self, value, copy=False): """ Shift the graph left or right by value """ numPoints = self.GetN() if copy: shiftGraph = self.Clone() else: shiftGraph = self X = self.GetX() EXlow = self.GetEXlow() EXhigh = self.GetEXhigh() Y = self.GetY() EYlow = self.GetEYlow() EYhigh = self.GetEYhigh() for i in range(numPoints): shiftGraph.SetPoint(i, X[i] + value, Y[i]) shiftGraph.SetPointError( i, EXlow[i], EXhigh[i], EYlow[i], EYhigh[i]) return shiftGraph
Shift the graph left or right by value
def toggle(self, key): """ Toggles a boolean key """ val = self[key] assert isinstance(val, bool), 'key[%r] = %r is not a bool' % (key, val) self.pref_update(key, not val)
Toggles a boolean key
def check_sensor(self, helper): """ check the status of the specified sensor """ try: sensor_name, sensor_state, sensor_type = self.sess.get_oids( self.oids['oid_sensor_name'], self.oids['oid_sensor_state'], self.oids['oid_sensor_type']) except health_monitoring_plugins.SnmpException as e: helper.exit(summary=str(e), exit_code=unknown, perfdata='') try: sensor_state_string = states[int(sensor_state)] except KeyError as e: helper.exit(summary="Invalid sensor response " + sensor_state, exit_code=unknown, perfdata='') sensor_unit = "" # if it's a onOff Sensor or something like that, we need an empty string for the summary sensor_unit_string = "" sensor_value = "" sensor_digit = "" real_sensor_value = "" sensor_warning_upper = "" sensor_critical_upper = "" sensor_warning_lower = "" sensor_critical_lower = "" if int(sensor_type) not in [14, 16, 17, 18, 19, 20]: # for all sensors except these, we want to calculate the real value and show the metric. # 14: onOff # 16: vibration # 17: waterDetection # 18: smokeDetection # 19: binary # 20: contact try: sensor_unit, sensor_digit, sensor_warning_upper, sensor_critical_upper, sensor_warning_lower, sensor_critical_lower, sensor_value = self.sess.get_oids( self.oids['oid_sensor_unit'], self.oids['oid_sensor_digit'], self.oids['oid_sensor_warning_upper'], self.oids['oid_sensor_critical_upper'], self.oids['oid_sensor_warning_lower'], self.oids['oid_sensor_critical_lower'], self.oids['oid_sensor_value']) except health_monitoring_plugins.SnmpException as e: helper.exit(summary=str(e), exit_code=unknown, perfdata='') sensor_unit_string = units[int(sensor_unit)] real_sensor_value = real_value(int(sensor_value), sensor_digit) real_sensor_warning_upper = real_value(sensor_warning_upper, sensor_digit) real_sensor_critical_upper = real_value(sensor_critical_upper, sensor_digit) real_sensor_warning_lower = real_value(sensor_warning_lower, sensor_digit) real_sensor_critical_lower = real_value(sensor_critical_lower, sensor_digit) # metrics are only possible for these sensors helper.add_metric(sensor_name + " -%s- " % sensor_unit_string, real_sensor_value, real_sensor_warning_lower +\ ":" + real_sensor_warning_upper, real_sensor_critical_lower +\ ":" + real_sensor_critical_upper, "", "", "") # "OK" state if sensor_state_string in ["closed", "normal", "on", "notDetected", "ok", "yes", "one", "two", "inSync"]: helper.status(ok) # "WARNING" state elif sensor_state_string in ["open", "belowLowerWarning", "aboveUpperWarning", "marginal", "standby"]: helper.status(warning) # "CRITICAL" state elif sensor_state_string in ["belowLowerCritical", "aboveUpperCritical", "off", "detected", "alarmed", "fail", "no", "outOfSync"]: helper.status(critical) # "UNKNOWN" state elif sensor_state_string in ["unavailable"]: helper.status(unknown) # received an undefined state else: helper.exit(summary="Something went wrong - received undefined state", exit_code=unknown, perfdata='') # summary is shown for all sensors helper.add_summary("Sensor %s - '%s' %s%s is: %s" % (self.number, sensor_name, real_sensor_value, sensor_unit_string, sensor_state_string))
check the status of the specified sensor
def connect(self, sock): """Attach a given socket to a channel""" def cbwrap(*args, **kwargs): """Callback wrapper; passes in response_type""" self.callback(self.response_type, *args, **kwargs) self.sock = sock self.sock.subscribe(self.channel) self.sock.onchannel(self.channel, cbwrap)
Attach a given socket to a channel
def enum(number, zone='e164.arpa'): ''' Printable DNS ENUM (telephone number mapping) record. :param number: string :param zone: string >>> print(enum('+31 20 5423 1567')) 7.6.5.1.3.2.4.5.0.2.1.3.e164.arpa. >>> print(enum('+31 97 99 6642', zone='e164.spacephone.org')) 2.4.6.6.9.9.7.9.1.3.e164.spacephone.org. ''' number = e164(number).lstrip('+') return u'.'.join([ u'.'.join(number[::-1]), zone.strip(u'.'), '', ])
Printable DNS ENUM (telephone number mapping) record. :param number: string :param zone: string >>> print(enum('+31 20 5423 1567')) 7.6.5.1.3.2.4.5.0.2.1.3.e164.arpa. >>> print(enum('+31 97 99 6642', zone='e164.spacephone.org')) 2.4.6.6.9.9.7.9.1.3.e164.spacephone.org.
def get_scanner_param_type(self, param): """ Returns type of a scanner parameter. """ assert isinstance(param, str) entry = self.scanner_params.get(param) if not entry: return None return entry.get('type')
Returns type of a scanner parameter.
def check(text): """Check the text.""" err = "consistency.spacing" msg = "Inconsistent spacing after period (1 vs. 2 spaces)." regex = ["[\.\?!] [A-Z]", "[\.\?!]  [A-Z]"] return consistency_check(text, [regex], err, msg)
Check the text.
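A short sketch of the check in action; `consistency_check` comes from the surrounding proselint-style helpers and is assumed importable here.

text = ("One space follows this sentence. "
        "Two spaces follow this one.  Mixing the two styles gets flagged.")
errors = check(text)   # reports whichever spacing style is in the minority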
def AskFileForSave(message=None, savedFileName=None, version=None, defaultLocation=None, dialogOptionFlags=None, location=None, clientName=None, windowTitle=None, actionButtonLabel=None, cancelButtonLabel=None, preferenceKey=None, popupExtension=None, eventProc=None, fileType=None, fileCreator=None, wanted=None, multiple=None): """Original doc: Display a dialog asking the user for a filename to save to. wanted is the return type wanted: FSSpec, FSRef, unicode or string (default) the other arguments can be looked up in Apple's Navigation Services documentation""" return psidialogs.ask_file(message=message, save=True)
Original doc: Display a dialog asking the user for a filename to save to. wanted is the return type wanted: FSSpec, FSRef, unicode or string (default) the other arguments can be looked up in Apple's Navigation Services documentation
def fake_print(self): ''' This is the overridden __str__ method for Operation Recursively prints out the actual query to be executed ''' def _fake_run(): kwargs = self.kwargs.copy() kwargs['generate'] = True return _fake_handle_result( getattr(self.migrator, self.method)(*self.args, **kwargs) ) def _fake_handle_result(result): if isinstance(result, Node): sql, params = self._parse_node(result) return (sql, params) elif isinstance(result, Operation): return str(result) elif isinstance(result, (list, tuple)): return '\n'.join([str(_fake_handle_result(item)) for item in result]) return str(_fake_run())
This is the overridden __str__ method for Operation Recursively prints out the actual query to be executed
def minify_ring(mol, verbose=False): """ Minify ring set (similar to SSSR) Limitation: this cannot correctly recognize minimum rings in the case of a non-outerplanar graph. Note: concept of SSSR is controversial. Roughly reducing the size of the cycle basis can help some scaffold-based analyses """ mol.require("Topology") for cyc_idx in mol.scaffolds: rings = deque(sorted([mol.rings[c] for c in cyc_idx], key=len)) minified = [] cnt = 0 while rings: cnt += 1 if cnt > 100: mol.descriptors.add("MinifiedRing") raise RuntimeError("Ring minimization failed") r = rings.popleft() init_r = r if verbose: print(len(r), "Ring:{}".format(r)) for m in minified: if verbose: print(len(m), "Minified:{}".format(m)) resolved = resolve_inclusion(r, m) if resolved: if verbose: print(len(resolved[0]), len(resolved[1]), "Resolved:{}".format(resolved)) r = resolved[0] if verbose: print(len(r), "New ring:{}\n".format(r)) if len(r) == len(init_r): # can no longer be minified minified.append(r) else: # further minification required rings.append(r) for c in cyc_idx: mol.rings[c] = minified.pop() mol.descriptors.add("MinifiedRing")
Minify ring set (similar to SSSR) Limitation: this cannot correctly recognize minimum rings in the case of a non-outerplanar graph. Note: the concept of SSSR is controversial. Roughly reducing the size of the cycle basis can help some scaffold-based analyses.
def minute_trend_times(start, end): """Expand a [start, end) interval for use in querying for minute trends NDS2 requires start and end times for minute trends to be a multiple of 60 (to exactly match the time of a minute-trend sample), so this function expands the given ``[start, end)`` interval to the nearest multiples. Parameters ---------- start : `int` GPS start time of query end : `int` GPS end time of query Returns ------- mstart : `int` ``start`` rounded down to nearest multiple of 60 mend : `int` ``end`` rounded up to nearest multiple of 60 """ if start % 60: start = int(start) // 60 * 60 if end % 60: end = int(end) // 60 * 60 + 60 return int(start), int(end)
Expand a [start, end) interval for use in querying for minute trends NDS2 requires start and end times for minute trends to be a multiple of 60 (to exactly match the time of a minute-trend sample), so this function expands the given ``[start, end)`` interval to the nearest multiples. Parameters ---------- start : `int` GPS start time of query end : `int` GPS end time of query Returns ------- mstart : `int` ``start`` rounded down to nearest multiple of 60 mend : `int` ``end`` rounded up to nearest multiple of 60
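The rounding is pure integer arithmetic, so it can be verified without an NDS2 connection:

# aligned endpoints are left untouched; everything else snaps outward
assert minute_trend_times(59, 61) == (0, 120)
assert minute_trend_times(60, 120) == (60, 120)
assert minute_trend_times(0, 1) == (0, 60)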
def solve_limited(self, assumptions=[]):
    """
        Solve internal formula using given budgets for conflicts and
        propagations.
    """
    if self.minisat:
        if self.use_timer:
            start_time = time.process_time()

        # saving default SIGINT handler
        def_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_DFL)

        self.status = pysolvers.minisatgh_solve_lim(self.minisat, assumptions)

        # recovering default SIGINT handler
        def_sigint_handler = signal.signal(signal.SIGINT, def_sigint_handler)

        if self.use_timer:
            self.call_time = time.process_time() - start_time
            self.accu_time += self.call_time

        return self.status
Solve internal formula using given budgets for conflicts and propagations.
def to_indra_statements(graph): """Export this graph as a list of INDRA statements using the :py:class:`indra.sources.pybel.PybelProcessor`. :param pybel.BELGraph graph: A BEL graph :rtype: list[indra.statements.Statement] """ from indra.sources.bel import process_pybel_graph pbp = process_pybel_graph(graph) return pbp.statements
Export this graph as a list of INDRA statements using the :py:class:`indra.sources.pybel.PybelProcessor`. :param pybel.BELGraph graph: A BEL graph :rtype: list[indra.statements.Statement]
def connect():
    """Try to connect to the router.

    Returns:
        upnp (miniupnpc.UPnP): the connected UPnP instance
        location (string): the discovered IGD location URL
    """
    upnp = miniupnpc.UPnP()
    upnp.discoverdelay = 200
    providers = upnp.discover()
    if providers > 1:
        log.debug('multiple upnp providers found', num_providers=providers)
    elif providers < 1:
        log.error('no upnp providers found')
        return None
    try:
        location = upnp.selectigd()
        log.debug('connected', upnp=upnp)
    except Exception as e:
        log.error('Error when connecting to uPnP provider', exception_info=e)
        return None
    if not valid_mappable_ipv4(upnp.lanaddr):
        log.error('could not query your lanaddr', reported=upnp.lanaddr)
        return None
    try:
        # this can fail if the router advertises uPnP incorrectly
        if not valid_mappable_ipv4(upnp.externalipaddress()):
            log.error('could not query your externalipaddress', reported=upnp.externalipaddress())
            return None
        return upnp, location
    except Exception:
        log.error('error when connecting with uPnP provider', location=location)
        return None
Try to connect to the router. Returns: upnp (miniupnpc.UPnP): the connected UPnP instance location (string): the discovered IGD location URL
def single(fun, name, test=None, queue=False, **kwargs):
    '''
    Execute a single state function with the named kwargs, returns False if
    insufficient data is sent to the command

    By default, the values of the kwargs will be parsed as YAML. So, you can
    specify list values, or lists of single entry key-value maps, as you
    would in a YAML salt file. Alternatively, JSON format of keyword values
    is also supported.

    CLI Example:

    .. code-block:: bash

        salt '*' state.single pkg.installed name=vim
    '''
    conflict = _check_queue(queue, kwargs)
    if conflict is not None:
        return conflict
    comps = fun.split('.')
    if len(comps) < 2:
        __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
        return 'Invalid function passed'
    kwargs.update({'state': comps[0],
                   'fun': comps[1],
                   '__id__': name,
                   'name': name})
    orig_test = __opts__.get('test', None)
    opts = salt.utils.state.get_sls_opts(__opts__, **kwargs)
    opts['test'] = _get_test_value(test, **kwargs)

    pillar_override = kwargs.get('pillar')
    pillar_enc = kwargs.get('pillar_enc')
    if pillar_enc is None \
            and pillar_override is not None \
            and not isinstance(pillar_override, dict):
        raise SaltInvocationError(
            'Pillar data must be formatted as a dictionary, unless pillar_enc '
            'is specified.'
        )

    try:
        st_ = salt.state.State(opts,
                               pillar_override,
                               pillar_enc=pillar_enc,
                               proxy=__proxy__,
                               initial_pillar=_get_initial_pillar(opts))
    except NameError:
        st_ = salt.state.State(opts,
                               pillar_override,
                               pillar_enc=pillar_enc,
                               initial_pillar=_get_initial_pillar(opts))
    err = st_.verify_data(kwargs)
    if err:
        __context__['retcode'] = salt.defaults.exitcodes.EX_STATE_COMPILER_ERROR
        return err

    st_._mod_init(kwargs)
    snapper_pre = _snapper_pre(opts, kwargs.get('__pub_jid', 'called locally'))
    ret = {'{0[state]}_|-{0[__id__]}_|-{0[name]}_|-{0[fun]}'.format(kwargs):
           st_.call(kwargs)}
    _set_retcode(ret)
    _snapper_post(opts, kwargs.get('__pub_jid', 'called locally'), snapper_pre)
    # Work around Windows multiprocessing bug, set __opts__['test'] back to
    # value from before this function was run.
    __opts__['test'] = orig_test
    return ret
Execute a single state function with the named kwargs, returns False if insufficient data is sent to the command By default, the values of the kwargs will be parsed as YAML. So, you can specify list values, or lists of single entry key-value maps, as you would in a YAML salt file. Alternatively, JSON format of keyword values is also supported. CLI Example: .. code-block:: bash salt '*' state.single pkg.installed name=vim
async def destroy_async(self):
    """Asynchronously close any open management Links and close the Session.
    Cleans up any C objects for both the management Links and the Session.
    """
    for _, link in self._mgmt_links.items():
        await link.destroy_async()
    self._session.destroy()
Asynchronously close any open management Links and close the Session. Cleans up any C objects for both the management Links and the Session.
def _processHandler(self, securityHandler, param_dict):
    """Processes the security handler and returns the cookiejar."""
    cj = None
    handler = None
    if securityHandler is None:
        cj = cookiejar.CookieJar()
    elif securityHandler.method.lower() == "token":
        param_dict['token'] = securityHandler.token
        if hasattr(securityHandler, 'cookiejar'):
            cj = securityHandler.cookiejar
        if hasattr(securityHandler, 'handler'):
            handler = securityHandler.handler
    elif securityHandler.method.lower() == "handler":
        handler = securityHandler.handler
        cj = securityHandler.cookiejar
    return param_dict, handler, cj
Processes the security handler and returns the cookiejar.
def parse_args(parser, modules, args=None): """Set up global configuration for command-line tools. `modules` is an iterable of :class:`yakonfig.Configurable` objects, or anything equivalently typed. This function iterates through those objects and calls :meth:`~yakonfig.Configurable.add_arguments` on each to build up a complete list of command-line arguments, then calls :meth:`argparse.ArgumentParser.parse_args` to actually process the command line. This produces a configuration that is a combination of all default values declared by all modules; configuration specified in ``--config`` arguments; and overriding configuration values specified in command-line arguments. This returns the :class:`argparse.Namespace` object, in case the application has defined its own command-line parameters and needs to process them. The new global configuration can be obtained via :func:`yakonfig.get_global_config`. :param argparse.ArgumentParser parser: application-provided argument parser :param modules: modules or Configurable instances to use :type modules: iterable of :class:`~yakonfig.Configurable` :param args: command-line options, or `None` to use `sys.argv` :return: the new global configuration """ collect_add_argparse(parser, modules) namespace = parser.parse_args(args) try: do_dump_config = getattr(namespace, 'dump_config', None) set_default_config(modules, params=vars(namespace), validate=not do_dump_config) if do_dump_config: if namespace.dump_config == 'full': to_dump = get_global_config() elif namespace.dump_config == 'default': to_dump = assemble_default_config(modules) else: # 'effective' to_dump = diff_config(assemble_default_config(modules), get_global_config()) yaml_mod.dump(to_dump, sys.stdout) parser.exit() except ConfigurationError as e: parser.error(e) return namespace
Set up global configuration for command-line tools. `modules` is an iterable of :class:`yakonfig.Configurable` objects, or anything equivalently typed. This function iterates through those objects and calls :meth:`~yakonfig.Configurable.add_arguments` on each to build up a complete list of command-line arguments, then calls :meth:`argparse.ArgumentParser.parse_args` to actually process the command line. This produces a configuration that is a combination of all default values declared by all modules; configuration specified in ``--config`` arguments; and overriding configuration values specified in command-line arguments. This returns the :class:`argparse.Namespace` object, in case the application has defined its own command-line parameters and needs to process them. The new global configuration can be obtained via :func:`yakonfig.get_global_config`. :param argparse.ArgumentParser parser: application-provided argument parser :param modules: modules or Configurable instances to use :type modules: iterable of :class:`~yakonfig.Configurable` :param args: command-line options, or `None` to use `sys.argv` :return: the new global configuration
def teardown_websocket(self, func: Callable) -> Callable: """Add a teardown websocket function to the Blueprint. This is designed to be used as a decorator, and has the same arguments as :meth:`~quart.Quart.teardown_websocket`. It applies only to requests that are routed to an endpoint in this blueprint. An example usage, .. code-block:: python blueprint = Blueprint(__name__) @blueprint.teardown_websocket def teardown(): ... """ self.record_once(lambda state: state.app.teardown_websocket(func, self.name)) return func
Add a teardown websocket function to the Blueprint. This is designed to be used as a decorator, and has the same arguments as :meth:`~quart.Quart.teardown_websocket`. It applies only to requests that are routed to an endpoint in this blueprint. An example usage, .. code-block:: python blueprint = Blueprint(__name__) @blueprint.teardown_websocket def teardown(): ...
def isCollapsed( self ): """ Returns whether or not this group box is collapsed. :return <bool> """ if not self.isCollapsible(): return False if self._inverted: return self.isChecked() return not self.isChecked()
Returns whether or not this group box is collapsed. :return <bool>
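The checked/collapsed relationship reduces to a small truth table; a standalone restatement of the method body, assuming a collapsible group box:

def collapsed(checked, inverted, collapsible=True):
    if not collapsible:
        return False
    return checked if inverted else not checked

assert collapsed(checked=True, inverted=False) is False   # checked means expanded
assert collapsed(checked=True, inverted=True) is True     # inverted semantics flip it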
def extract_child_models_of_state(state_m, new_state_class): """Retrieve child models of state model The function extracts the child state and state element models of the given state model into a dict. It only extracts those properties that are required for a state of type `new_state_class`. Transitions are always left out. :param state_m: state model of which children are to be extracted from :param new_state_class: The type of the new class :return: """ # check if root state and which type of state assert isinstance(state_m, StateModel) assert issubclass(new_state_class, State) orig_state = state_m.state # only here to get the input parameter of the Core-function current_state_is_container = isinstance(orig_state, ContainerState) new_state_is_container = issubclass(new_state_class, ContainerState) # define which model references to hold for new state required_model_properties = ['input_data_ports', 'output_data_ports', 'outcomes', 'income'] obsolete_model_properties = [] if current_state_is_container and new_state_is_container: # hold some additional references # transition are removed when changing the state type, thus do not copy them required_model_properties.extend(['states', 'data_flows', 'scoped_variables']) obsolete_model_properties.append('transitions') elif current_state_is_container: obsolete_model_properties.extend(['states', 'transitions', 'data_flows', 'scoped_variables']) def get_element_list(state_m, prop_name): if prop_name == 'income': return [state_m.income] wrapper = getattr(state_m, prop_name) # ._obj is needed as gaphas wraps observable lists and dicts into a gaphas.support.ObsWrapper list_or_dict = wrapper._obj if isinstance(list_or_dict, list): return list_or_dict[:] # copy list return list(list_or_dict.values()) # dict required_child_models = {} for prop_name in required_model_properties: required_child_models[prop_name] = get_element_list(state_m, prop_name) obsolete_child_models = {} for prop_name in obsolete_model_properties: obsolete_child_models[prop_name] = get_element_list(state_m, prop_name) # Special handling of BarrierState, which includes the DeciderState that always becomes obsolete if isinstance(state_m, ContainerStateModel): decider_state_m = state_m.states.get(UNIQUE_DECIDER_STATE_ID, None) if decider_state_m: if new_state_is_container: required_child_models['states'].remove(decider_state_m) obsolete_child_models['states'] = [decider_state_m] return required_child_models, obsolete_child_models
Retrieve child models of state model The function extracts the child state and state element models of the given state model into a dict. It only extracts those properties that are required for a state of type `new_state_class`. Transitions are always left out. :param state_m: state model of which children are to be extracted from :param new_state_class: The type of the new class :return:
def get_transactions_filtered(self, asset_id, operation=None): """Get a list of transactions filtered on some criteria """ txids = backend.query.get_txids_filtered(self.connection, asset_id, operation) for txid in txids: yield self.get_transaction(txid)
Get a list of transactions filtered on some criteria
def _ingest_source(self, source, ps, force=None):
    """Ingest a single source"""
    from ambry.bundle.process import call_interval

    try:
        from ambry.orm.exc import NotFoundError

        if not source.is_partition and source.datafile.exists:
            if not source.datafile.is_finalized:
                source.datafile.remove()
            elif force:
                source.datafile.remove()
            else:
                ps.update(
                    message='Source {} already ingested, skipping'.format(source.name),
                    state='skipped')
                return True

        if source.is_partition:
            # Check if the partition exists
            try:
                self.library.partition(source.ref)
            except NotFoundError:
                # Maybe it is an internal reference, in which case we can just delay
                # until the partition is built
                ps.update(message="Not Ingesting {}: referenced partition '{}' does not exist"
                          .format(source.name, source.ref), state='skipped')
                return True

        source.state = source.STATES.INGESTING

        iterable_source, source_pipe = self.source_pipe(source, ps)

        if not source.is_ingestible:
            ps.update(message='Not an ingestible source: {}'.format(source.name),
                      state='skipped', source=source)
            source.state = source.STATES.NOTINGESTABLE
            return True

        ps.update('Ingesting {} from {}'.format(source.spec.name, source.url or source.generator),
                  item_type='rows', item_count=0)

        @call_interval(5)
        def ingest_progress_f(i):
            (desc, n_records, total, rate) = source.datafile.report_progress()
            ps.update(
                message='Ingesting {}: rate: {}'.format(source.spec.name, rate),
                item_count=n_records)

        source.datafile.load_rows(iterable_source,
                                  callback=ingest_progress_f,
                                  limit=500 if self.limited_run else None,
                                  intuit_type=True, run_stats=False)

        if source.datafile.meta['warnings']:
            for w in source.datafile.meta['warnings']:
                self.error("Ingestion error: {}".format(w))

        ps.update(message='Ingested to {}'.format(source.datafile.syspath))
        ps.update(message='Updating tables and specs for {}'.format(source.name))
        # source.update_table()  # Generate the source tables.
        source.update_spec()  # Update header_lines, start_line, etc.

        if self.limited_run:
            source.end_line = None  # Otherwise, it will be 500

        self.build_source_files.sources.objects_to_record()
        ps.update(message='Ingested {}'.format(source.datafile.path), state='done')
        source.state = source.STATES.INGESTED
        self.commit()
        return True

    except Exception as e:
        import traceback
        from ambry.util import qualified_class_name

        ps.update(
            message='Source {} failed with exception: {}'.format(source.name, e),
            exception_class=qualified_class_name(e),
            exception_trace=str(traceback.format_exc()),
            state='error'
        )
        source.state = source.STATES.INGESTING + '_error'
        self.commit()
        return False
Ingest a single source
def drop(self, table): """ Drop a table from the schema. :param table: The table :type table: str """ blueprint = self._create_blueprint(table) blueprint.drop() self._build(blueprint)
Drop a table from the schema. :param table: The table :type table: str
def allowPatternsForNameChecking(self, patternsFunc, patternsClass): """ Allow name exceptions by given patterns. @param patternsFunc: patterns of special function names @param patternsClass: patterns of special class names """ cfgParser = self.linter.cfgfile_parser nameChecker = self.getCheckerByName(NameChecker) if not nameChecker: return if patternsFunc: regexFuncAdd = "|((%s).+)$" % "|".join(patternsFunc) else: regexFuncAdd = "" if patternsClass: regexClassAdd = "|((%s).+)$" % "|".join(patternsClass) else: regexClassAdd = "" # Modify regex for function, method and class name. regexMethod = cfgParser.get("BASIC", "method-rgx") + regexFuncAdd regexFunction = cfgParser.get("BASIC", "function-rgx") + regexFuncAdd regexClass = cfgParser.get("BASIC", "class-rgx") + regexClassAdd # Save to config parser. cfgParser.set("BASIC", "method-rgx", regexMethod) cfgParser.set("BASIC", "function-rgx", regexFunction) cfgParser.set("BASIC", "class-rgx", regexClass) # Save to name checker. nameChecker.config.method_rgx = re.compile(regexMethod) nameChecker.config.function_rgx = re.compile(regexFunction) nameChecker.config.class_rgx = re.compile(regexClass)
Allow name exceptions by given patterns. @param patternsFunc: patterns of special function names @param patternsClass: patterns of special class names
def describe_instances(self, xml_bytes):
    """
    Parse the reservations XML payload that is returned from an AWS
    describeInstances API call.

    Instead of returning the reservations as the "top-most" object, we
    return the object that most developers and their code will be
    interested in: the instances. On each instance, the reservation is
    available as an attribute of the instance object.

    The following instance attributes are optional:
        * ami_launch_index
        * key_name
        * kernel_id
        * product_codes
        * ramdisk_id
        * reason

    @param xml_bytes: raw XML payload from AWS.
    """
    root = XML(xml_bytes)
    results = []
    # There may be a more elegant way to do this:
    for reservation_data in root.find("reservationSet"):
        # Create a reservation object with the parsed data.
        reservation = model.Reservation(
            reservation_id=reservation_data.findtext("reservationId"),
            owner_id=reservation_data.findtext("ownerId"))
        # Get the list of instances.
        instances = self.instances_set(
            reservation_data, reservation)
        results.extend(instances)
    return results
Parse the reservations XML payload that is returned from an AWS describeInstances API call. Instead of returning the reservations as the "top-most" object, we return the object that most developers and their code will be interested in: the instances. On each instance, the reservation is available as an attribute of the instance object. The following instance attributes are optional: * ami_launch_index * key_name * kernel_id * product_codes * ramdisk_id * reason @param xml_bytes: raw XML payload from AWS.
def host(proxy=None):
    '''
    This grain is set by the NAPALM grain module
    only when running in a proxy minion.
    When Salt is installed directly on the network device,
    thus running a regular minion, the ``host`` grain
    provides the physical hostname of the network device,
    as it would be on an ordinary minion server.
    When running in a proxy minion, ``host`` points to the
    value configured in the pillar: :mod:`NAPALM proxy module <salt.proxy.napalm>`.

    .. note::

        The difference between ``host`` and ``hostname`` is that
        ``host`` provides the physical location - either domain name or IP address,
        while ``hostname`` provides the hostname as configured on the device.
        They are not necessarily the same.

    .. versionadded:: 2017.7.0

    CLI Example:

    .. code-block:: bash

        salt 'device*' grains.get host

    Output:

    .. code-block:: yaml

        device1:
            ip-172-31-13-136.us-east-2.compute.internal
        device2:
            ip-172-31-11-193.us-east-2.compute.internal
        device3:
            ip-172-31-2-181.us-east-2.compute.internal
    '''
    if proxy and salt.utils.napalm.is_proxy(__opts__):
        # this grain is set only when running in a proxy minion
        # otherwise will use the default Salt grains
        return {'host': _get_device_grain('hostname', proxy=proxy)}
This grain is set by the NAPALM grain module only when running in a proxy minion. When Salt is installed directly on the network device, thus running a regular minion, the ``host`` grain provides the physical hostname of the network device, as it would be on an ordinary minion server. When running in a proxy minion, ``host`` points to the value configured in the pillar: :mod:`NAPALM proxy module <salt.proxy.napalm>`. .. note:: The difference between ``host`` and ``hostname`` is that ``host`` provides the physical location - either domain name or IP address, while ``hostname`` provides the hostname as configured on the device. They are not necessarily the same. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt 'device*' grains.get host Output: .. code-block:: yaml device1: ip-172-31-13-136.us-east-2.compute.internal device2: ip-172-31-11-193.us-east-2.compute.internal device3: ip-172-31-2-181.us-east-2.compute.internal
def put(self, local, remote, contents=None, quiet=False):
    """
    Puts a local file (or contents) on to the FTP server

    local can be:
        a string: path to input file
        a file: opened for reading
        None: contents are pushed
    """
    remote_dir = os.path.dirname(remote)
    remote_file = os.path.basename(local) \
        if remote.endswith('/') else os.path.basename(remote)

    if contents:
        # local is ignored if contents is set
        local_file = buffer_type(contents)
    elif isinstance(local, file_type):
        local_file = local
    else:
        local_file = open(local, 'rb')

    current = self.conn.pwd()
    self.descend(remote_dir, force=True)

    size = 0
    try:
        self.conn.storbinary('STOR %s' % remote_file, local_file)
        size = self.conn.size(remote_file)
    except Exception:
        if not quiet:
            raise
    finally:
        local_file.close()
        self.conn.cwd(current)
    return size
Puts a local file (or contents) on to the FTP server local can be: a string: path to input file a file: opened for reading None: contents are pushed
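The remote-name rule (a trailing slash keeps the local basename, otherwise the remote basename wins) can be isolated and tested without an FTP server:

import os

def remote_name(local, remote):
    # mirrors the conditional at the top of put()
    return os.path.basename(local) if remote.endswith('/') else os.path.basename(remote)

assert remote_name('/tmp/report.csv', '/exports/') == 'report.csv'
assert remote_name('/tmp/report.csv', '/exports/renamed.csv') == 'renamed.csv'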
def fetchGroupInfo(self, *group_ids): """ Get groups' info from IDs, unordered :param group_ids: One or more group ID(s) to query :return: :class:`models.Group` objects, labeled by their ID :rtype: dict :raises: FBchatException if request failed """ threads = self.fetchThreadInfo(*group_ids) groups = {} for id_, thread in threads.items(): if thread.type == ThreadType.GROUP: groups[id_] = thread else: raise FBchatUserError("Thread {} was not a group".format(thread)) return groups
Get groups' info from IDs, unordered :param group_ids: One or more group ID(s) to query :return: :class:`models.Group` objects, labeled by their ID :rtype: dict :raises: FBchatException if request failed
def add_tour_step(self, message, selector=None, name=None, title=None, theme=None, alignment=None, duration=None): """ Allows the user to add tour steps for a website. @Params message - The message to display. selector - The CSS Selector of the Element to attach to. name - If creating multiple tours at the same time, use this to select the tour you wish to add steps to. title - Additional header text that appears above the message. theme - (NON-Bootstrap Tours ONLY) The styling of the tour step. Choose from "light"/"arrows", "dark", "default", "square", and "square-dark". ("arrows" is used if None is selected.) alignment - Choose from "top", "bottom", "left", and "right". ("top" is the default alignment). duration - (Bootstrap Tours ONLY) The amount of time, in seconds, before automatically advancing to the next tour step. """ if not selector: selector = "html" if page_utils.is_xpath_selector(selector): selector = self.convert_to_css_selector(selector, By.XPATH) selector = self.__escape_quotes_if_needed(selector) if not name: name = "default" if name not in self._tour_steps: # By default, will create an IntroJS tour if no tours exist self.create_tour(name=name, theme="introjs") if not title: title = "" title = self.__escape_quotes_if_needed(title) if message: message = self.__escape_quotes_if_needed(message) else: message = "" if not alignment or ( alignment not in ["top", "bottom", "left", "right"]): if "Hopscotch" not in self._tour_steps[name][0]: alignment = "top" else: alignment = "bottom" if "Bootstrap" in self._tour_steps[name][0]: self.__add_bootstrap_tour_step( message, selector=selector, name=name, title=title, alignment=alignment, duration=duration) elif "Hopscotch" in self._tour_steps[name][0]: self.__add_hopscotch_tour_step( message, selector=selector, name=name, title=title, alignment=alignment) elif "IntroJS" in self._tour_steps[name][0]: self.__add_introjs_tour_step( message, selector=selector, name=name, title=title, alignment=alignment) else: self.__add_shepherd_tour_step( message, selector=selector, name=name, title=title, theme=theme, alignment=alignment)
Allows the user to add tour steps for a website. @Params message - The message to display. selector - The CSS Selector of the Element to attach to. name - If creating multiple tours at the same time, use this to select the tour you wish to add steps to. title - Additional header text that appears above the message. theme - (NON-Bootstrap Tours ONLY) The styling of the tour step. Choose from "light"/"arrows", "dark", "default", "square", and "square-dark". ("arrows" is used if None is selected.) alignment - Choose from "top", "bottom", "left", and "right". ("top" is the default alignment). duration - (Bootstrap Tours ONLY) The amount of time, in seconds, before automatically advancing to the next tour step.
def run(self): """Run tracking dependencies """ self.msg.resolving() self.repositories() if self.find_pkg: self.dependencies_list.reverse() self.requires = Utils().dimensional_list(self.dependencies_list) self.dependencies = Utils().remove_dbs(self.requires) if self.dependencies == []: self.dependencies = ["No dependencies"] if "--graph=" in self.flag: self.deps_tree() self.msg.done() pkg_len = len(self.name) + 24 print("") # new line at start self.msg.template(pkg_len) print("| Package {0}{1}{2} dependencies :".format( self.cyan, self.name, self.endc)) self.msg.template(pkg_len) print("\\") print(" +---{0}[ Tree of dependencies ]{1}".format(self.yellow, self.endc)) index = 0 for pkg in self.dependencies: if "--check-deps" in self.flag: used = self.check_used(pkg) self.deps_used(pkg, used) used = "{0} {1}{2}{3}".format( "is dependency -->", self.cyan, ", ".join(used), self.endc) else: used = "" index += 1 installed = "" if find_package(pkg + self.meta.sp, self.meta.pkg_path): if self.meta.use_colors in ["off", "OFF"]: installed = "* " print(" |") print(" {0}{1}: {2}{3}{4} {5}{6}".format( "+--", index, self.green, pkg, self.endc, installed, used)) else: print(" |") print(" {0}{1}: {2}{3}{4} {5}".format( "+--", index, self.red, pkg, self.endc, installed)) if self.meta.use_colors in ["off", "OFF"]: print("\n * = Installed\n") else: print("") # new line at end if "--graph=" in self.flag: self.graph() else: self.msg.done() print("\nNo package was found to match\n") raise SystemExit(1)
Run tracking dependencies
def original_query_sequence_length(self):
    """Similar to get_query_sequence_length, but also includes
    hard-clipped bases. If there is no cigar, fall back to the
    query sequence length.

    :return: the length of the query before any clipping
    :rtype: int
    """
    if not self.is_aligned() or not self.entries.cigar:
        return self.query_sequence_length  # take the naive approach
    # we are here with something aligned, so take the more intelligent cigar approach
    return sum([x[0] for x in self.cigar_array if re.match('[HMIS=X]', x[1])])
Similar to get_query_sequence_length, but also includes hard-clipped bases. If there is no cigar, fall back to the query sequence length. :return: the length of the query before any clipping :rtype: int
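The same [HMIS=X] filter can be exercised on its own. A sketch assuming the cigar is a list of (length, op) tuples, matching cigar_array above:

import re

def query_length_from_cigar(cigar):
    # D (deletion) and N (reference skip) consume the reference only,
    # so they are excluded from the query-length sum.
    return sum(n for n, op in cigar if re.match('[HMIS=X]', op))

# 5 soft-clipped + 10 matched + 2 inserted + 3 hard-clipped = 20 original bases;
# the 4-base deletion does not count.
assert query_length_from_cigar([(5, 'S'), (10, 'M'), (2, 'I'), (4, 'D'), (3, 'H')]) == 20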
def dir(cls, label, children): """Return ``FSEntry`` directory object.""" return FSEntry(label=label, children=children, type=u"Directory", use=None)
Return ``FSEntry`` directory object.
def evaluate_tour_P(self, tour): """ Use Cythonized version to evaluate the score of a current tour, with better precision on the distance of the contigs. """ from .chic import score_evaluate_P return score_evaluate_P(tour, self.active_sizes, self.P)
Use Cythonized version to evaluate the score of a current tour, with better precision on the distance of the contigs.
def from_file(cls, filename): """ Read an Fiesta input from a file. Currently tested to work with files generated from this class itself. Args: filename: Filename to parse. Returns: FiestaInput object """ with zopen(filename) as f: return cls.from_string(f.read())
Read an Fiesta input from a file. Currently tested to work with files generated from this class itself. Args: filename: Filename to parse. Returns: FiestaInput object
def update_asset_content(self, asset_content_form):
    """Updates an existing asset content.

    arg:    asset_content_form (osid.repository.AssetContentForm):
            the form containing the elements to be updated
    raise:  IllegalState - ``asset_content_form`` already used in an
            update transaction
    raise:  InvalidArgument - the form contains an invalid value
    raise:  NullArgument - ``asset_content_form`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    raise:  Unsupported - ``asset_content_form`` did not originate
            from ``get_asset_content_form_for_update()``
    *compliance: mandatory -- This method must be implemented.*

    """
    # Implemented from template for
    # osid.repository.AssetAdminSession.update_asset_content_template
    from dlkit.abstract_osid.repository.objects import AssetContentForm as ABCAssetContentForm
    collection = JSONClientValidated('repository',
                                     collection='Asset',
                                     runtime=self._runtime)
    if not isinstance(asset_content_form, ABCAssetContentForm):
        raise errors.InvalidArgument('argument type is not an AssetContentForm')
    if not asset_content_form.is_for_update():
        raise errors.InvalidArgument('the AssetContentForm is for create only, not update')
    try:
        if self._forms[asset_content_form.get_id().get_identifier()] == UPDATED:
            raise errors.IllegalState('asset_content_form already used in an update transaction')
    except KeyError:
        raise errors.Unsupported('asset_content_form did not originate from this session')
    if not asset_content_form.is_valid():
        raise errors.InvalidArgument('one or more of the form elements is invalid')

    asset_id = Id(asset_content_form._my_map['assetId']).get_identifier()
    asset = collection.find_one(
        {'$and': [{'_id': ObjectId(asset_id)},
                  {'assigned' + self._catalog_name + 'Ids': {'$in': [str(self._catalog_id)]}}]})

    index = 0
    found = False
    for i in asset['assetContents']:
        if i['_id'] == ObjectId(asset_content_form._my_map['_id']):
            asset['assetContents'].pop(index)
            asset['assetContents'].insert(index, asset_content_form._my_map)
            found = True
            break
        index += 1
    if not found:
        raise errors.NotFound()
    try:
        collection.save(asset)
    except Exception:  # what exceptions does mongodb save raise?
        raise errors.OperationFailed()
    self._forms[asset_content_form.get_id().get_identifier()] = UPDATED

    # Note: this is out of spec. The OSIDs don't require an object to be returned:
    from .objects import AssetContent
    return AssetContent(
        osid_object_map=asset_content_form._my_map,
        runtime=self._runtime,
        proxy=self._proxy)
Updates an existing asset content. arg: asset_content_form (osid.repository.AssetContentForm): the form containing the elements to be updated raise: IllegalState - ``asset_content_form`` already used in an update transaction raise: InvalidArgument - the form contains an invalid value raise: NullArgument - ``asset_content_form`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure raise: Unsupported - ``asset_content_form`` did not originate from ``get_asset_content_form_for_update()`` *compliance: mandatory -- This method must be implemented.*
def draw_build_target(self, surf): """Draw the build target.""" round_half = lambda v, cond: round(v - 0.5) + 0.5 if cond else round(v) queued_action = self._queued_action if queued_action: radius = queued_action.footprint_radius if radius: pos = self.get_mouse_pos() if pos: pos = point.Point(round_half(pos.world_pos.x, (radius * 2) % 2), round_half(pos.world_pos.y, (radius * 2) % 2)) surf.draw_circle( colors.PLAYER_ABSOLUTE_PALETTE[ self._obs.observation.player_common.player_id], pos, radius)
Draw the build target.
def update_metadata_filters(metadata, jupyter_md, cell_metadata): """Update or set the notebook and cell metadata filters""" cell_metadata = [m for m in cell_metadata if m not in ['language', 'magic_args']] if 'cell_metadata_filter' in metadata.get('jupytext', {}): metadata_filter = metadata_filter_as_dict(metadata.get('jupytext', {})['cell_metadata_filter']) if isinstance(metadata_filter.get('excluded'), list): metadata_filter['excluded'] = [key for key in metadata_filter['excluded'] if key not in cell_metadata] metadata_filter.setdefault('additional', []) if isinstance(metadata_filter.get('additional'), list): for key in cell_metadata: if key not in metadata_filter['additional']: metadata_filter['additional'].append(key) metadata.setdefault('jupytext', {})['cell_metadata_filter'] = metadata_filter_as_string(metadata_filter) if not jupyter_md: # Set a metadata filter equal to the current metadata in script cell_metadata = {'additional': cell_metadata, 'excluded': 'all'} metadata.setdefault('jupytext', {})['notebook_metadata_filter'] = '-all' metadata.setdefault('jupytext', {})['cell_metadata_filter'] = metadata_filter_as_string(cell_metadata)
Update or set the notebook and cell metadata filters
def handle_padding(self, padding): '''Pads the image with transparent pixels if necessary.''' left = padding[0] top = padding[1] right = padding[2] bottom = padding[3] offset_x = 0 offset_y = 0 new_width = self.engine.size[0] new_height = self.engine.size[1] if left > 0: offset_x = left new_width += left if top > 0: offset_y = top new_height += top if right > 0: new_width += right if bottom > 0: new_height += bottom new_engine = self.context.modules.engine.__class__(self.context) new_engine.image = new_engine.gen_image((new_width, new_height), '#fff') new_engine.enable_alpha() new_engine.paste(self.engine, (offset_x, offset_y)) self.engine.image = new_engine.image
Pads the image with transparent pixels if necessary.
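The size/offset arithmetic is independent of the imaging engine and easy to check by hand:

# padding order is (left, top, right, bottom), matching the method above
left, top, right, bottom = 10, 5, 0, 20
width, height = 100, 80
new_size = (width + left + right, height + top + bottom)
offset = (left, top)    # the original image is pasted at (left, top)
assert new_size == (110, 105) and offset == (10, 5)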
def _load_cached_tlds(self): """ Loads TLDs from cached file to set. :return: Set of current TLDs :rtype: set """ # check if cached file is readable if not os.access(self._tld_list_path, os.R_OK): self._logger.error("Cached file is not readable for current " "user. ({})".format(self._tld_list_path)) raise CacheFileError( "Cached file is not readable for current user." ) set_of_tlds = set() with open(self._tld_list_path, 'r') as f_cache_tld: for line in f_cache_tld: tld = line.strip().lower() # skip empty lines if not tld: continue # skip comments if tld[0] == '#': continue set_of_tlds.add("." + tld) set_of_tlds.add("." + idna.decode(tld)) return set_of_tlds
Loads TLDs from cached file to set. :return: Set of current TLDs :rtype: set
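The line-filtering rules (skip blanks and '#' comments, lowercase, prefix a dot) can be checked on in-memory lines; note the real method additionally stores the IDNA-decoded form of each TLD:

lines = ['# comment line', '', 'COM', 'org']
tlds = set()
for line in lines:
    tld = line.strip().lower()
    if not tld or tld[0] == '#':
        continue
    tlds.add('.' + tld)
assert tlds == {'.com', '.org'}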
def bySignificator(self, ID): """ Returns all directions to a significator. """ res = [] for direction in self.table: if ID in direction[2]: res.append(direction) return res
Returns all directions to a significator.
def plot_eval_results(eval_results, metric=None, xaxislabel=None, yaxislabel=None,
                      title=None, title_fontsize='x-large', axes_title_fontsize='large',
                      show_metric_direction=True, metric_direction_font_size='large',
                      subplots_opts=None, subplots_adjust_opts=None, figsize='auto',
                      **fig_kwargs):
    """
    Plot the evaluation results from `eval_results`. `eval_results` must be a sequence containing `(param, values)`
    tuples, where `param` is the parameter value to appear on the x axis and `values` can be a dict structure
    containing the metric values. `eval_results` can be created using the `results_by_parameter` function from the
    `topicmod.common` module.
    Set `metric` to plot only a specific metric.
    Set `xaxislabel` for a label on the x-axis.
    Set `yaxislabel` for a label on the y-axis.
    Set `title` for a plot title.
    Options in a dict `subplots_opts` will be passed to `plt.subplots(...)`.
    Options in a dict `subplots_adjust_opts` will be passed to `fig.subplots_adjust(...)`.
    `figsize` can be set to a tuple `(width, height)` or to `"auto"` (default) which will set the size to
    `(8, 2 * <num. of metrics>)`.
    """
    if type(eval_results) not in (list, tuple) or not eval_results:
        raise ValueError('`eval_results` must be a list or tuple with at least one element')

    if type(eval_results[0]) not in (list, tuple) or len(eval_results[0]) != 2:
        raise ValueError('`eval_results` must be a list or tuple containing a (param, values) tuple. '
                         'Maybe `eval_results` must be converted with `results_by_parameter`.')

    if metric is not None and type(metric) not in (list, tuple):
        metric = [metric]
    elif metric is None:
        # remove special evaluation result 'model': the calculated model itself
        metric = list(set(next(iter(eval_results))[1].keys()) - {'model'})

    metric = sorted(metric)

    metric_direction = []
    for m in metric:
        if m == 'perplexity':
            metric_direction.append('minimize')
        else:
            m_fn_name = 'metric_%s' % (m[:16] if m.startswith('coherence_gensim') else m)
            m_fn = getattr(evaluate, m_fn_name, None)
            if m_fn:
                metric_direction.append(getattr(m_fn, 'direction', 'unknown'))
            else:
                metric_direction.append('unknown')

    n_metrics = len(metric)

    assert n_metrics == len(metric_direction)

    metrics_ordered = []
    for m_dir in sorted(set(metric_direction), reverse=True):
        metrics_ordered.extend([(m, d) for m, d in zip(metric, metric_direction) if d == m_dir])

    assert n_metrics == len(metrics_ordered)

    # get figure and subplots (axes)
    if figsize == 'auto':
        figsize = (8, 2*n_metrics)

    subplots_kwargs = dict(nrows=n_metrics, ncols=1, sharex=True, constrained_layout=True, figsize=figsize)
    subplots_kwargs.update(subplots_opts or {})
    subplots_kwargs.update(fig_kwargs)

    fig, axes = plt.subplots(**subplots_kwargs)

    # set title
    if title:
        fig.suptitle(title, fontsize=title_fontsize)

    x = list(zip(*eval_results))[0]

    # set adjustments
    if title:
        subplots_adjust_kwargs = dict(top=0.9, hspace=0.3)
    else:
        subplots_adjust_kwargs = {}

    subplots_adjust_kwargs.update(subplots_adjust_opts or {})

    if subplots_adjust_kwargs:
        fig.subplots_adjust(**subplots_adjust_kwargs)

    # draw subplot for each metric
    axes_pos_per_dir = defaultdict(list)
    for i, (ax, (m, m_dir)) in enumerate(zip(axes.flatten(), metrics_ordered)):
        if show_metric_direction:
            axes_pos_per_dir[m_dir].append(ax.get_position())

        y = [metric_res[m] for _, metric_res in eval_results]
        ax.plot(x, y, label=m)
        ax.set_title(m, fontsize=axes_title_fontsize)

        # set axis labels
        if xaxislabel and i == len(metric)-1:
            ax.set_xlabel(xaxislabel)
        if yaxislabel:
            ax.set_ylabel(yaxislabel)

    # show grouped metric direction on the left
    if axes_pos_per_dir:   # = if show_metric_direction
        left_xs = []
        ys = []
        for m_dir, bboxes in axes_pos_per_dir.items():
            left_xs.append(min(bb.x0 for bb in bboxes))
            min_y = min(bb.y0 for bb in bboxes)
            max_y = max(bb.y1 for bb in bboxes)
            ys.append((min_y, max_y))

        left_x = min(left_xs) / 2.5

        fig.lines = []
        for (min_y, max_y), m_dir in zip(ys, axes_pos_per_dir.keys()):
            center_y = min_y + (max_y - min_y) / 2

            fig.lines.append(Line2D((left_x, left_x), (min_y, max_y), transform=fig.transFigure,
                                    linewidth=5, color='lightgray'))

            fig.text(left_x / 1.5, center_y, m_dir, fontsize=metric_direction_font_size,
                     rotation='vertical', horizontalalignment='right', verticalalignment='center')

    return fig, axes
Plot the evaluation results from `eval_results`. `eval_results` must be a sequence containing `(param, values)` tuples, where `param` is the parameter value to appear on the x axis and `values` can be a dict structure containing the metric values. `eval_results` can be created using the `results_by_parameter` function from the `topicmod.common` module. Set `metric` to plot only a specific metric. Set `xaxislabel` for a label on the x-axis. Set `yaxislabel` for a label on the y-axis. Set `title` for a plot title. Options in a dict `subplots_opts` will be passed to `plt.subplots(...)`. Options in a dict `subplots_adjust_opts` will be passed to `fig.subplots_adjust(...)`. `figsize` can be set to a tuple `(width, height)` or to `"auto"` (default) which will set the size to `(8, 2 * <num. of metrics>)`.
def add_tooltip_to_highlighted_item(self, index): """ Add a tooltip showing the full path of the currently highlighted item of the PathComboBox. """ self.setItemData(index, self.itemText(index), Qt.ToolTipRole)
Add a tooltip showing the full path of the currently highlighted item of the PathComboBox.
def iter_following(username, number=-1, etag=None): """List the people ``username`` follows. :param str username: (required), login of the user :param int number: (optional), number of users being followed by username to return. Default: -1, return all of them :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`User <github3.users.User>` """ return gh.iter_following(username, number, etag) if username else []
List the people ``username`` follows. :param str username: (required), login of the user :param int number: (optional), number of users being followed by username to return. Default: -1, return all of them :param str etag: (optional), ETag from a previous request to the same endpoint :returns: generator of :class:`User <github3.users.User>`
def _set_people(self, people): """ Sets who the object is sent to """ if hasattr(people, "object_type"): people = [people] elif hasattr(people, "__iter__"): people = list(people) return people
Sets who the object is sent to
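A standalone copy of the normalisation shows the two branches; the Person stand-in below is illustrative only:

def normalize_people(people):
    if hasattr(people, 'object_type'):
        people = [people]           # single recipient: wrap in a list
    elif hasattr(people, '__iter__'):
        people = list(people)       # any iterable: materialise it
    return people

class Person:
    object_type = 'person'

p = Person()
assert normalize_people(p) == [p]
assert normalize_people(iter([p])) == [p]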
def get_hosts(self): """ Return a list of parsed hosts info, with the limit applied if required. """ limited_hosts = {} if self.limit is not None: # Find hosts and groups of hosts to include for include in self.limit['include']: # Include whole group for hostname in self.hosts_in_group(include): limited_hosts[hostname] = self.hosts[hostname] # Include individual host if include in self.hosts: limited_hosts[include] = self.hosts[include] # Find hosts and groups of hosts to exclude for exclude in self.limit["exclude"]: # Exclude whole group for hostname in self.hosts_in_group(exclude): if hostname in limited_hosts: limited_hosts.pop(hostname) # Exclude individual host if exclude in limited_hosts: limited_hosts.pop(exclude) return limited_hosts else: # Return all hosts return self.hosts
Return a list of parsed hosts info, with the limit applied if required.
def do_response(self, response_args=None, request=None, **kwargs): """ **Placeholder for the time being** :param response_args: :param request: :param kwargs: request arguments :return: Response information """ links = [Link(href=h, rel=OIC_ISSUER) for h in kwargs['hrefs']] _response = JRD(subject=kwargs['subject'], links=links) info = { 'response': _response.to_json(), 'http_headers': [('Content-type', 'application/json')] } return info
**Placeholder for the time being** :param response_args: :param request: :param kwargs: request arguments :return: Response information
def _update_resource_view(self, log=False): # type: () -> bool """Check if resource view exists in HDX and if so, update resource view Returns: bool: True if updated and False if not """ update = False if 'id' in self.data and self._load_from_hdx('resource view', self.data['id']): update = True else: if 'resource_id' in self.data: resource_views = self.get_all_for_resource(self.data['resource_id']) for resource_view in resource_views: if self.data['title'] == resource_view['title']: self.old_data = self.data self.data = resource_view.data update = True break if update: if log: logger.warning('resource view exists. Updating %s' % self.data['id']) self._merge_hdx_update('resource view', 'id') return update
Check if resource view exists in HDX and if so, update resource view Returns: bool: True if updated and False if not
def wp_status(self): '''show status of wp download''' try: print("Have %u of %u waypoints" % (self.wploader.count()+len(self.wp_received), self.wploader.expected_count)) except Exception: print("Have %u waypoints" % (self.wploader.count()+len(self.wp_received)))
show status of wp download
def auth(self, request): """ let's auth the user to the Service """ client = self.get_evernote_client() request_token = client.get_request_token(self.callback_url(request)) # Save the request token information for later request.session['oauth_token'] = request_token['oauth_token'] request.session['oauth_token_secret'] = request_token['oauth_token_secret'] # Redirect the user to the Evernote authorization URL # return the URL string which will be used by redirect() # from the calling func return client.get_authorize_url(request_token)
let's auth the user to the Service
def create_char(self, location, bitmap): """Create a new character. The HD44780 supports up to 8 custom characters (location 0-7). :param location: The place in memory where the character is stored. Values need to be integers between 0 and 7. :type location: int :param bitmap: The bitmap containing the character. This should be a tuple of 8 numbers, each representing a 5 pixel row. :type bitmap: tuple of int :raises AssertionError: Raised when an invalid location is passed in or when bitmap has an incorrect size. Example: .. sourcecode:: python >>> smiley = ( ... 0b00000, ... 0b01010, ... 0b01010, ... 0b00000, ... 0b10001, ... 0b10001, ... 0b01110, ... 0b00000, ... ) >>> lcd.create_char(0, smiley) """ assert 0 <= location <= 7, 'Only locations 0-7 are valid.' assert len(bitmap) == 8, 'Bitmap should have exactly 8 rows.' # Store previous position pos = self.cursor_pos # Write character to CGRAM self.command(c.LCD_SETCGRAMADDR | location << 3) for row in bitmap: self._send_data(row) # Restore cursor pos self.cursor_pos = pos
Create a new character. The HD44780 supports up to 8 custom characters (location 0-7). :param location: The place in memory where the character is stored. Values need to be integers between 0 and 7. :type location: int :param bitmap: The bitmap containing the character. This should be a tuple of 8 numbers, each representing a 5 pixel row. :type bitmap: tuple of int :raises AssertionError: Raised when an invalid location is passed in or when bitmap has an incorrect size. Example: .. sourcecode:: python >>> smiley = ( ... 0b00000, ... 0b01010, ... 0b01010, ... 0b00000, ... 0b10001, ... 0b10001, ... 0b01110, ... 0b00000, ... ) >>> lcd.create_char(0, smiley)
def closed(self) -> bool: '''Return whether the connection is closed.''' return not self.writer or not self.reader or self.reader.at_eof()
Return whether the connection is closed.
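The predicate can be exercised with tiny stand-ins for the reader/writer pair it inspects; the stub below only mimics asyncio's StreamReader.at_eof():

class FakeReader:
    def __init__(self, eof):
        self._eof = eof
    def at_eof(self):
        return self._eof

def is_closed(writer, reader):
    # same expression as the closed property above
    return not writer or not reader or reader.at_eof()

assert is_closed(None, None) is True                   # never connected
assert is_closed(object(), FakeReader(True)) is True   # peer hung up
assert is_closed(object(), FakeReader(False)) is False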
def create_tar (archive, compression, cmd, verbosity, interactive, filenames): """Create a TAR archive.""" cmdlist = [cmd, '-c'] add_star_opts(cmdlist, compression, verbosity) cmdlist.append("file=%s" % archive) cmdlist.extend(filenames) return cmdlist
Create a TAR archive.
def write(self): """Write the current queue to a file. We need this to continue an earlier session.""" queue_path = os.path.join(self.config_dir, 'queue') queue_file = open(queue_path, 'wb+') try: pickle.dump(self.queue, queue_file, -1) except Exception: print('Error while writing to queue file. Wrong file permissions?') queue_file.close()
Write the current queue to a file. We need this to continue an earlier session.
def connect(self, path="", headers=None, query=None, timeout=0, **kwargs): """ make the actual connection to the websocket :param headers: dict, key/val pairs of any headers to add to connection, if you would like to override headers just pass in an empty value :param query: dict, any query string params you want to send up with the connection url :returns: Payload, this will return the CONNECT response from the websocket """ ret = None ws_url = self.get_fetch_url(path, query) ws_headers = self.get_fetch_headers("GET", headers) ws_headers = ['{}: {}'.format(h[0], h[1]) for h in ws_headers.items() if h[1]] timeout = self.get_timeout(timeout=timeout, **kwargs) self.set_trace(kwargs.pop("trace", False)) #pout.v(websocket_url, websocket_headers, self.query_kwargs, self.headers) try: logger.debug("{} connecting to {}".format(self.client_id, ws_url)) self.ws = websocket.create_connection( ws_url, header=ws_headers, timeout=timeout, sslopt={'cert_reqs':ssl.CERT_NONE}, ) ret = self.recv_callback(callback=lambda r: r.uuid == "CONNECT") if ret.code >= 400: raise IOError("Failed to connect with code {}".format(ret.code)) # self.headers = headers # self.query_kwargs = query_kwargs except websocket.WebSocketTimeoutException: raise IOError("Failed to connect within {} seconds".format(timeout)) except websocket.WebSocketException as e: raise IOError("Failed to connect with error: {}".format(e)) except socket.error as e: # this is an IOError, I just wanted to be aware of that, most common # problem is: [Errno 111] Connection refused raise return ret
make the actual connection to the websocket :param headers: dict, key/val pairs of any headers to add to connection, if you would like to override headers just pass in an empty value :param query: dict, any query string params you want to send up with the connection url :returns: Payload, this will return the CONNECT response from the websocket
def validate(self, data): """Validate the data against the schema. """ validator = self._schema.validator(self._id) validator.validate(data)
Validate the data against the schema.
def shutdown_request(self, request): """ Called to shutdown and close an individual request. """ try: request.shutdown(socket.SHUT_WR) except socket.error: pass self.close_request(request)
Called to shutdown and close an individual request.
def run(self):
    """
    If the connection drops, then run_forever will terminate and a
    reconnection attempt will be made.
    """
    while True:
        self.connect_lock.acquire()
        if self.stopped():
            self.connect_lock.release()
            return
        self.__connect()
        self.connect_lock.release()
        self.ws.run_forever()
If the connection drops, then run_forever will terminate and a reconnection attempt will be made.
def volumes(self): """This property prepares the list of volumes :return a list of volumes. """ return sys_volumes.VolumeCollection( self._conn, utils.get_subresource_path_by(self, 'Volumes'), redfish_version=self.redfish_version)
This property prepares the list of volumes :return a list of volumes.
def remove_parameter(self, param_id=None, name=None, ref_id=None):
    """
    Removes parameters based on function arguments.

    This can remove parameters based on the following param values:
        param/@id
        param/@name
        param/@ref_id

    Each input is mutually exclusive. Calling this function with multiple
    values set will cause an IOCParseError exception. Calling this function
    without setting one value will raise an exception.

    :param param_id: The id of the parameter to remove.
    :param name: The name of the parameter to remove.
    :param ref_id: The IndicatorItem/Indicator id of the parameter to remove.
    :return: Number of parameters removed.
    """
    specified = []
    if param_id:
        specified.append('param_id')
    if name:
        specified.append('name')
    if ref_id:
        specified.append('ref_id')
    if len(specified) > 1:
        raise IOCParseError(
            'Must specify only param_id, name or ref_id. Specified {}'.format(str(specified)))
    elif len(specified) < 1:
        raise IOCParseError('Must specify a param_id, name or ref_id to remove a parameter')

    counter = 0
    parameters_node = self.parameters

    if param_id:
        params = parameters_node.xpath('//param[@id="{}"]'.format(param_id))
        for param in params:
            parameters_node.remove(param)
            counter += 1
    elif name:
        params = parameters_node.xpath('//param[@name="{}"]'.format(name))
        for param in params:
            parameters_node.remove(param)
            counter += 1
    elif ref_id:
        params = parameters_node.xpath('//param[@ref-id="{}"]'.format(ref_id))
        for param in params:
            parameters_node.remove(param)
            counter += 1
    return counter
Removes parameters based on function arguments. This can remove parameters based on the following param values: param/@id param/@name param/@ref_id Each input is mutually exclusive. Calling this function with multiple values set will cause an IOCParseError exception. Calling this function without setting one value will raise an exception. :param param_id: The id of the parameter to remove. :param name: The name of the parameter to remove. :param ref_id: The IndicatorItem/Indicator id of the parameter to remove. :return: Number of parameters removed.
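The mutual-exclusion guard at the top reduces to counting how many selectors were set; a standalone restatement:

def selector_count(param_id=None, name=None, ref_id=None):
    return sum(1 for value in (param_id, name, ref_id) if value)

assert selector_count(name='comment') == 1              # valid call
assert selector_count(param_id='1', ref_id='2') == 2    # would raise IOCParseError
assert selector_count() == 0                            # would also raise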
async def _write(self, path, data, *, flags=None, cas=None, acquire=None, release=None): """Sets the key to the given value. Returns: bool: ``True`` on success """ if not isinstance(data, bytes): raise ValueError("value must be bytes") path = "/v1/kv/%s" % path response = await self._api.put( path, params={ "flags": flags, "cas": cas, "acquire": acquire, "release": release }, data=data, headers={"Content-Type": "application/octet-stream"}) return response
Sets the key to the given value. Returns: bool: ``True`` on success
def is_admin(): """ https://stackoverflow.com/a/19719292 @return: True if the current user is an 'Admin' whatever that means (root on Unix), otherwise False. Warning: The inner function fails unless you have Windows XP SP2 or higher. The failure causes a traceback to be printed and this function to return False. """ if os.name == 'nt': import ctypes import traceback # WARNING: requires Windows XP SP2 or higher! try: return ctypes.windll.shell32.IsUserAnAdmin() except: traceback.print_exc() return False else: # Check for root on Posix return os.getuid() == 0
https://stackoverflow.com/a/19719292 @return: True if the current user is an 'Admin' whatever that means (root on Unix), otherwise False. Warning: The inner function fails unless you have Windows XP SP2 or higher. The failure causes a traceback to be printed and this function to return False.
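A typical gate built on top of it might look like this (the error message is illustrative):

def require_admin():
    # works on both platforms since is_admin() only returns a bool
    if not is_admin():
        raise PermissionError('re-run this tool as root/Administrator')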
def _LinearFoldByteStream(self, mapped_value, **unused_kwargs): """Folds the data type into a byte stream. Args: mapped_value (object): mapped value. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream. """ try: attribute_values = [ getattr(mapped_value, attribute_name, None) for attribute_name in self._attribute_names] attribute_values = [ value for value in attribute_values if value is not None] return self._operation.WriteTo(tuple(attribute_values)) except Exception as exception: error_string = ( 'Unable to write: {0:s} to byte stream with error: {1!s}').format( self._data_type_definition.name, exception) raise errors.FoldingError(error_string)
Folds the data type into a byte stream. Args: mapped_value (object): mapped value. Returns: bytes: byte stream. Raises: FoldingError: if the data type definition cannot be folded into the byte stream.
def _base_iterator( self, circuit: circuits.Circuit, qubit_order: ops.QubitOrderOrList, initial_state: Union[int, np.ndarray], perform_measurements: bool=True, ) -> Iterator['XmonStepResult']: """See definition in `cirq.SimulatesIntermediateState`. If the initial state is an int, the state is set to the computational basis state corresponding to this state. Otherwise if the initial state is a np.ndarray it is the full initial state. In this case it must be the correct size, be normalized (an L2 norm of 1), and be safely castable to an appropriate dtype for the simulator. """ qubits = ops.QubitOrder.as_qubit_order(qubit_order).order_for( circuit.all_qubits()) qubit_map = {q: i for i, q in enumerate(reversed(qubits))} if isinstance(initial_state, np.ndarray): initial_state = initial_state.astype(dtype=np.complex64, casting='safe') with xmon_stepper.Stepper( num_qubits=len(qubits), num_prefix_qubits=self.options.num_prefix_qubits, initial_state=initial_state, min_qubits_before_shard=self.options.min_qubits_before_shard, use_processes=self.options.use_processes ) as stepper: if len(circuit) == 0: yield XmonStepResult(stepper, qubit_map, {}) for moment in circuit: measurements = collections.defaultdict( list) # type: Dict[str, List[bool]] phase_map = {} # type: Dict[Tuple[int, ...], float] for op in moment.operations: gate = cast(ops.GateOperation, op).gate if isinstance(gate, ops.ZPowGate): index = qubit_map[op.qubits[0]] phase_map[(index,)] = cast(float, gate.exponent) elif isinstance(gate, ops.CZPowGate): index0 = qubit_map[op.qubits[0]] index1 = qubit_map[op.qubits[1]] phase_map[(index0, index1)] = cast(float, gate.exponent) elif isinstance(gate, ops.XPowGate): index = qubit_map[op.qubits[0]] stepper.simulate_w( index=index, half_turns=gate.exponent, axis_half_turns=0) elif isinstance(gate, ops.YPowGate): index = qubit_map[op.qubits[0]] stepper.simulate_w( index=index, half_turns=gate.exponent, axis_half_turns=0.5) elif isinstance(gate, ops.PhasedXPowGate): index = qubit_map[op.qubits[0]] stepper.simulate_w( index=index, half_turns=gate.exponent, axis_half_turns=gate.phase_exponent) elif isinstance(gate, ops.MeasurementGate): if perform_measurements: invert_mask = ( gate.invert_mask or len(op.qubits) * (False,)) for qubit, invert in zip(op.qubits, invert_mask): index = qubit_map[qubit] result = stepper.simulate_measurement(index) if invert: result = not result key = protocols.measurement_key(gate) measurements[key].append(result) else: # coverage: ignore raise TypeError('{!r} is not supported by the ' 'xmon simulator.'.format(gate)) stepper.simulate_phases(phase_map) yield XmonStepResult(stepper, qubit_map, measurements)
See definition in `cirq.SimulatesIntermediateState`. If the initial state is an int, the state is set to the computational basis state corresponding to this state. Otherwise if the initial state is a np.ndarray it is the full initial state. In this case it must be the correct size, be normalized (an L2 norm of 1), and be safely castable to an appropriate dtype for the simulator.