positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def restore_configuration_files(self):
    """Restore previously backed-up configuration files.

    For every file listed in ``self._configuration_to_save`` that is
    missing from the config directory, the ``.backup`` copy kept in the
    data directory is copied back.  A missing ``pg_ident.conf`` backup is
    replaced with an empty file instead (older versions never backed it
    up, and an empty file is enough for the server to start).
    """
    try:
        for name in self._configuration_to_save:
            target = os.path.join(self._config_dir, name)
            if os.path.isfile(target):
                # Never clobber a config file that is already in place.
                continue
            backup = os.path.join(self._data_dir, name + '.backup')
            if os.path.isfile(backup):
                shutil.copy(backup, target)
            elif name == 'pg_ident.conf':
                # Previously we didn't back up pg_ident.conf; if the file
                # is missing just create an empty one.
                open(target, 'w').close()
    except IOError:
        logger.exception('unable to restore configuration files from backup')
restore a previously saved postgresql.conf
def remove(self, bw):
    """
    Removes a buffer watch identifier.

    @type  bw: L{BufferWatch}
    @param bw: Buffer watch identifier.

    @raise KeyError: The buffer watch identifier was already removed.
        Suppressed for one-shot watches, which remove themselves after
        firing, so a second removal is expected and harmless.
    """
    try:
        self.__ranges.remove(bw)
    except KeyError:
        # One-shot watches may legitimately already be gone; only
        # propagate the error for persistent watches.
        if not bw.oneshot:
            raise
Removes a buffer watch identifier. @type bw: L{BufferWatch} @param bw: Buffer watch identifier. @raise KeyError: The buffer watch identifier was already removed.
def setup_foreground_minifollowups(workflow, coinc_file, single_triggers,
                                   tmpltbank_file, insp_segs, insp_data_name,
                                   insp_anal_name, dax_output, out_dir,
                                   tags=None):
    """ Create plots that followup the Nth loudest coincident injection
    from a statmap produced HDF file.

    Parameters
    ----------
    workflow: pycbc.workflow.Workflow
        The core workflow instance we are populating
    coinc_file:
    single_triggers: list of pycbc.workflow.File
        A list containing the file objects associated with the merged
        single detector trigger files for each ifo.
    tmpltbank_file: pycbc.workflow.File
        The file object pointing to the HDF format template bank
    insp_segs: SegFile
        The segment file containing the data read and analyzed by each
        inspiral job.
    insp_data_name: str
        The name of the segmentlist storing data read.
    insp_anal_name: str
        The name of the segmentlist storing data analyzed.
    dax_output: path
        The directory where the sub-workflow dax and its supporting files
        are written.
    out_dir: path
        The directory to store minifollowups result plots and files
    tags: {None, optional}
        Tags to add to the minifollowups executables

    Returns
    -------
    layout: list
        A list of tuples which specify the displayed file layout for the
        minifollowups plots.
    """
    logging.info('Entering minifollowups module')

    # The whole module is optional; bail out quietly when unconfigured.
    if not workflow.cp.has_section('workflow-minifollowups'):
        logging.info('There is no [workflow-minifollowups] section in configuration file')
        logging.info('Leaving minifollowups')
        return

    tags = [] if tags is None else tags
    makedir(dax_output)

    # turn the config file into a File class
    config_path = os.path.abspath(dax_output + '/' + '_'.join(tags) + 'foreground_minifollowup.ini')
    workflow.cp.write(open(config_path, 'w'))

    config_file = wdax.File(os.path.basename(config_path))
    config_file.PFN(urlparse.urljoin('file:', urllib.pathname2url(config_path)),
                    site='local')

    exe = Executable(workflow.cp, 'foreground_minifollowup',
                     ifos=workflow.ifos, out_dir=dax_output)

    node = exe.create_node()
    node.add_input_opt('--config-files', config_file)
    node.add_input_opt('--bank-file', tmpltbank_file)
    node.add_input_opt('--statmap-file', coinc_file)
    node.add_multiifo_input_list_opt('--single-detector-triggers',
                                     single_triggers)
    node.add_input_opt('--inspiral-segments', insp_segs)
    node.add_opt('--inspiral-data-read-name', insp_data_name)
    node.add_opt('--inspiral-data-analyzed-name', insp_anal_name)
    node.new_output_file_opt(workflow.analysis_time, '.dax', '--output-file',
                             tags=tags)
    node.new_output_file_opt(workflow.analysis_time, '.dax.map',
                             '--output-map', tags=tags)
    node.new_output_file_opt(workflow.analysis_time, '.tc.txt',
                             '--transformation-catalog', tags=tags)

    name = node.output_files[0].name
    map_file = node.output_files[1]
    tc_file = node.output_files[2]

    node.add_opt('--workflow-name', name)
    node.add_opt('--output-dir', out_dir)

    workflow += node

    # execute this in a sub-workflow
    fil = node.output_files[0]

    # determine if a staging site has been specified
    try:
        staging_site = workflow.cp.get('workflow-foreground_minifollowups',
                                       'staging-site')
    except Exception:
        # BUGFIX: this was a bare ``except:`` which also swallowed
        # SystemExit/KeyboardInterrupt.  A missing section/option still
        # falls back to no staging site.
        staging_site = None

    job = dax.DAX(fil)
    job.addArguments('--basename %s' % os.path.splitext(os.path.basename(name))[0])
    Workflow.set_job_properties(job, map_file, tc_file,
                                staging_site=staging_site)
    workflow._adag.addJob(job)
    dep = dax.Dependency(parent=node._dax_node, child=job)
    workflow._adag.addDependency(dep)
    logging.info('Leaving minifollowups module')
Create plots that followup the Nth loudest coincident injection from a statmap produced HDF file. Parameters ---------- workflow: pycbc.workflow.Workflow The core workflow instance we are populating coinc_file: single_triggers: list of pycbc.workflow.File A list containing the file objects associated with the merged single detector trigger files for each ifo. tmpltbank_file: pycbc.workflow.File The file object pointing to the HDF format template bank insp_segs: SegFile The segment file containing the data read and analyzed by each inspiral job. insp_data_name: str The name of the segmentlist storing data read. insp_anal_name: str The name of the segmentlist storing data analyzed. out_dir: path The directory to store minifollowups result plots and files tags: {None, optional} Tags to add to the minifollowups executables Returns ------- layout: list A list of tuples which specify the displayed file layout for the minifollowups plots.
def StringIO(*args, **kwargs):
    """Constructor shim: build a synchronous StringIO and wrap it for async use."""
    sync_buffer = sync_io.StringIO(*args, **kwargs)
    return AsyncStringIOWrapper(sync_buffer)
StringIO constructor shim for the async wrapper.
def _check_if_must_download(request_list, redownload): """ Updates request.will_download attribute of each request in request_list. **Note:** the function mutates the elements of the list! :param request_list: a list of ``DownloadRequest`` instances :type: list[DownloadRequest] :param redownload: tells whether to download the data again or not :type: bool """ for request in request_list: request.will_download = (request.save_response or request.return_data) \ and (not request.is_downloaded() or redownload)
Updates request.will_download attribute of each request in request_list. **Note:** the function mutates the elements of the list! :param request_list: a list of ``DownloadRequest`` instances :type: list[DownloadRequest] :param redownload: tells whether to download the data again or not :type: bool
def fit_freq_min_max(self, training_signal): """Defines a spectral mask based on training data using min and max values of each frequency component Args: training_signal: Training data """ window_length = len(self.window) window_weight = sum(self.window) max_mask = np.zeros(int(window_length / 2) + 1) min_mask = np.zeros(int(window_length / 2) + 1) for i in range(0, len(training_signal) - window_length - 1): rfft = np.fft.rfft(training_signal[i:i + window_length] * self.window) temp = np.abs(rfft) / window_weight max_mask = np.maximum(max_mask, temp) min_mask = np.minimum(min_mask, temp) self.mask_top = self.gain * max_mask self.mask_bottom = min_mask / self.gain
Defines a spectral mask based on training data using min and max values of each frequency component Args: training_signal: Training data
def create_snapshot(self, xml_bytes):
    """Parse the XML returned by the C{CreateSnapshot} function.

    @param xml_bytes: XML bytes with a C{CreateSnapshotResponse} root
        element.
    @return: The L{Snapshot} instance created.

    TODO: ownerId, volumeSize, description.
    """
    root = XML(xml_bytes)
    # startTime is ISO-8601; keep only the whole-second prefix.
    started = datetime.strptime(root.findtext("startTime")[:19],
                                "%Y-%m-%dT%H:%M:%S")
    # progress arrives as a percentage string like "80%"; an empty value
    # (after stripping the trailing '%') is treated as zero.
    percent_text = root.findtext("progress")[:-1]
    fraction = float(percent_text or "0") / 100.
    return model.Snapshot(
        root.findtext("snapshotId"),
        root.findtext("volumeId"),
        root.findtext("status"),
        started,
        fraction)
Parse the XML returned by the C{CreateSnapshot} function. @param xml_bytes: XML bytes with a C{CreateSnapshotResponse} root element. @return: The L{Snapshot} instance created. TODO: ownerId, volumeSize, description.
def add_sample(self, name, labels, value, timestamp=None, exemplar=None):
    """Add a sample to the metric. Internal-only, do not use."""
    sample = Sample(name, labels, value, timestamp, exemplar)
    self.samples.append(sample)
Add a sample to the metric. Internal-only, do not use.
def _make_field_info(self, field_name, field):
    """
    Create the information that the template needs to render a form field
    for this field.

    Returns a dict with the field's name, display metadata, current/default
    values (serialized appropriately for the detected editor type), and any
    pre-defined value options.

    Raises NotImplementedError for fields not derived from JSONField.
    """
    # Ordered mapping from XBlock field class to editor type name; the
    # first isinstance() match wins, so order matters.
    supported_field_types = (
        (Integer, 'integer'), (Float, 'float'), (Boolean, 'boolean'), (String, 'string'), (List, 'list'), (DateTime, 'datepicker'),
        (JSONField, 'generic'),  # This is last so as a last resort we display a text field w/ the JSON string
    )
    if self.service_declaration("i18n"):
        ugettext = self.ugettext
    else:
        def ugettext(text):
            """ Dummy ugettext method that doesn't do anything """
            return text
    info = {
        'name': field_name,
        'display_name': ugettext(field.display_name) if field.display_name else "",
        'is_set': field.is_set_on(self),
        'default': field.default,
        'value': field.read_from(self),
        'has_values': False,
        'help': ugettext(field.help) if field.help else "",
        'allow_reset': field.runtime_options.get('resettable_editor', True),
        'list_values': None,  # Only available for List fields
        'has_list_values': False,  # True if list_values_provider exists, even if it returned no available options
    }
    # Detect the editor type from the field's class.
    for type_class, type_name in supported_field_types:
        if isinstance(field, type_class):
            info['type'] = type_name
            # If String fields are declared like String(..., multiline_editor=True), then call them "text" type:
            editor_type = field.runtime_options.get('multiline_editor')
            if type_class is String and editor_type:
                if editor_type == "html":
                    info['type'] = 'html'
                else:
                    info['type'] = 'text'
            if type_class is List and field.runtime_options.get('list_style') == "set":
                # List represents unordered, unique items, optionally drawn from list_values_provider()
                info['type'] = 'set'
            elif type_class is List:
                info['type'] = "generic"  # disable other types of list for now until properly implemented
            break
    if "type" not in info:
        raise NotImplementedError("StudioEditableXBlockMixin currently only supports fields derived from JSONField")
    # Serialize value/default into the representation the editor expects.
    if info["type"] in ("list", "set"):
        info["value"] = [json.dumps(val) for val in info["value"]]
        info["default"] = json.dumps(info["default"])
    elif info["type"] == "generic":
        # Convert value to JSON string if we're treating this field generically:
        info["value"] = json.dumps(info["value"])
        info["default"] = json.dumps(info["default"])
    elif info["type"] == "datepicker":
        if info["value"]:
            info["value"] = info["value"].strftime("%m/%d/%Y")
        if info["default"]:
            info["default"] = info["default"].strftime("%m/%d/%Y")
    # A values_provider callable, if declared, overrides the static values.
    if 'values_provider' in field.runtime_options:
        values = field.runtime_options["values_provider"](self)
    else:
        values = field.values
    if values and not isinstance(field, Boolean):
        # This field has only a limited number of pre-defined options.
        # Protip: when defining the field, values= can be a callable.
        if isinstance(field.values, dict) and isinstance(field, (Float, Integer)):
            # e.g. {"min": 0 , "max": 10, "step": .1}
            for option in field.values:
                if option in ("min", "max", "step"):
                    info[option] = field.values.get(option)
                else:
                    raise KeyError("Invalid 'values' key. Should be like values={'min': 1, 'max': 10, 'step': 1}")
        elif isinstance(values[0], dict) and "display_name" in values[0] and "value" in values[0]:
            # e.g. [ {"display_name": "Always", "value": "always"}, ... ]
            for value in values:
                assert "display_name" in value and "value" in value
            info['values'] = values
        else:
            # e.g. [1, 2, 3] - we need to convert it to the [{"display_name": x, "value": x}] format
            info['values'] = [{"display_name": text_type(val), "value": val} for val in values]
        info['has_values'] = 'values' in info
    if info["type"] in ("list", "set") and field.runtime_options.get('list_values_provider'):
        list_values = field.runtime_options['list_values_provider'](self)
        # list_values must be a list of values or {"display_name": x, "value": y} objects
        # Furthermore, we need to convert all values to JSON since they could be of any type
        if list_values and isinstance(list_values[0], dict) and "display_name" in list_values[0]:
            # e.g. [ {"display_name": "Always", "value": "always"}, ... ]
            for entry in list_values:
                assert "display_name" in entry and "value" in entry
                entry["value"] = json.dumps(entry["value"])
        else:
            # e.g. [1, 2, 3] - we need to convert it to the [{"display_name": x, "value": x}] format
            list_values = [json.dumps(val) for val in list_values]
            list_values = [{"display_name": text_type(val), "value": val} for val in list_values]
        info['list_values'] = list_values
        info['has_list_values'] = True
    return info
Create the information that the template needs to render a form field for this field.
def list_flavors(self, retrieve_all=True, **_params):
    """Fetches a list of all Neutron service flavors for a project."""
    return self.list('flavors', self.flavors_path,
                     retrieve_all, **_params)
Fetches a list of all Neutron service flavors for a project.
def gene_coordinates(host, org, gene, chr_exclude=()) -> pd.DataFrame:
    """Retrieve gene coordinates for specific organism through BioMart.

    Parameters
    ----------
    host : {{'www.ensembl.org', ...}}
        A valid BioMart host URL. Can be used to control genome build.
    org : {{'hsapiens', 'mmusculus', 'drerio'}}
        Organism to query. Currently available are human ('hsapiens'),
        mouse ('mmusculus') and zebrafish ('drerio').
    gene :
        The gene symbol (e.g. 'hgnc_symbol' for human) for which to
        retrieve coordinates.
    chr_exclude :
        A sequence of chromosomes to exclude from query.
        (BUGFIX: default was a mutable ``[]``; an empty tuple is
        equivalent for the ``isin`` membership test below.)

    Returns
    -------
    A `pd.DataFrame` containing gene coordinates for the specified gene
    symbol, or ``None`` when *org* is not supported.
    """
    try:
        from bioservices import biomart
    except ImportError:
        raise ImportError(
            'You need to install the `bioservices` module.')
    from io import StringIO

    # Per-organism BioMart dataset and gene-symbol attribute.
    datasets = {
        'hsapiens': ('hsapiens_gene_ensembl', 'hgnc_symbol'),
        'mmusculus': ('mmusculus_gene_ensembl', 'mgi_symbol'),
        'drerio': ('drerio_gene_ensembl', 'zfin_id_symbol'),
    }

    # building query
    s = biomart.BioMart(host=host)
    s.new_query()
    if org not in datasets:
        logg.msg('organism ', str(org), ' is unavailable', v=4, no_indent=True)
        return None
    dataset, symbol_attr = datasets[org]
    s.add_dataset_to_xml(dataset)
    s.add_attribute_to_xml(symbol_attr)
    s.add_attribute_to_xml('chromosome_name')
    s.add_attribute_to_xml('start_position')
    s.add_attribute_to_xml('end_position')
    xml = s.get_xml()

    # parsing gene coordinates
    res = pd.read_csv(StringIO(s.query(xml)), sep='\t', header=None)
    res.columns = ['symbol', 'chromosome_name', 'start', 'end']
    res = res.dropna()
    res = res[~res['chromosome_name'].isin(chr_exclude)]
    res = res.set_index('symbol')
    return res.loc[[gene], :]
Retrieve gene coordinates for specific organism through BioMart. Parameters ---------- host : {{'www.ensembl.org', ...}} A valid BioMart host URL. Can be used to control genome build. org : {{'hsapiens', 'mmusculus', 'drerio'}} Organism to query. Currently available are human ('hsapiens'), mouse ('mmusculus') and zebrafish ('drerio'). gene : The gene symbol (e.g. 'hgnc_symbol' for human) for which to retrieve coordinates. chr_exclude : A list of chromosomes to exclude from query. Returns ------- A `pd.DataFrame` containing gene coordinates for the specified gene symbol.
def copy_config(self, original, new):
    '''
    Copies collection configs into a new folder. Can be used to create
    new collections based on existing configs.

    Replicates every node under /configs/original to /configs/new.

    :param original str: ZK name of original config
    :param new str: New name of the ZK config.
    '''
    source = '/configs/{}'.format(original)
    destination = '/configs/{}'.format(new)
    if not self.kz.exists(source):
        raise ZookeeperError(
            "Collection doesn't exist in Zookeeper. "
            "Current Collections are: {}".format(
                self.kz.get_children('/configs')))
    self._copy_dir(source, destination)
Copies collection configs into a new folder. Can be used to create new collections based on existing configs. Basically, copies all nodes under /configs/original to /configs/new. :param original str: ZK name of original config :param new str: New name of the ZK config.
def email(value, allow_empty = False, **kwargs):
    """Validate that ``value`` is a valid email address.

    .. note::

      Email address validation is...complicated. The methodology that we have
      adopted here is *generally* compliant with
      `RFC 5322 <https://tools.ietf.org/html/rfc5322>`_ and uses a combination of
      string parsing and regular expressions.

      String parsing in particular is used to validate certain *highly unusual*
      but still valid email patterns, including the use of escaped text and
      comments within an email address' local address (the user name part).

      This approach ensures more complete coverage for unusual edge cases, while
      still letting us use regular expressions that perform quickly.

    :param value: The value to validate.
    :type value: :class:`str <python:str>` / :obj:`None <python:None>`

    :param allow_empty: If ``True``, returns :obj:`None <python:None>` if
      ``value`` is empty. If ``False``, raises a
      :class:`EmptyValueError <validator_collection.errors.EmptyValueError>`
      if ``value`` is empty. Defaults to ``False``.
    :type allow_empty: :class:`bool <python:bool>`

    :returns: ``value`` / :obj:`None <python:None>`
    :rtype: :class:`str <python:str>` / :obj:`None <python:None>`

    :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False``
    :raises CannotCoerceError: if ``value`` is not a :class:`str <python:str>` or
      :obj:`None <python:None>`
    :raises InvalidEmailError: if ``value`` is not a valid email address or
      empty with ``allow_empty`` set to ``True``
    """
    # pylint: disable=too-many-branches,too-many-statements,R0914
    if not value and not allow_empty:
        raise errors.EmptyValueError('value (%s) was empty' % value)
    elif not value:
        return None

    if not isinstance(value, basestring):
        raise errors.CannotCoerceError('value must be a valid string, '
                                       'was %s' % type(value))

    if '@' not in value:
        raise errors.InvalidEmailError('value (%s) is not a valid email address' % value)

    # RFC 5322 allows parenthesized comments; strip the first balanced
    # "(...)" span, but reject unbalanced parentheses outright.
    if '(' in value and ')' in value:
        open_parentheses = value.find('(')
        close_parentheses = value.find(')') + 1

        if close_parentheses < open_parentheses:
            raise errors.InvalidEmailError('value (%s) is not a valid email '
                                           'address' % value)

        commented_value = value[open_parentheses:close_parentheses]
        value = value.replace(commented_value, '')
    elif '(' in value:
        raise errors.InvalidEmailError('value (%s) is not a valid email address' % value)
    elif ')' in value:
        raise errors.InvalidEmailError('value (%s) is not a valid email address' % value)

    # Angle brackets are only valid inside a quoted local part; require a
    # quote before '<' and after '>'.
    if '<' in value or '>' in value:
        lt_position = value.find('<')
        gt_position = value.find('>')
        first_quote_position = -1
        second_quote_position = -1

        if lt_position >= 0:
            first_quote_position = value.find('"', 0, lt_position)
        if gt_position >= 0:
            second_quote_position = value.find('"', gt_position)

        if first_quote_position < 0 or second_quote_position < 0:
            raise errors.InvalidEmailError('value (%s) is not a valid email '
                                           'address' % value)

    # Multiple '@' signs are only valid when the extras are enclosed in a
    # quoted local part; verify each one is bracketed by quotes.
    at_count = value.count('@')
    if at_count > 1:
        last_at_position = 0
        last_quote_position = 0
        for x in range(0, at_count):                             # pylint: disable=W0612
            at_position = value.find('@', last_at_position + 1)
            if at_position >= 0:
                first_quote_position = value.find('"',
                                                  last_quote_position,
                                                  at_position)
                second_quote_position = value.find('"', first_quote_position)
                if first_quote_position < 0 or second_quote_position < 0:
                    raise errors.InvalidEmailError(
                        'value (%s) is not a valid email address' % value
                    )
                last_at_position = at_position
                last_quote_position = second_quote_position

    split_values = value.split('@')
    if len(split_values) < 2:
        raise errors.InvalidEmailError('value (%s) is not a valid email address' % value)

    # Everything before the last '@' is the local part; after it, the domain.
    local_value = ''.join(split_values[:-1])
    domain_value = split_values[-1]
    is_domain = False
    is_ip = False
    try:
        # A bracketed domain ("[...]") may be an IP literal; unwrap first.
        if domain_value.startswith('[') and domain_value.endswith(']'):
            domain_value = domain_value[1:-1]
        domain(domain_value)
        is_domain = True
    except ValueError:
        is_domain = False

    if not is_domain:
        try:
            ip_address(domain_value, force_run = True)          # pylint: disable=E1123
            is_ip = True
        except ValueError:
            is_ip = False

    if not is_domain and is_ip:
        # IP-literal domain: recursively validate the local part against a
        # placeholder domain so the regex branch below can be reused.
        try:
            email(local_value + '@test.com', force_run = True)  # pylint: disable=E1123
        except ValueError:
            raise errors.InvalidEmailError('value (%s) is not a valid email '
                                           'address' % value)

        return value

    if not is_domain:
        raise errors.InvalidEmailError('value (%s) is not a valid email address' % value)
    else:
        # Standard case: run the fast regex, then reject junk before or
        # after the matched address.
        is_valid = EMAIL_REGEX.search(value)

        if not is_valid:
            raise errors.InvalidEmailError('value (%s) is not a valid email '
                                           'address' % value)

        matched_string = is_valid.group(0)
        position = value.find(matched_string)
        if position > 0:
            prefix = value[:position]
            if prefix[0] in string_.punctuation:
                raise errors.InvalidEmailError('value (%s) is not a valid email '
                                               'address' % value)
            if '..' in prefix:
                raise errors.InvalidEmailError('value (%s) is not a valid email '
                                               'address' % value)

        end_of_match = position + len(matched_string)
        suffix = value[end_of_match:]
        if suffix:
            raise errors.InvalidEmailError('value (%s) is not a valid email '
                                           'address' % value)

    return value
Validate that ``value`` is a valid email address. .. note:: Email address validation is...complicated. The methodology that we have adopted here is *generally* compliant with `RFC 5322 <https://tools.ietf.org/html/rfc5322>`_ and uses a combination of string parsing and regular expressions. String parsing in particular is used to validate certain *highly unusual* but still valid email patterns, including the use of escaped text and comments within an email address' local address (the user name part). This approach ensures more complete coverage for unusual edge cases, while still letting us use regular expressions that perform quickly. :param value: The value to validate. :type value: :class:`str <python:str>` / :obj:`None <python:None>` :param allow_empty: If ``True``, returns :obj:`None <python:None>` if ``value`` is empty. If ``False``, raises a :class:`EmptyValueError <validator_collection.errors.EmptyValueError>` if ``value`` is empty. Defaults to ``False``. :type allow_empty: :class:`bool <python:bool>` :returns: ``value`` / :obj:`None <python:None>` :rtype: :class:`str <python:str>` / :obj:`None <python:None>` :raises EmptyValueError: if ``value`` is empty and ``allow_empty`` is ``False`` :raises CannotCoerceError: if ``value`` is not a :class:`str <python:str>` or :obj:`None <python:None>` :raises InvalidEmailError: if ``value`` is not a valid email address or empty with ``allow_empty`` set to ``True``
def _follow_link(self, value):
    '''Returns given `value` or, if it is a symlink, the `value` it names.

    Follows chains of symlinks until a non-link value is reached; a key
    seen twice indicates a circular reference.
    '''
    visited_keys = set()
    link_key = self._link_for_value(value)
    while link_key:
        assert link_key not in visited_keys, 'circular symlink reference'
        visited_keys.add(link_key)
        value = super(SymlinkDatastore, self).get(link_key)
        link_key = self._link_for_value(value)
    return value
Returns given `value` or, if it is a symlink, the `value` it names.
def quote_string_if_needed(arg: str) -> str:
    """ Quotes a string if it contains spaces and isn't already quoted.

    Double quotes are preferred; single quotes are used when the string
    itself contains a double quote.
    """
    if is_quoted(arg) or ' ' not in arg:
        return arg
    quote = "'" if '"' in arg else '"'
    return '{0}{1}{0}'.format(quote, arg)
Quotes a string if it contains spaces and isn't already quoted
def PDFEmitter(target, source, env):
    """Strips any .aux or .log files from the input source list.

    These are created by the TeX Builder that in all likelihood was used
    to generate the .dvi file we're using as input, and we only care
    about the .dvi file.
    """
    byproducts = ('.aux', '.log')

    def is_wanted(node):
        # Keep a node unless its extension marks it as a TeX byproduct.
        return SCons.Util.splitext(str(node))[1] not in byproducts

    filtered_source = [s for s in source if is_wanted(s)]
    return (target, filtered_source)
Strips any .aux or .log files from the input source list. These are created by the TeX Builder that in all likelihood was used to generate the .dvi file we're using as input, and we only care about the .dvi file.
def pull_en_words() -> None:
    """ Fetches the dwyl/english-words repository when not already present. """
    repo_url = "https://github.com/dwyl/english-words.git"
    words_file = Path(config.EN_WORDS_PATH)
    if words_file.is_file():
        # Word list already downloaded; nothing to do.
        return
    subprocess.run(["git", "clone", repo_url, str(words_file.parent)])
Fetches a repository containing English words.
def map_indices_parent2child(child, parent_indices):
    """Map parent RTDCBase event indices to RTDC_Hierarchy child indices.

    Parameters
    ----------
    child: RTDC_Hierarchy
        hierarchy child
    parent_indices: 1d ndarray
        indices of the hierarchy parent (`child.hparent`) to map

    Returns
    -------
    child_indices: 1d ndarray
        child indices corresponding to the given parent indices
    """
    parent = child.hparent
    # boolean filter: which parent events were passed down to the child
    pf = parent.filter.all
    # PERF: membership testing against an ndarray is O(n) per lookup;
    # hoist the indices into a set once for O(1) tests (same results).
    wanted = set(int(idx) for idx in parent_indices)
    child_indices = []
    count = 0
    for ii in range(len(pf)):
        if pf[ii]:
            # only append indices if they exist in child
            if ii in wanted:
                # current child event count is the child index
                child_indices.append(count)
            # increment child event count
            count += 1
    return np.array(child_indices)
Map parent RTDCBase event indices to RTDC_Hierarchy Parameters ---------- child: RTDC_Hierarchy hierarchy child parent_indices: 1d ndarray hierarchy parent (`child.hparent`) indices to map Returns ------- child_indices: 1d ndarray child indices
def atlasdb_init( path, zonefile_dir, db, peer_seeds, peer_blacklist, recover=False, validate=False):
    """
    Set up the atlas node:
    * create the db if it doesn't exist
    * go through all the names and verify that we have the *current* zonefiles
    * if we don't, queue them for fetching.
    * set up the peer db

    @path is the filesystem path of the atlas sqlite database
    @zonefile_dir is where zonefiles are stored on disk
    @db should be an instance of BlockstackDB
    @peer_seeds / @peer_blacklist are lists of peer URLs to whitelist /
    blacklist respectively

    Return the newly-initialized peer table
    """
    global ATLASDB_SQL

    peer_table = {}

    if os.path.exists( path ):
        # Existing database: resume from the last processed block.
        log.debug("Atlas DB exists at %s" % path)

        con = atlasdb_open( path )
        atlasdb_last_block = atlasdb_get_lastblock( con=con, path=path )
        if atlasdb_last_block is None:
            atlasdb_last_block = FIRST_BLOCK_MAINNET

        log.debug("Synchronize zonefiles from %s to %s" % (atlasdb_last_block, db.lastblock) )

        atlasdb_queue_zonefiles( con, db, atlasdb_last_block, zonefile_dir, recover=recover, validate=validate)

        log.debug("Refreshing seed peers")
        for peer in peer_seeds:
            # forcibly add seed peers
            atlasdb_add_peer( peer, con=con, peer_table=peer_table, ping_on_evict=False )

        # re-try fetching zonefiles from storage if we don't have them yet
        atlasdb_reset_zonefile_tried_storage( con=con, path=path )

        # load up peer table from the db
        log.debug("Loading peer table")
        peer_table = atlasdb_load_peer_table( con=con, path=path )

        # cache zonefile inventory and count
        atlasdb_cache_zonefile_info( con=con )
        con.close()

    else:
        # Fresh database: create the schema and queue everything from the
        # first mainnet block.
        log.debug("Initializing Atlas DB at %s" % path)

        lines = [l + ";" for l in ATLASDB_SQL.split(";")]
        con = sqlite3.connect( path, isolation_level=None )

        for line in lines:
            db_query_execute(con, line, ())

        con.row_factory = atlasdb_row_factory

        # populate from db
        log.debug("Queuing all zonefiles")
        atlasdb_queue_zonefiles( con, db, FIRST_BLOCK_MAINNET, zonefile_dir, recover=recover, validate=validate)

        log.debug("Adding seed peers")
        for peer in peer_seeds:
            atlasdb_add_peer( peer, con=con, peer_table=peer_table )

        atlasdb_cache_zonefile_info( con=con )
        con.close()

    log.debug("peer_table: {}".format(peer_table.keys()))

    # whitelist and blacklist
    for peer_url in peer_seeds:
        host, port = url_to_host_port( peer_url )
        peer_hostport = "%s:%s" % (host, port)

        if peer_hostport not in peer_table.keys():
            atlasdb_add_peer( peer_hostport, path=path, peer_table=peer_table )

        log.debug("peer_table: {}".format(peer_table.keys()))
        peer_table[peer_hostport]['whitelisted'] = True

    for peer_url in peer_blacklist:
        host, port = url_to_host_port( peer_url )
        peer_hostport = "%s:%s" % (host, port)

        if peer_hostport not in peer_table.keys():
            atlasdb_add_peer( peer_hostport, path=path, peer_table=peer_table )

        log.debug("peer_table: {}".format(peer_table.keys()))
        peer_table[peer_hostport]['blacklisted'] = True

    return peer_table
Set up the atlas node: * create the db if it doesn't exist * go through all the names and verify that we have the *current* zonefiles * if we don't, queue them for fetching. * set up the peer db @db should be an instance of BlockstackDB @initial_peers should be a list of URLs Return the newly-initialized peer table
def validateState(value, blank=False, strip=None, allowlistRegexes=None, blocklistRegexes=None, excMsg=None, returnStateName=False):
    """Raises ValidationException if value is not a USA state.
    Returns the capitalized state abbreviation, unless returnStateName is True
    in which case it returns the titlecased state name.

    * value (str): The value being validated as a USA state.
    * blank (bool): If True, a blank string will be accepted. Defaults to False.
    * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped.
    * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers.
    * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation.
    * excMsg (str): A custom message to use in the raised ValidationException.
    * returnStateName (bool): If True, the full state name is returned, i.e. 'California'. Otherwise, the abbreviation, i.e. 'CA'. Defaults to False.

    >>> import pysimplevalidate as pysv
    >>> pysv.validateState('tx')
    'TX'
    >>> pysv.validateState('california')
    'CA'
    >>> pysv.validateState('WASHINGTON')
    'WA'
    >>> pysv.validateState('WASHINGTON', returnStateName=True)
    'Washington'
    """
    # TODO - note that this is USA-centric. I should work on trying to make this more international.

    # Validate parameters.
    _validateGenericParameters(blank=blank, strip=strip, allowlistRegexes=allowlistRegexes, blocklistRegexes=blocklistRegexes)

    returnNow, value = _prevalidationCheck(value, blank, strip, allowlistRegexes, blocklistRegexes, excMsg)
    if returnNow:
        # Blank value accepted or an allowlist regex matched.
        return value

    if value.upper() in USA_STATES_UPPER.keys(): # check if value is a state abbreviation
        if returnStateName:
            return USA_STATES[value.upper()] # Return full state name.
        else:
            return value.upper() # Return abbreviation.
    elif value.title() in USA_STATES.values(): # check if value is a state name
        if returnStateName:
            return value.title() # Return full state name.
        else:
            return USA_STATES_REVERSED[value.title()] # Return abbreviation.

    _raiseValidationException(_('%r is not a state.') % (_errstr(value)), excMsg)
Raises ValidationException if value is not a USA state. Returns the capitalized state abbreviation, unless returnStateName is True in which case it returns the titlecased state name. * value (str): The value being validated as a USA state. * blank (bool): If True, a blank string will be accepted. Defaults to False. * strip (bool, str, None): If None, whitespace is stripped from value. If a str, the characters in it are stripped from value. If False, nothing is stripped. * allowlistRegexes (Sequence, None): A sequence of regex str that will explicitly pass validation, even if they aren't numbers. * blocklistRegexes (Sequence, None): A sequence of regex str or (regex_str, response_str) tuples that, if matched, will explicitly fail validation. * excMsg (str): A custom message to use in the raised ValidationException. * returnStateName (bool): If True, the full state name is returned, i.e. 'California'. Otherwise, the abbreviation, i.e. 'CA'. Defaults to False. >>> import pysimplevalidate as pysv >>> pysv.validateState('tx') 'TX' >>> pysv.validateState('california') 'CA' >>> pysv.validateState('WASHINGTON') 'WA' >>> pysv.validateState('WASHINGTON', returnStateName=True) 'Washington'
def WaitForFlow(flow_urn, token=None, timeout=DEFAULT_TIMEOUT, max_sleep_time=1, min_sleep_time=0.2, dampening_multiplier=0.9): """Waits for a flow to finish, polling while we wait. Args: flow_urn: The urn of the flow to wait for. token: The datastore access token. timeout: How long to wait before giving up, usually because the client has gone away. max_sleep_time: The initial and longest time to wait in between polls. min_sleep_time: The final and shortest time to wait in between polls. dampening_multiplier: The current sleep time is multiplied by this number on each iteration. Controls how fast the polling reaches its minimum sleep time. You probably want this to be less than 1, unless you want to wait an increasing amount of time in between flows. Raises: IOError: If we time out while waiting for the client. """ start_time = time.time() sleep_time = max_sleep_time while True: # Reopen the AFF4Object to check if its status has changed, and also make # sure it's a flow. with aff4.FACTORY.Open( flow_urn, token=token, aff4_type=flow.GRRFlow) as flow_obj: # Stop if the flow is done or has timed out. if time.time() - start_time > timeout: logging.warning("Timed out after waiting %ss for %s!", timeout, flow_obj) raise IOError("Timed out trying to access client! Is it connected?") if not flow_obj.GetRunner().IsRunning(): break # Decrease the time we sleep each iteration. sleep_time = max(sleep_time * dampening_multiplier, min_sleep_time) time.sleep(sleep_time) logging.debug("Waiting for %s, sleeping for %.3fs", flow_obj, sleep_time)
Waits for a flow to finish, polling while we wait. Args: flow_urn: The urn of the flow to wait for. token: The datastore access token. timeout: How long to wait before giving up, usually because the client has gone away. max_sleep_time: The initial and longest time to wait in between polls. min_sleep_time: The final and shortest time to wait in between polls. dampening_multiplier: The current sleep time is multiplied by this number on each iteration. Controls how fast the polling reaches its minimum sleep time. You probably want this to be less than 1, unless you want to wait an increasing amount of time in between flows. Raises: IOError: If we time out while waiting for the client.
def combine_first(self, other): """ Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0 """ import pandas.core.computation.expressions as expressions def extract_values(arr): # Does two things: # 1. maybe gets the values from the Series / Index # 2. convert datelike to i8 if isinstance(arr, (ABCIndexClass, ABCSeries)): arr = arr._values if needs_i8_conversion(arr): if is_extension_array_dtype(arr.dtype): arr = arr.asi8 else: arr = arr.view('i8') return arr def combiner(x, y): mask = isna(x) if isinstance(mask, (ABCIndexClass, ABCSeries)): mask = mask._values x_values = extract_values(x) y_values = extract_values(y) # If the column y in other DataFrame is not in first DataFrame, # just return y_values. if y.name not in self.columns: return y_values return expressions.where(mask, y_values, x_values) return self.combine(other, combiner, overwrite=False)
Update null elements with value in the same location in `other`. Combine two DataFrame objects by filling null values in one DataFrame with non-null values from other DataFrame. The row and column indexes of the resulting DataFrame will be the union of the two. Parameters ---------- other : DataFrame Provided DataFrame to use to fill null values. Returns ------- DataFrame See Also -------- DataFrame.combine : Perform series-wise operation on two DataFrames using a given function. Examples -------- >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [None, 4]}) >>> df2 = pd.DataFrame({'A': [1, 1], 'B': [3, 3]}) >>> df1.combine_first(df2) A B 0 1.0 3.0 1 0.0 4.0 Null values still persist if the location of that null value does not exist in `other` >>> df1 = pd.DataFrame({'A': [None, 0], 'B': [4, None]}) >>> df2 = pd.DataFrame({'B': [3, 3], 'C': [1, 1]}, index=[1, 2]) >>> df1.combine_first(df2) A B C 0 NaN 4.0 NaN 1 0.0 3.0 1.0 2 NaN 3.0 1.0
def create_container(self, conf, detach, tty):
    """Create a single container.

    Builds host_config from ``conf`` (volumes, ports, network, cpu/memory
    options), creating any missing host directories for bind mounts, then
    creates the container via the docker API and returns its id.

    Raises BadOption if host volume directories can't be created, and
    BadImage if the docker API reports an error detail.
    """
    name = conf.name
    image_name = conf.image_name
    if conf.tag is not NotSpecified:
        image_name = conf.image_name_with_tag
    container_name = conf.container_name

    # NOTE(review): the extent of this assumed_role block was ambiguous in the
    # original formatting; assumed here to cover the conf extraction only —
    # confirm against upstream.
    with conf.assumed_role():
        env = dict(e.pair for e in conf.env)
        binds = conf.volumes.binds
        command = conf.formatted_command
        volume_names = conf.volumes.volume_names
        volumes_from = list(conf.volumes.share_with_names)
        no_tty_option = conf.no_tty_option
        ports = [p.container_port.port_pair for p in conf.ports]
        port_bindings = self.exposed(conf.ports)

    uncreated = []
    # BUGFIX: the loop variable used to be `name`, clobbering the container
    # name read above and making the log/error messages below report a bind
    # path instead of the name.
    for bind_path in binds:
        if not os.path.exists(bind_path):
            log.info("Making volume for mounting\tvolume=%s", bind_path)
            try:
                os.makedirs(bind_path)
            except OSError as error:
                uncreated.append((bind_path, error))
    if uncreated:
        raise BadOption("Failed to create some volumes on the host", uncreated=uncreated)

    log.info("Creating container from %s\timage=%s\tcontainer_name=%s\ttty=%s", image_name, name, container_name, tty)
    if binds:
        log.info("\tUsing volumes\tvolumes=%s", volume_names)
    if env:
        log.info("\tUsing environment\tenv=%s", sorted(env.keys()))
    if ports:
        log.info("\tUsing ports\tports=%s", ports)
    if port_bindings:
        log.info("\tPort bindings: %s", port_bindings)
    if volumes_from:
        log.info("\tVolumes from: %s", volumes_from)

    host_config = conf.harpoon.docker_api.create_host_config(
          binds = binds
        , volumes_from = volumes_from
        , port_bindings = port_bindings

        , devices = conf.devices
        , lxc_conf = conf.lxc_conf
        , privileged = conf.privileged
        , restart_policy = conf.restart_policy

        , dns = conf.network.dns
        , dns_search = conf.network.dns_search
        , extra_hosts = conf.network.extra_hosts
        , network_mode = conf.network.network_mode
        , publish_all_ports = conf.network.publish_all_ports

        , cap_add = conf.cpu.cap_add
        , cap_drop = conf.cpu.cap_drop
        , mem_limit = conf.cpu.mem_limit
        , cpu_shares = conf.cpu.cpu_shares
        , cpuset_cpus = conf.cpu.cpuset_cpus
        , cpuset_mems = conf.cpu.cpuset_mems
        , memswap_limit = conf.cpu.memswap_limit

        , ulimits = conf.ulimits
        , read_only = conf.read_only_rootfs
        , log_config = conf.log_config
        , security_opt = conf.security_opt

        , **conf.other_options.host_config
        )

    container_id = conf.harpoon.docker_api.create_container(image_name
        , name=container_name
        , detach=detach
        , command=command
        , volumes=volume_names
        , environment=env

        , tty = False if no_tty_option else tty
        , user = conf.user
        , ports = ports
        , stdin_open = tty

        , hostname = conf.network.hostname
        , domainname = conf.network.domainname
        , network_disabled = conf.network.disabled

        , host_config = host_config

        , **conf.other_options.create
        )

    if isinstance(container_id, dict):
        if "errorDetail" in container_id:
            raise BadImage("Failed to create container", image=name, error=container_id["errorDetail"])
        container_id = container_id["Id"]

    return container_id
Create a single container
def img2wav(path, min_x, max_x, min_y, max_y, window_size=3): """Generate 1-D data ``y=f(x)`` from a black/white image. Suppose we have an image like that: .. image:: images/waveform.png :align: center Put some codes:: >>> from weatherlab.math.img2waveform import img2wav >>> import matplotlib.pyplot as plt >>> x, y = img2wav(r"testdata\img2waveform\waveform.png", ... min_x=0.0, max_x=288, ... min_y=15.0, max_y=35.0, ... window_size=15) >>> plt.plot(x, y) >>> plt.show() Then you got nicely sampled data: .. image:: images\waveform_pyplot.png :align: center :param path: the image file path :type path: string :param min_x: minimum value of x axis :type min_x: number :param max_x: maximum value of x axis :type max_x: number :param min_y: minimum value of y axis :type min_y: number :param max_y: maximum value of y axis :type max_y: number :param window_size: the slide window :type window_size: int Note: In python, a numpy array that represent a image is from left to the right, top to the bottom, but in coordinate, it's from bottom to the top. So we use ::-1 for a reverse output """ image = Image.open(path).convert("L") matrix = np.array(image)[::-1] # you can customize the gray scale fix behavior to fit color image matrix[np.where(matrix >= 128)] = 255 matrix[np.where(matrix < 128)] = 0 tick_x = (max_x - min_x) / matrix.shape[1] tick_y = (max_y - min_y) / matrix.shape[0] x, y = list(), list() for i in range(matrix.shape[1]): window = expand_window( # slide margin window i, window_size, matrix.shape[1]) margin_dots_y_indices = np.where(matrix[:, window] == 0)[0] # if found at least one dots in margin if len(margin_dots_y_indices) > 0: x.append(min_x + (i + 1) * tick_x) y.append(min_y + margin_dots_y_indices.mean() * tick_y) return np.array(x), np.array(y)
Generate 1-D data ``y=f(x)`` from a black/white image. Suppose we have an image like that: .. image:: images/waveform.png :align: center Put some codes:: >>> from weatherlab.math.img2waveform import img2wav >>> import matplotlib.pyplot as plt >>> x, y = img2wav(r"testdata\img2waveform\waveform.png", ... min_x=0.0, max_x=288, ... min_y=15.0, max_y=35.0, ... window_size=15) >>> plt.plot(x, y) >>> plt.show() Then you got nicely sampled data: .. image:: images\waveform_pyplot.png :align: center :param path: the image file path :type path: string :param min_x: minimum value of x axis :type min_x: number :param max_x: maximum value of x axis :type max_x: number :param min_y: minimum value of y axis :type min_y: number :param max_y: maximum value of y axis :type max_y: number :param window_size: the slide window :type window_size: int Note: In python, a numpy array that represent a image is from left to the right, top to the bottom, but in coordinate, it's from bottom to the top. So we use ::-1 for a reverse output
def pformat(self, consumed_capacity=None): """ Pretty format for insertion into table pformat """ consumed_capacity = consumed_capacity or {} lines = [] parts = ["GLOBAL", self.index_type, "INDEX", self.name] if self.status != "ACTIVE": parts.insert(0, "[%s]" % self.status) lines.append(" ".join(parts)) lines.append(" items: {0:,} ({1:,} bytes)".format(self.item_count, self.size)) read = "Read: " + format_throughput( self.read_throughput, consumed_capacity.get("read") ) write = "Write: " + format_throughput( self.write_throughput, consumed_capacity.get("write") ) lines.append(" " + read + " " + write) lines.append(" " + self.hash_key.schema) if self.range_key is not None: lines.append(" " + self.range_key.schema) if self.includes is not None: keys = "[%s]" % ", ".join(("'%s'" % i for i in self.includes)) lines.append(" Projection: %s" % keys) return "\n".join(lines)
Pretty format for insertion into table pformat
async def cities(self, country: str, state: str) -> list: """Return a list of supported cities in a country/state.""" data = await self._request( 'get', 'cities', params={ 'state': state, 'country': country }) return [d['city'] for d in data['data']]
Return a list of supported cities in a country/state.
def get_objective_bank_lookup_session(self, *args, **kwargs): """Gets the OsidSession associated with the objective bank lookup service. return: (osid.learning.ObjectiveBankLookupSession) - an ObjectiveBankLookupSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_objective_bank_lookup() is false compliance: optional - This method must be implemented if supports_objective_bank_lookup() is true. """ if not self.supports_objective_bank_lookup(): raise Unimplemented() try: from . import sessions except ImportError: raise OperationFailed() try: session = sessions.ObjectiveBankLookupSession(runtime=self._runtime) except AttributeError: raise OperationFailed() return session
Gets the OsidSession associated with the objective bank lookup service. return: (osid.learning.ObjectiveBankLookupSession) - an ObjectiveBankLookupSession raise: OperationFailed - unable to complete request raise: Unimplemented - supports_objective_bank_lookup() is false compliance: optional - This method must be implemented if supports_objective_bank_lookup() is true.
def tajima_d(ac, pos=None, start=None, stop=None, min_sites=3):
    """Calculate the value of Tajima's D over a given region.

    Parameters
    ----------
    ac : array_like, int, shape (n_variants, n_alleles)
        Allele counts array.
    pos : array_like, int, shape (n_items,), optional
        Variant positions, using 1-based coordinates, in ascending order.
    start : int, optional
        The position at which to start (1-based). Defaults to the first position.
    stop : int, optional
        The position at which to stop (1-based). Defaults to the last position.
    min_sites : int, optional
        Minimum number of segregating sites for which to calculate a value. If
        there are fewer, np.nan is returned. Defaults to 3.

    Returns
    -------
    D : float

    Examples
    --------
    >>> import allel
    >>> g = allel.GenotypeArray([[[0, 0], [0, 0]],
    ...                          [[0, 0], [0, 1]],
    ...                          [[0, 0], [1, 1]],
    ...                          [[0, 1], [1, 1]],
    ...                          [[1, 1], [1, 1]],
    ...                          [[0, 0], [1, 2]],
    ...                          [[0, 1], [1, 2]],
    ...                          [[0, 1], [-1, -1]],
    ...                          [[-1, -1], [-1, -1]]])
    >>> ac = g.count_alleles()
    >>> allel.tajima_d(ac)
    3.1445848780213814
    >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27]
    >>> allel.tajima_d(ac, pos=pos, start=7, stop=25)
    3.8779735196179366
    """
    # check inputs
    if not hasattr(ac, 'count_segregating'):
        ac = AlleleCountsArray(ac, copy=False)

    # deal with subregion
    if pos is not None and (start is not None or stop is not None):
        if not isinstance(pos, SortedIndex):
            pos = SortedIndex(pos, copy=False)
        loc = pos.locate_range(start, stop)
        ac = ac[loc]

    # count segregating variants
    S = ac.count_segregating()
    if S < min_sites:
        # Too few segregating sites for a meaningful statistic.
        return np.nan

    # assume number of chromosomes sampled is constant for all variants
    n = ac.sum(axis=1).max()

    # (n-1)th harmonic number
    a1 = np.sum(1 / np.arange(1, n))

    # calculate Watterson's theta (absolute value)
    theta_hat_w_abs = S / a1

    # calculate mean pairwise difference
    mpd = mean_pairwise_difference(ac, fill=0)

    # calculate theta_hat pi (sum differences over variants)
    theta_hat_pi_abs = np.sum(mpd)

    # N.B., both theta estimates are usually divided by the number of
    # (accessible) bases but here we want the absolute difference
    d = theta_hat_pi_abs - theta_hat_w_abs

    # calculate the denominator (standard deviation)
    # Constants follow Tajima (1989); e1, e2 scale the variance of d.
    a2 = np.sum(1 / (np.arange(1, n)**2))
    b1 = (n + 1) / (3 * (n - 1))
    b2 = 2 * (n**2 + n + 3) / (9 * n * (n - 1))
    c1 = b1 - (1 / a1)
    c2 = b2 - ((n + 2) / (a1 * n)) + (a2 / (a1**2))
    e1 = c1 / a1
    e2 = c2 / (a1**2 + a2)
    d_stdev = np.sqrt((e1 * S) + (e2 * S * (S - 1)))

    # finally calculate Tajima's D
    D = d / d_stdev
    return D
Calculate the value of Tajima's D over a given region. Parameters ---------- ac : array_like, int, shape (n_variants, n_alleles) Allele counts array. pos : array_like, int, shape (n_items,), optional Variant positions, using 1-based coordinates, in ascending order. start : int, optional The position at which to start (1-based). Defaults to the first position. stop : int, optional The position at which to stop (1-based). Defaults to the last position. min_sites : int, optional Minimum number of segregating sites for which to calculate a value. If there are fewer, np.nan is returned. Defaults to 3. Returns ------- D : float Examples -------- >>> import allel >>> g = allel.GenotypeArray([[[0, 0], [0, 0]], ... [[0, 0], [0, 1]], ... [[0, 0], [1, 1]], ... [[0, 1], [1, 1]], ... [[1, 1], [1, 1]], ... [[0, 0], [1, 2]], ... [[0, 1], [1, 2]], ... [[0, 1], [-1, -1]], ... [[-1, -1], [-1, -1]]]) >>> ac = g.count_alleles() >>> allel.tajima_d(ac) 3.1445848780213814 >>> pos = [2, 4, 7, 14, 15, 18, 19, 25, 27] >>> allel.tajima_d(ac, pos=pos, start=7, stop=25) 3.8779735196179366
def clean_by_request(self, request): ''' Remove all futures that were waiting for request `request` since it is done waiting ''' if request not in self.request_map: return for tag, matcher, future in self.request_map[request]: # timeout the future self._timeout_future(tag, matcher, future) # remove the timeout if future in self.timeout_map: tornado.ioloop.IOLoop.current().remove_timeout(self.timeout_map[future]) del self.timeout_map[future] del self.request_map[request]
Remove all futures that were waiting for request `request` since it is done waiting
def _get_signed_predecessors(im, node, polarity): """Get upstream nodes in the influence map. Return the upstream nodes along with the overall polarity of the path to that node by account for the polarity of the path to the given node and the polarity of the edge between the given node and its immediate predecessors. Parameters ---------- im : networkx.MultiDiGraph Graph containing the influence map. node : str The node (rule name) in the influence map to get predecessors (upstream nodes) for. polarity : int Polarity of the overall path to the given node. Returns ------- generator of tuples, (node, polarity) Each tuple returned contains two elements, a node (string) and the polarity of the overall path (int) to that node. """ signed_pred_list = [] for pred in im.predecessors(node): pred_edge = (pred, node) yield (pred, _get_edge_sign(im, pred_edge) * polarity)
Get upstream nodes in the influence map. Return the upstream nodes along with the overall polarity of the path to that node by accounting for the polarity of the path to the given node and the polarity of the edge between the given node and its immediate predecessors. Parameters ---------- im : networkx.MultiDiGraph Graph containing the influence map. node : str The node (rule name) in the influence map to get predecessors (upstream nodes) for. polarity : int Polarity of the overall path to the given node. Returns ------- generator of tuples, (node, polarity) Each tuple returned contains two elements, a node (string) and the polarity of the overall path (int) to that node.
def __get_switch_arr(work_sheet, row_num): ''' if valud of the column of the row is `1`, it will be added to the array. ''' u_dic = [] for col_idx in FILTER_COLUMNS: cell_val = work_sheet['{0}{1}'.format(col_idx, row_num)].value if cell_val in [1, '1']: # Appending the slug name of the switcher. u_dic.append(work_sheet['{0}1'.format(col_idx)].value.strip().split(',')[0]) return u_dic
if the value of the column of the row is `1`, it will be added to the array.
def add_node(self, name, desc, layout, node_x, node_y): """ Add a node to a network. """ existing_node = get_session().query(Node).filter(Node.name==name, Node.network_id==self.id).first() if existing_node is not None: raise HydraError("A node with name %s is already in network %s"%(name, self.id)) node = Node() node.name = name node.description = desc node.layout = str(layout) if layout is not None else None node.x = node_x node.y = node_y #Do not call save here because it is likely that we may want #to bulk insert nodes, not one at a time. get_session().add(node) self.nodes.append(node) return node
Add a node to a network.
def detect(filename, include_confidence=False): """ Detect the encoding of a file. Returns only the predicted current encoding as a string. If `include_confidence` is True, Returns tuple containing: (str encoding, float confidence) """ f = open(filename) detection = chardet.detect(f.read()) f.close() encoding = detection.get('encoding') confidence = detection.get('confidence') if include_confidence: return (encoding, confidence) return encoding
Detect the encoding of a file. Returns only the predicted current encoding as a string. If `include_confidence` is True, Returns tuple containing: (str encoding, float confidence)
def call_for_each_tower(self, tower_fn): """ Call the function `tower_fn` under :class:`TowerContext` for each tower. Returns: a list, contains the return values of `tower_fn` on each tower. """ ps_device = 'cpu' if len(self.towers) >= 4 else 'gpu' raw_devices = ['/gpu:{}'.format(k) for k in self.towers] if ps_device == 'gpu': devices = [LeastLoadedDeviceSetter(d, raw_devices) for d in raw_devices] else: devices = [tf.train.replica_device_setter( worker_device=d, ps_device='/cpu:0', ps_tasks=1) for d in raw_devices] return DataParallelBuilder.build_on_towers(self.towers, tower_fn, devices)
Call the function `tower_fn` under :class:`TowerContext` for each tower. Returns: a list, contains the return values of `tower_fn` on each tower.
def source(self, format='xml', accessible=False): """ Args: format (str): only 'xml' and 'json' source types are supported accessible (bool): when set to true, format is always 'json' """ if accessible: return self.http.get('/wda/accessibleSource').value return self.http.get('source?format='+format).value
Args: format (str): only 'xml' and 'json' source types are supported accessible (bool): when set to true, format is always 'json'
def find_layer_idx(model, layer_name): """Looks up the layer index corresponding to `layer_name` from `model`. Args: model: The `keras.models.Model` instance. layer_name: The name of the layer to lookup. Returns: The layer index if found. Raises an exception otherwise. """ layer_idx = None for idx, layer in enumerate(model.layers): if layer.name == layer_name: layer_idx = idx break if layer_idx is None: raise ValueError("No layer with name '{}' within the model".format(layer_name)) return layer_idx
Looks up the layer index corresponding to `layer_name` from `model`. Args: model: The `keras.models.Model` instance. layer_name: The name of the layer to lookup. Returns: The layer index if found. Raises an exception otherwise.
def create_dcnm_in_nwk(self, tenant_id, fw_dict, is_fw_virt=False): """Create the DCNM In Network and store the result in DB. """ tenant_name = fw_dict.get('tenant_name') ret = self._create_service_nwk(tenant_id, tenant_name, 'in') if ret: res = fw_const.DCNM_IN_NETWORK_CREATE_SUCCESS LOG.info("In Service network created for tenant %s", tenant_id) else: res = fw_const.DCNM_IN_NETWORK_CREATE_FAIL LOG.info("In Service network create failed for tenant %s", tenant_id) self.update_fw_db_result(tenant_id, dcnm_status=res) return ret
Create the DCNM In Network and store the result in DB.
def list_knowledge_bases(project_id): """Lists the Knowledge bases belonging to a project. Args: project_id: The GCP project linked with the agent.""" import dialogflow_v2beta1 as dialogflow client = dialogflow.KnowledgeBasesClient() project_path = client.project_path(project_id) print('Knowledge Bases for: {}'.format(project_id)) for knowledge_base in client.list_knowledge_bases(project_path): print(' - Display Name: {}'.format(knowledge_base.display_name)) print(' - Knowledge ID: {}\n'.format(knowledge_base.name))
Lists the Knowledge bases belonging to a project. Args: project_id: The GCP project linked with the agent.
def _is_viable_phone_number(number): """Checks to see if a string could possibly be a phone number. At the moment, checks to see that the string begins with at least 2 digits, ignoring any punctuation commonly found in phone numbers. This method does not require the number to be normalized in advance - but does assume that leading non-number symbols have been removed, such as by the method _extract_possible_number. Arguments: number -- string to be checked for viability as a phone number Returns True if the number could be a phone number of some sort, otherwise False """ if len(number) < _MIN_LENGTH_FOR_NSN: return False match = fullmatch(_VALID_PHONE_NUMBER_PATTERN, number) return bool(match)
Checks to see if a string could possibly be a phone number. At the moment, checks to see that the string begins with at least 2 digits, ignoring any punctuation commonly found in phone numbers. This method does not require the number to be normalized in advance - but does assume that leading non-number symbols have been removed, such as by the method _extract_possible_number. Arguments: number -- string to be checked for viability as a phone number Returns True if the number could be a phone number of some sort, otherwise False
def with_wrapper(self, wrapper=None, name=None): """ Copy this BarSet, and return a new BarSet with the specified name and wrapper. If no name is given, `{self.name}_custom_wrapper` is used. If no wrapper is given, the new BarSet will have no wrapper. """ name = name or '{}_custom_wrapper'.format(self.name) return self.__class__(self.data, name=name, wrapper=wrapper)
Copy this BarSet, and return a new BarSet with the specified name and wrapper. If no name is given, `{self.name}_custom_wrapper` is used. If no wrapper is given, the new BarSet will have no wrapper.
def _process_response(self, response, marker_elems=None): """ Helper to process the xml response from AWS """ body = response.read() #print body if '<Errors>' not in body: rs = ResultSet(marker_elems) h = handler.XmlHandler(rs, self) xml.sax.parseString(body, h) return rs else: raise MTurkRequestError(response.status, response.reason, body)
Helper to process the xml response from AWS
def add_vcenter(self, **kwargs): """ Add vCenter on the switch Args: id(str) : Name of an established vCenter url (bool) : vCenter URL username (str): Username of the vCenter password (str): Password of the vCenter callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None """ config = ET.Element("config") vcenter = ET.SubElement(config, "vcenter", xmlns="urn:brocade.com:mgmt:brocade-vswitch") id = ET.SubElement(vcenter, "id") id.text = kwargs.pop('id') credentials = ET.SubElement(vcenter, "credentials") url = ET.SubElement(credentials, "url") url.text = kwargs.pop('url') username = ET.SubElement(credentials, "username") username.text = kwargs.pop('username') password = ET.SubElement(credentials, "password") password.text = kwargs.pop('password') try: self._callback(config) return True except Exception as error: logging.error(error) return False
Add vCenter on the switch Args: id (str) : Name of an established vCenter url (str) : vCenter URL username (str): Username of the vCenter password (str): Password of the vCenter callback (function): A function executed upon completion of the method. Returns: Return value of `callback`. Raises: None
def list_tags(self, pattern=None): """ List all tags made on this project. :param pattern: filters the starting letters of the return value :return: """ request_url = "{}tags".format(self.create_basic_url()) params = None if pattern: params = {'pattern': pattern} return_value = self._call_api(request_url, params=params) return return_value['tags']
List all tags made on this project. :param pattern: filters the starting letters of the return value :return:
def _send_request(url_id, data=None, json=None, req_type=None): """ Send request to Seeder's API. Args: url_id (str): ID used as identification in Seeder. data (obj, default None): Optional parameter for data. json (obj, default None): Optional parameter for JSON body. req_type (fn, default None): Request method used to send/download the data. If none, `requests.get` is used. Returns: dict: Data from Seeder. """ url = settings.SEEDER_INFO_URL % url_id if not req_type: req_type = requests.get resp = req_type( url, data=data, json=json, timeout=settings.SEEDER_TIMEOUT, headers={ "User-Agent": settings.USER_AGENT, "Authorization": settings.SEEDER_TOKEN, } ) resp.raise_for_status() data = resp.json() return data
Send request to Seeder's API. Args: url_id (str): ID used as identification in Seeder. data (obj, default None): Optional parameter for data. json (obj, default None): Optional parameter for JSON body. req_type (fn, default None): Request method used to send/download the data. If none, `requests.get` is used. Returns: dict: Data from Seeder.
def _is_possible_loh(rec, vcf_rec, params, somatic_info, use_status=False, max_normal_depth=None): """Check if the VCF record is a het in the normal with sufficient support. Only returns SNPs, since indels tend to have less precise frequency measurements. """ if _is_biallelic_snp(rec) and _passes_plus_germline(rec, use_status=use_status): stats = _tumor_normal_stats(rec, somatic_info, vcf_rec) depths = [tz.get_in([x, "depth"], stats) for x in ["normal", "tumor"]] depths = [d for d in depths if d is not None] normal_freq = tz.get_in(["normal", "freq"], stats) tumor_freq = tz.get_in(["tumor", "freq"], stats) if all([d > params["min_depth"] for d in depths]): if max_normal_depth and tz.get_in(["normal", "depth"], stats, 0) > max_normal_depth: return None if normal_freq is not None: if normal_freq >= params["min_freq"] and normal_freq <= params["max_freq"]: return stats elif (tumor_freq >= params["tumor_only"]["min_freq"] and tumor_freq <= params["tumor_only"]["max_freq"]): if (vcf_rec and not _has_population_germline(vcf_rec)) or is_population_germline(rec): return stats
Check if the VCF record is a het in the normal with sufficient support. Only returns SNPs, since indels tend to have less precise frequency measurements.
def _auto_unlock_key_position(self): """Find the open sesame password in the default keyring """ found_pos = None default_keyring_ids = gkr.list_item_ids_sync(self.default_keyring) for pos in default_keyring_ids: item_attrs = gkr.item_get_attributes_sync(self.default_keyring, pos) app = 'application' if item_attrs.has_key(app) and item_attrs[app] == "opensesame": found_pos = pos break return found_pos
Find the open sesame password in the default keyring
def namedb_get_names_owned_by_address( cur, address, current_block ): """ Get the list of non-expired, non-revoked names owned by an address. Only works if there is a *singular* address for the name. """ unexpired_fragment, unexpired_args = namedb_select_where_unexpired_names( current_block ) select_query = "SELECT name FROM name_records JOIN namespaces ON name_records.namespace_id = namespaces.namespace_id " + \ "WHERE name_records.address = ? AND name_records.revoked = 0 AND " + unexpired_fragment + ";" args = (address,) + unexpired_args name_rows = namedb_query_execute( cur, select_query, args ) names = [] for name_row in name_rows: names.append( name_row['name'] ) if len(names) == 0: return None else: return names
Get the list of non-expired, non-revoked names owned by an address. Only works if there is a *singular* address for the name.
def from_string(cls, string, relpath=None, encoding=None, is_sass=None): """Read Sass source from the contents of a string. The origin is always None. `relpath` defaults to "string:...". """ if isinstance(string, six.text_type): # Already decoded; we don't know what encoding to use for output, # though, so still check for a @charset. # TODO what if the given encoding conflicts with the one in the # file? do we care? if encoding is None: encoding = determine_encoding(string) byte_contents = string.encode(encoding) text_contents = string elif isinstance(string, six.binary_type): encoding = determine_encoding(string) byte_contents = string text_contents = string.decode(encoding) else: raise TypeError("Expected text or bytes, got {0!r}".format(string)) origin = None if relpath is None: m = hashlib.sha256() m.update(byte_contents) relpath = repr("string:{0}:{1}".format( m.hexdigest()[:16], text_contents[:100])) return cls( origin, relpath, text_contents, encoding=encoding, is_sass=is_sass, )
Read Sass source from the contents of a string. The origin is always None. `relpath` defaults to "string:...".
def calculate_token(self, text, seed=None):
    """Calculate the request token (`tk`) of a string.

    :param text: str The text to calculate a token for
    :param seed: str The seed to use. By default this is the number of
        hours since epoch
    """
    if seed is None:
        seed = self._get_token_key()

    first_seed, second_seed = seed.split(".")

    try:
        data = bytearray(text.encode('UTF-8'))
    except UnicodeDecodeError:
        # This will probably only occur when text is actually a str
        # containing UTF-8 chars, which means we don't need to encode.
        data = bytearray(text)

    # Fold every byte into the accumulator, salting each step.
    a = int(first_seed)
    for byte in data:
        a += byte
        a = self._work_token(a, self.SALT_1)
    a = self._work_token(a, self.SALT_2)
    a ^= int(second_seed)

    # Normalize negative values into the unsigned 32-bit range.
    if a < 0:
        a = (a & 2147483647) + 2147483648
    a %= 1E6
    a = int(a)

    return "{0}.{1}".format(a, a ^ int(first_seed))
Calculate the request token (`tk`) of a string :param text: str The text to calculate a token for :param seed: str The seed to use. By default this is the number of hours since epoch
def open(self):
    """
    Open the Sender using the supplied connection.
    If the handler has previously been redirected, the redirect
    context will be used to create a new handler before opening it.

    :param connection: The underlying client shared connection.
    :type: connection: ~uamqp.connection.Connection
    """
    self.running = True
    if self.redirected:
        # The redirect response carries the new endpoint; retarget and
        # build a fresh send handler against it before opening.
        self.target = self.redirected.address
        self._handler = SendClient(
            self.target,
            auth=self.client.get_auth(),
            debug=self.client.debug,
            msg_timeout=self.timeout,
            error_policy=self.retry_policy,
            keep_alive_interval=self.keep_alive,
            client_name=self.name,
            properties=self.client.create_properties())
    self._handler.open()
    # Block until the underlying AMQP link is ready to send.
    while not self._handler.client_ready():
        time.sleep(0.05)
Open the Sender using the supplied conneciton. If the handler has previously been redirected, the redirect context will be used to create a new handler before opening it. :param connection: The underlying client shared connection. :type: connection: ~uamqp.connection.Connection
def set_attributes_all(target, attributes, discard_others=True):
    """ Set Attributes in bulk and optionally discard others.

    Sets each Attribute in turn (modifying it in place if possible if
    it is already present) and optionally discarding all other
    Attributes not explicitly set. This function yields much greater
    performance than the required individual calls to
    ``set_attribute``, ``set_attribute_string``,
    ``set_attribute_string_array`` and ``del_attribute`` put together.

    .. versionadded:: 0.2

    Parameters
    ----------
    target : Dataset or Group
        Dataset or Group to set the Attributes of.
    attributes : dict
        The Attributes to set. The keys (``str``) are the names. The
        values are ``tuple`` of the Attribute kind and the value to
        set. Valid kinds are ``'string_array'``, ``'string'``, and
        ``'value'``. The values must correspond to what
        ``set_attribute_string_array``, ``set_attribute_string`` and
        ``set_attribute`` would take respectively.
    discard_others : bool, optional
        Whether to discard all other Attributes not explicitly set
        (default) or not.

    See Also
    --------
    set_attribute
    set_attribute_string
    set_attribute_string_array

    """
    attrs = target.attrs
    existing = dict(attrs.items())
    # Generate special dtype for string arrays.
    # BUGFIX: the original chained assignment ``str_arr_dtype = dtype=...``
    # also created a stray local ``dtype`` variable; only str_arr_dtype is
    # needed.
    if sys.hexversion >= 0x03000000:
        str_arr_dtype = h5py.special_dtype(vlen=str)
    else:
        str_arr_dtype = h5py.special_dtype(vlen=unicode)
    # Go through each attribute. If it is already present, modify it if
    # possible and create it otherwise (deletes old value.)
    for k, (kind, value) in attributes.items():
        if kind == 'string_array':
            attrs.create(k, [convert_to_str(s) for s in value],
                         dtype=str_arr_dtype)
        else:
            if kind == 'string':
                value = np.bytes_(value)
            if k not in existing:
                attrs.create(k, value)
            else:
                try:
                    if value.dtype == existing[k].dtype \
                            and value.shape == existing[k].shape:
                        attrs.modify(k, value)
                    else:
                        # BUGFIX: a dtype/shape mismatch previously fell
                        # through silently, leaving the stale value in place.
                        attrs.create(k, value)
                except Exception:
                    # value may lack .dtype/.shape (plain scalars); recreate.
                    attrs.create(k, value)
    # Discard all other attributes.
    if discard_others:
        for k in set(existing) - set(attributes):
            del attrs[k]
Set Attributes in bulk and optionally discard others. Sets each Attribute in turn (modifying it in place if possible if it is already present) and optionally discarding all other Attributes not explicitly set. This function yields much greater performance than the required individual calls to ``set_attribute``, ``set_attribute_string``, ``set_attribute_string_array`` and ``del_attribute`` put together. .. versionadded:: 0.2 Parameters ---------- target : Dataset or Group Dataset or Group to set the Attributes of. attributes : dict The Attributes to set. The keys (``str``) are the names. The values are ``tuple`` of the Attribute kind and the value to set. Valid kinds are ``'string_array'``, ``'string'``, and ``'value'``. The values must correspond to what ``set_attribute_string_array``, ``set_attribute_string`` and ``set_attribute`` would take respectively. discard_others : bool, optional Whether to discard all other Attributes not explicitly set (default) or not. See Also -------- set_attribute set_attribute_string set_attribute_string_array
def complete_delivery_note(self, delivery_note_id, complete_dict):
    """
    Completes an delivery note

    :param complete_dict: the complete dict with the template id
    :param delivery_note_id: the delivery note id
    :return: Response
    """
    # Delegate to the shared PUT helper with the COMPLETE command.
    return self._create_put_request(
        resource=DELIVERY_NOTES,
        command=COMPLETE,
        billomat_id=delivery_note_id,
        send_data=complete_dict,
    )
Completes an delivery note :param complete_dict: the complete dict with the template id :param delivery_note_id: the delivery note id :return: Response
def _dates(p_word_before_cursor):
    """ Generator for date completion. """
    def to_absolute(relative):
        # Resolve a relative date string ("tod", "1w", ...) to ISO format.
        return relative_date_to_date(relative).isoformat()

    # Only the text after the first ':' counts as the value being completed.
    value = p_word_before_cursor[p_word_before_cursor.find(':') + 1:]

    for candidate in date_suggestions():
        if candidate.startswith(value):
            yield Completion(candidate, -len(value),
                             display_meta=to_absolute(candidate))
Generator for date completion.
def copy(self):
    """Create a shallow copy of the sorted set."""
    # Copy the backing set; the key function is shared, not duplicated.
    return self._fromset(set(self._set), key=self._key)
Create a shallow copy of the sorted set.
def call_func(self, *args, **kwargs):
    """Called. Take care of exceptions using gather"""
    # return_exceptions=True folds any exception into the gathered result
    # instead of raising, so set_result always receives the finished future.
    asyncio.gather(
        self.cron(*args, **kwargs),
        loop=self.loop,
        return_exceptions=True
    ).add_done_callback(self.set_result)
Called. Take care of exceptions using gather
def reduce_by(fn: Callable[[T1, T1], T1]) -> Callable[[ActualIterable[T1]], T1]:
    """
    >>> from Redy.Collections import Traversal, Flow
    >>> def mul(a: int, b: int): return a * b
    >>> lst: Iterable[int] = [1, 2, 3]
    >>> x = Flow(lst)[Traversal.reduce_by(mul)].unbox
    >>> assert x is 6
    """
    def folder(collection: ActualIterable[T1]) -> T1:
        # Left-fold the collection with the supplied binary function.
        return functools.reduce(fn, collection)

    return folder
>>> from Redy.Collections import Traversal, Flow >>> def mul(a: int, b: int): return a * b >>> lst: Iterable[int] = [1, 2, 3] >>> x = Flow(lst)[Traversal.reduce_by(mul)].unbox >>> assert x is 6
def from_string(cls, s):
    """
    Init a new object from a string.

    Args:
        s (string): raw email

    Returns:
        Instance of MailParser
    """
    log.debug("Parsing email from string")
    return cls(email.message_from_string(s))
Init a new object from a string. Args: s (string): raw email Returns: Instance of MailParser
def boundingbox(self):
    """Compute the bounding box of the compound.

    Returns
    -------
    mb.Box
        The bounding box for this Compound
    """
    coords = self.xyz
    lower = coords.min(axis=0)
    upper = coords.max(axis=0)
    return Box(mins=lower, maxs=upper)
Compute the bounding box of the compound. Returns ------- mb.Box The bounding box for this Compound
def decode_async_options(options):
    """Decode Async options from JSON decoding."""
    decoded = copy.deepcopy(options)

    # JSON cannot carry datetimes; eta round-trips as a POSIX timestamp.
    eta = decoded.get('task_args', {}).get('eta')
    if eta:
        from datetime import datetime
        decoded['task_args']['eta'] = datetime.fromtimestamp(eta)

    # If there are callbacks, reconstitute them.
    if decoded.get('callbacks', {}):
        decoded['callbacks'] = decode_callbacks(decoded['callbacks'])

    # Dotted-path references were serialized under dunder keys; resolve
    # each back to a live object under its runtime key.
    for json_key, live_key in (('__context_checker', '_context_checker'),
                               ('__process_results', '_process_results')):
        if json_key in options:
            decoded[live_key] = path_to_reference(options[json_key])

    return decoded
Decode Async options from JSON decoding.
def findNodeById(self, objectId):
    """
    Looks up the node based on the unique node identifier.

    :param      objectId
    """
    for node in self.items():
        if isinstance(node, XNode) and node.objectId() == objectId:
            return node
    return None
Looks up the node based on the unique node identifier. :param nodeId
def stop_recording(self):
    """Stop recording from the audio source."""
    # Signal consumers first so any blocked reads can unwind, then stop
    # the source under the lock to avoid racing a concurrent start.
    self._stop_recording.set()
    with self._source_lock:
        self._source.stop()
    self._recording = False
Stop recording from the audio source.
def keyPressEvent( self, event ):
    """
    Looks for the Esc key to close the popup.

    :param      event | <QKeyEvent>
    """
    if ( event.key() == Qt.Key_Escape ):
        # Escape always dismisses the popup and consumes the event.
        self.reject()
        event.accept()
        return

    elif ( event.key() in (Qt.Key_Return, Qt.Key_Enter) ):
        # Enter/Return only auto-accepts when this popup is the default
        # handler; otherwise fall through to the base implementation.
        if self._autoDefault:
            self.accept()
            event.accept()
            return

    super(XPopupWidget, self).keyPressEvent(event)
Looks for the Esc key to close the popup. :param event | <QKeyEvent>
def _get_match(self, host_object):
    """ Get an item matching the given host object.

    The item may be either a parent domain or identical value.  Parent
    domains and existing identical values always precede the insertion
    point for a given value, so the item just before the insertion point
    is the only candidate worth testing.

    :param host_object: an object representing ip address or hostname
        whose match we are trying to find
    """
    insertion_point = self._get_insertion_point(host_object)
    try:
        candidate = self[insertion_point - 1]
    except IndexError:
        candidate = None
    return candidate if host_object.is_match(candidate) else None
Get an item matching the given host object. The item may be either a parent domain or identical value. Parent domains and existing identical values always precede insertion point for given value - therefore, we treat an item just before insertion point as potential match. :param host_object: an object representing ip address or hostname whose match we are trying to find
def get_begin_cursor(self, project_name, logstore_name, shard_id):
    """ Get begin cursor from log service for batch pull logs
    Unsuccessful opertaion will cause an LogException.

    :type project_name: string
    :param project_name: the Project name

    :type logstore_name: string
    :param logstore_name: the logstore name

    :type shard_id: int
    :param shard_id: the shard id

    :return: GetLogsResponse

    :raise: LogException
    """
    # Thin convenience wrapper around get_cursor with a fixed "begin" position.
    return self.get_cursor(project_name, logstore_name, shard_id, "begin")
Get begin cursor from log service for batch pull logs Unsuccessful opertaion will cause an LogException. :type project_name: string :param project_name: the Project name :type logstore_name: string :param logstore_name: the logstore name :type shard_id: int :param shard_id: the shard id :return: GetLogsResponse :raise: LogException
def main():
    """I provide a command-line interface for this module """
    banner = "-~*~-" * 10
    print()
    print(banner)
    print(lorem_gotham_title().center(50))
    print(banner)
    print()
    poem = lorem_gotham()
    for line_no in range(16):
        # Blank line between 4-line stanzas.
        if line_no in (4, 8, 12):
            print()
        print(next(poem))
    print()
I provide a command-line interface for this module
async def wait_loop(self):
    """
    Waits on a loop for reactions to the message. This should not be
    called manually - it is handled by `send_to`.
    """
    start, back, forward, end, close = self.emojis

    def check(payload: discord.RawReactionActionEvent):
        """
        Checks if this reaction is related to the paginator interface.
        """
        # When an owner is set, only their reactions count.
        owner_check = not self.owner or payload.user_id == self.owner.id

        emoji = payload.emoji
        if isinstance(emoji, discord.PartialEmoji) and emoji.is_unicode_emoji():
            emoji = emoji.name

        # Accept only known paginator emojis on this message, and never
        # react to the bot's own reactions.
        return payload.message_id == self.message.id and \
            emoji and emoji in self.emojis and \
            payload.user_id != self.bot.user.id and owner_check

    try:
        while not self.bot.is_closed():
            payload = await self.bot.wait_for('raw_reaction_add', check=check, timeout=self.timeout)

            emoji = payload.emoji
            if isinstance(emoji, discord.PartialEmoji) and emoji.is_unicode_emoji():
                emoji = emoji.name

            if emoji == close:
                await self.message.delete()
                return

            if emoji == start:
                self._display_page = 0
            elif emoji == end:
                self._display_page = self.page_count - 1
            elif emoji == back:
                self._display_page -= 1
            elif emoji == forward:
                self._display_page += 1

            # Refresh the message asynchronously so the loop keeps listening.
            self.bot.loop.create_task(self.update())

            try:
                # Remove the user's reaction so it acts like a button.
                await self.message.remove_reaction(payload.emoji, discord.Object(id=payload.user_id))
            except discord.Forbidden:
                pass

    except asyncio.TimeoutError:
        if self.delete_message:
            return await self.message.delete()

        # On timeout, strip the control reactions but keep the message.
        for emoji in filter(None, self.emojis):
            try:
                await self.message.remove_reaction(emoji, self.message.guild.me)
            except (discord.Forbidden, discord.NotFound):
                pass
Waits on a loop for reactions to the message. This should not be called manually - it is handled by `send_to`.
def handle_typical_memberdefs_no_overload(self, signature, memberdef_nodes):
    """Produce standard documentation for memberdef_nodes."""
    for node in memberdef_nodes:
        # Open the SWIG %feature("docstring") directive for this signature.
        self.add_text(['\n', '%feature("docstring") ', signature, ' "', '\n'])
        if self.with_function_signature:
            self.add_line_with_subsequent_indent(self.get_function_signature(node))
        self.subnode_parse(node, pieces=[], ignore=['definition', 'name'])
        # Close the directive.
        self.add_text(['";', '\n'])
Produce standard documentation for memberdef_nodes.
def get_volumes_for_instance(self, arg, device=None):
    """
    Return all EC2 Volume objects attached to ``arg`` instance name or ID.

    May specify ``device`` to limit to the (single) volume attached as
    that device.
    """
    instance = self.get(arg)
    attachment_filters = {'attachment.instance-id': instance.id}
    if device is not None:
        attachment_filters['attachment.device'] = device
    return self.get_all_volumes(filters=attachment_filters)
Return all EC2 Volume objects attached to ``arg`` instance name or ID. May specify ``device`` to limit to the (single) volume attached as that device.
def get_definition(self):
    """
    Add Definition to XMLBIF

    Return
    ------
    dict: dict of type {variable: definition tag}

    Examples
    --------
    >>> writer = XMLBIFWriter(model)
    >>> writer.get_definition()
    {'hear-bark': <Element DEFINITION at 0x7f1d48977408>,
     'family-out': <Element DEFINITION at 0x7f1d489773c8>,
     'dog-out': <Element DEFINITION at 0x7f1d48977388>,
     'bowel-problem': <Element DEFINITION at 0x7f1d48977348>,
     'light-on': <Element DEFINITION at 0x7f1d48977448>}
    """
    cpds = self.model.get_cpds()
    cpds.sort(key=lambda cpd: cpd.variable)

    definition_tag = {}
    for cpd in cpds:
        tag = etree.SubElement(self.network, "DEFINITION")
        definition_tag[cpd.variable] = tag
        etree.SubElement(tag, "FOR").text = cpd.variable
        # Evidence variables, reversed then sorted alphabetically.
        for parent in sorted(cpd.variables[:0:-1]):
            etree.SubElement(tag, "GIVEN").text = parent

    return definition_tag
Add Definition to XMLBIF Return ------ dict: dict of type {variable: definition tag} Examples -------- >>> writer = XMLBIFWriter(model) >>> writer.get_definition() {'hear-bark': <Element DEFINITION at 0x7f1d48977408>, 'family-out': <Element DEFINITION at 0x7f1d489773c8>, 'dog-out': <Element DEFINITION at 0x7f1d48977388>, 'bowel-problem': <Element DEFINITION at 0x7f1d48977348>, 'light-on': <Element DEFINITION at 0x7f1d48977448>}
def scores(self, result, add_new_line=True):
    """Prints out the scores in a pretty format"""
    home_goals = result.goalsHomeTeam
    away_goals = result.goalsAwayTeam

    # Color each side by outcome: winner, loser, or tie.
    if home_goals > away_goals:
        home_color, away_color = self.colors.WIN, self.colors.LOSE
    elif home_goals < away_goals:
        home_color, away_color = self.colors.LOSE, self.colors.WIN
    else:
        home_color = away_color = self.colors.TIE

    click.secho('%-25s %2s' % (result.homeTeam, home_goals),
                fg=home_color, nl=False)
    click.secho(" vs ", nl=False)
    click.secho('%2s %s' % (away_goals, result.awayTeam.rjust(25)),
                fg=away_color, nl=add_new_line)
Prints out the scores in a pretty format
def set_default(nick, location, session, send, apikey):
    """Sets nick's default location to location."""
    if valid_location(location, apikey):
        send("Setting default location")
        # One prefs row per nick: update it when present, insert otherwise.
        default = session.query(Weather_prefs).filter(Weather_prefs.nick == nick).first()
        if default is None:
            default = Weather_prefs(nick=nick, location=location)
            session.add(default)
        else:
            default.location = location
    else:
        send("Invalid or Ambiguous Location")
Sets nick's default location to location.
def get_external_subprocess_output(command_list, print_output=False, indent_string="",
                                   split_lines=True, ignore_called_process_errors=False, env=None):
    """Run the command and arguments in the command_list.  Will search the system
    PATH.  Returns the output as a list of lines.  If print_output is True the
    output is echoed to stdout, indented (or otherwise prefixed) by indent_string.
    Waits for command completion.  Called process errors can be set to be
    ignored if necessary."""
    # Note ghostscript bounding box output writes to stderr!  So we need to
    # be sure to capture the stderr along with the stdout.
    # BUGFIX: a leftover debugging line here used to force print_output = False,
    # silently disabling the documented print_output parameter.

    use_popen = True  # Needs to be True to set ignore_called_process_errors True
    if use_popen:  # Use lower-level Popen call.
        p = subprocess.Popen(command_list, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT, env=env)
        output, errout = p.communicate()
        returncode = p.poll()
        if not ignore_called_process_errors and returncode != 0:
            raise subprocess.CalledProcessError(returncode, command_list, output=output)
    else:  # Use a check_output call.
        # Note this does not work correctly if shell=True.
        output = subprocess.check_output(command_list, stderr=subprocess.STDOUT,
                                         shell=False, env=env)

    output = output.decode("utf-8")

    if split_lines or print_output:
        split_output = output.splitlines()
    if split_lines:
        output = split_output
    if print_output:
        print()
        for line in split_output:
            print(indent_string + line)
        sys.stdout.flush()
    return output
Run the command and arguments in the command_list. Will search the system PATH. Returns the output as a list of lines. If print_output is True the output is echoed to stdout, indented (or otherwise prefixed) by indent_string. Waits for command completion. Called process errors can be set to be ignored if necessary.
def get_current_temperature(self, refresh=False):
    """Get current temperature"""
    if refresh:
        self.refresh()
    try:
        # A missing or malformed reading yields None rather than raising.
        raw = self.get_value('temperature')
        return float(raw)
    except (TypeError, ValueError):
        return None
Get current temperature
def parse_skypos(ra, dec):
    """
    Function to parse RA and Dec input values and turn them into decimal
    degrees.

    Input formats could be:
        ["nn","nn","nn.nn"]
        "nn nn nn.nnn"
        "nn:nn:nn.nn"
        "nnH nnM nn.nnS" or "nnD nnM nn.nnS"
        nn.nnnnnnnn
        "nn.nnnnnnn"
    """
    ra_deg, dec_deg = make_val_float(ra), make_val_float(dec)
    if ra_deg is None:
        # RA did not parse as a plain float; treat both as sexagesimal.
        ra_deg, dec_deg = radec_hmstodd(ra, dec)
    return ra_deg, dec_deg
Function to parse RA and Dec input values and turn them into decimal degrees Input formats could be: ["nn","nn","nn.nn"] "nn nn nn.nnn" "nn:nn:nn.nn" "nnH nnM nn.nnS" or "nnD nnM nn.nnS" nn.nnnnnnnn "nn.nnnnnnn"
def from_bytes(cls, bitstream):
    '''
    Parse the given record and update properties accordingly.

    `bitstream` may be raw bytes, a Bits object, or a ConstBitStream;
    it is consumed in wire order, so the read sequence below must not
    be reordered.
    '''
    record = cls()

    # Convert to ConstBitStream (if not already provided)
    if not isinstance(bitstream, ConstBitStream):
        if isinstance(bitstream, Bits):
            bitstream = ConstBitStream(auto=bitstream)
        else:
            bitstream = ConstBitStream(bytes=bitstream)

    # Read the record TTL
    record.ttl = bitstream.read('uint:32')

    # Store the locator record count until we need it
    locator_record_count = bitstream.read('uint:8')

    # Store the EID prefix mask length until we need it
    eid_prefix_len = bitstream.read('uint:8')

    # Read the Negative Map_Reply action
    record.action = bitstream.read('uint:3')

    # Read the flag
    record.authoritative = bitstream.read('bool')

    # Read reserved bits (kept verbatim so they can be echoed back)
    record._reserved1 = bitstream.read(12 + 4)

    # Read the map version
    record.map_version = bitstream.read('uint:12')

    # Read the EID prefix
    record.eid_prefix = read_afi_address_from_bitstream(bitstream,
                                                        eid_prefix_len)

    # Read the locator records
    for dummy in range(locator_record_count):
        locator_record = LocatorRecord.from_bytes(bitstream)
        record.locator_records.append(locator_record)

    # Verify that the properties make sense
    record.sanitize()

    return record
Parse the given record and update properties accordingly
def on_network_adapter_change(self, network_adapter, change_adapter):
    """Triggered when settings of a network adapter of the
    associated virtual machine have changed.

    in network_adapter of type :class:`INetworkAdapter`

    in change_adapter of type bool

    raises :class:`VBoxErrorInvalidVmState`
        Session state prevents operation.

    raises :class:`VBoxErrorInvalidObjectState`
        Session type prevents operation.

    """
    # Validate both arguments before forwarding the event over the wire.
    validations = (
        (network_adapter, INetworkAdapter, "network_adapter", "INetworkAdapter"),
        (change_adapter, bool, "change_adapter", "bool"),
    )
    for value, expected_type, arg_name, type_name in validations:
        if not isinstance(value, expected_type):
            raise TypeError("%s can only be an instance of type %s"
                            % (arg_name, type_name))
    self._call("onNetworkAdapterChange",
               in_p=[network_adapter, change_adapter])
Triggered when settings of a network adapter of the associated virtual machine have changed. in network_adapter of type :class:`INetworkAdapter` in change_adapter of type bool raises :class:`VBoxErrorInvalidVmState` Session state prevents operation. raises :class:`VBoxErrorInvalidObjectState` Session type prevents operation.
def closeContentsWidget(self):
    """
    Closes the current contents widget.
    """
    current = self.currentContentsWidget()
    if not current:
        return
    current.close()
    # Detach from the parent before scheduling Qt-side deletion.
    current.setParent(None)
    current.deleteLater()
Closes the current contents widget.
def NewFromLab(l, a, b, alpha=1.0, wref=_DEFAULT_WREF):
    '''Create a new instance based on the specifed CIE-LAB values.

    Parameters:
      :l:
        The L component [0...100]
      :a:
        The a component [-1...1]
      :b:
        The a component [-1...1]
      :alpha:
        The color transparency [0...1], default is opaque
      :wref:
        The whitepoint reference, default is 2° D65.

    Returns:
      A grapefruit.Color instance.

    >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692))
    '(1, 0.5, 1.09491e-08, 1)'
    >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, wref=Color.WHITE_REFERENCE['std_D50']))
    '(1.01238, 0.492011, -0.14311, 1)'
    >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, 0.5))
    '(1, 0.5, 1.09491e-08, 0.5)'
    >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, 0.5, Color.WHITE_REFERENCE['std_D50']))
    '(1.01238, 0.492011, -0.14311, 0.5)'
    '''
    # Convert Lab -> XYZ -> RGB, carrying the whitepoint through both steps.
    return Color(Color.XyzToRgb(*Color.LabToXyz(l, a, b, wref)), 'rgb', alpha, wref)
Create a new instance based on the specifed CIE-LAB values. Parameters: :l: The L component [0...100] :a: The a component [-1...1] :b: The a component [-1...1] :alpha: The color transparency [0...1], default is opaque :wref: The whitepoint reference, default is 2° D65. Returns: A grapefruit.Color instance. >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692)) '(1, 0.5, 1.09491e-08, 1)' >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, wref=Color.WHITE_REFERENCE['std_D50'])) '(1.01238, 0.492011, -0.14311, 1)' >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, 0.5)) '(1, 0.5, 1.09491e-08, 0.5)' >>> str(Color.NewFromLab(66.9518, 0.43084, 0.739692, 0.5, Color.WHITE_REFERENCE['std_D50'])) '(1.01238, 0.492011, -0.14311, 0.5)'
def write_csv(path, data):
    """This function writes comma-separated <data> to <path>. Parameter <path> is either
    a pathname or a file-like object that supports the |write()| method."""
    fd = _try_open_file(path, 'w',
                        'The first argument must be a pathname or an object that supports write() method')
    for row in data:
        fd.write(",".join(str(field) for field in row))
        fd.write("\n")
    _try_close_file(fd, path)
This function writes comma-separated <data> to <path>. Parameter <path> is either a pathname or a file-like object that supports the |write()| method.
def numRegisteredForRole(self, role, includeTemporaryRegs=False):
    '''
    Accepts a DanceRole object and returns the number of registrations of that role.
    '''
    count = self.eventregistration_set.filter(cancelled=False,dropIn=False,role=role).count()
    if includeTemporaryRegs:
        # Temporary registrations only count while their parent
        # registration has not yet expired.
        count += self.temporaryeventregistration_set.filter(dropIn=False,role=role).exclude(
            registration__expirationDate__lte=timezone.now()).count()
    return count
Accepts a DanceRole object and returns the number of registrations of that role.
def p_arr_assignment(p):
    """ statement : ARRAY_ID arg_list EQ expr
    | LET ARRAY_ID arg_list EQ expr
    """
    # NOTE: the docstring above is the PLY grammar rule - do not edit it.
    # With the optional LET keyword, every token index shifts by one.
    i = 2 if p[1].upper() == 'LET' else 1
    id_ = p[i]
    arg_list = p[i + 1]
    expr = p[i + 3]
    p[0] = None
    if arg_list is None or expr is None:
        return  # There were errors

    entry = SYMBOL_TABLE.access_call(id_, p.lineno(i))
    if entry is None:
        return

    if entry.type_ == TYPE.string:
        # One extra subscript on a string array selects a character:
        # translate into a substring assignment instead.
        variable = gl.SYMBOL_TABLE.access_array(id_, p.lineno(i))
        if len(variable.bounds) + 1 == len(arg_list):
            ss = arg_list.children.pop().value
            p[0] = make_array_substr_assign(p.lineno(i), id_, arg_list, (ss, ss), expr)
            return

    arr = make_array_access(id_, p.lineno(i), arg_list)
    if arr is None:
        return

    # Coerce the right-hand side to the element type of the array.
    expr = make_typecast(arr.type_, expr, p.lineno(i))
    if entry is None:
        return

    p[0] = make_sentence('LETARRAY', arr, expr)
statement : ARRAY_ID arg_list EQ expr | LET ARRAY_ID arg_list EQ expr
def evaluate_extracted_tokens(gold_content, extr_content):
    """
    Evaluate the similarity between gold-standard and extracted content,
    typically for a single HTML document, as another way of evaluating the
    performance of an extractor model.

    Args:
        gold_content (str or Sequence[str]): Gold-standard content, either
            as a string or as an already-tokenized list of tokens.
        extr_content (str or Sequence[str]): Extracted content, either
            as a string or as an already-tokenized list of tokens.

    Returns:
        Dict[str, float]
    """
    # Tokenize lazily: inputs may already be token sequences.
    gold_tokens = simple_tokenizer(gold_content) if isinstance(gold_content, string_) else gold_content
    extr_tokens = simple_tokenizer(extr_content) if isinstance(extr_content, string_) else extr_content

    gold_set = set(gold_tokens)
    extr_set = set(extr_tokens)
    jaccard = len(gold_set & extr_set) / len(gold_set | extr_set)
    levenshtein = dameraulevenshtein(gold_tokens, extr_tokens)
    return {'jaccard': jaccard, 'levenshtein': levenshtein}
Evaluate the similarity between gold-standard and extracted content, typically for a single HTML document, as another way of evaluating the performance of an extractor model. Args: gold_content (str or Sequence[str]): Gold-standard content, either as a string or as an already-tokenized list of tokens. extr_content (str or Sequence[str]): Extracted content, either as a string or as an already-tokenized list of tokens. Returns: Dict[str, float]
def _get_repr_list(self):
    """
    Get some representation data common to all HDU types
    """
    indent = ' ' * 2
    lines = [
        '',
        "%sfile: %s" % (indent, self._filename),
        "%sextension: %d" % (indent, self._info['hdunum'] - 1),
        "%stype: %s" % (indent, _hdu_type_map[self._info['hdutype']]),
    ]

    # EXTNAME/EXTVER are optional; only show them when set.
    extname = self.get_extname()
    if extname != "":
        lines.append("%sextname: %s" % (indent, extname))
    extver = self.get_extver()
    if extver != 0:
        lines.append("%sextver: %s" % (indent, extver))

    return lines, indent
Get some representation data common to all HDU types
def get_ip_interface_input_request_type_get_request_interface_type(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF request tree for get-ip-interface with the given
    ``interface_type`` and dispatches it via ``callback`` (defaults to
    ``self._callback``).
    """
    config = ET.Element("config")
    get_ip_interface = ET.Element("get_ip_interface")
    # NOTE(review): rebinding `config` to the RPC element discards the
    # "config" element above - this pattern appears throughout the
    # generated code; confirm against the generator before changing.
    config = get_ip_interface
    input = ET.SubElement(get_ip_interface, "input")
    request_type = ET.SubElement(input, "request-type")
    get_request = ET.SubElement(request_type, "get-request")
    interface_type = ET.SubElement(get_request, "interface-type")
    interface_type.text = kwargs.pop('interface_type')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
Auto Generated Code
def _band8(ins):
    """ Pops top 2 operands out of the stack, and does
    1st AND (bitwise) 2nd operand (top of the stack),
    pushes the result.

    8 bit un/signed version
    """
    op1, op2 = tuple(ins.quad[2:])

    if _int_ops(op1, op2) is not None:
        # After this swap, op2 is guaranteed to be the constant operand.
        op1, op2 = _int_ops(op1, op2)

        output = _8bit_oper(op1)

        if op2 == 0xFF:  # X & 0xFF = X
            output.append('push af')
            return output

        if op2 == 0:  # X and 0 = 0
            output.append('xor a')
            output.append('push af')
            return output

    # Re-read the operands: this deliberately restores the original
    # (unswapped) order for the general two-operand code path.
    op1, op2 = tuple(ins.quad[2:])
    output = _8bit_oper(op1, op2)
    output.append('and h')
    output.append('push af')
    return output
Pops top 2 operands out of the stack, and does 1st AND (bitwise) 2nd operand (top of the stack), pushes the result. 8 bit un/signed version
def load_time_series(filename, delimiter=r'\s+'):
    r"""Import a time series from an annotation file.  The file should
    consist of two columns of numeric values corresponding to the time
    and value of each sample of the time series.

    Parameters
    ----------
    filename : str
        Path to the annotation file
    delimiter : str
        Separator regular expression.
        By default, lines will be split by any amount of whitespace.

    Returns
    -------
    times : np.ndarray
        array of timestamps (float)
    values : np.ndarray
        array of corresponding numeric values (float)

    """
    # Both columns parse as floats; wrap them as numpy arrays on the way out.
    times, values = load_delimited(filename, [float, float], delimiter)
    return np.array(times), np.array(values)
r"""Import a time series from an annotation file. The file should consist of two columns of numeric values corresponding to the time and value of each sample of the time series. Parameters ---------- filename : str Path to the annotation file delimiter : str Separator regular expression. By default, lines will be split by any amount of whitespace. Returns ------- times : np.ndarray array of timestamps (float) values : np.ndarray array of corresponding numeric values (float)
def start_background_task(self, target, *args, **kwargs):
    """Start a background task using the appropriate async model.

    This is a utility function that applications can use to start a
    background task using the method that is compatible with the
    selected async mode.

    :param target: the target function to execute.
    :param args: arguments to pass to the function.
    :param kwargs: keyword arguments to pass to the function.

    The return value is a ``asyncio.Task`` object.
    """
    coroutine = target(*args, **kwargs)
    return asyncio.ensure_future(coroutine)
Start a background task using the appropriate async model. This is a utility function that applications can use to start a background task using the method that is compatible with the selected async mode. :param target: the target function to execute. :param args: arguments to pass to the function. :param kwargs: keyword arguments to pass to the function. The return value is a ``asyncio.Task`` object.
def precmd(self, line):
    """Handle alias expansion and ';;' separator."""
    if not line.strip():
        return line
    args = line.split()

    # Expand aliases repeatedly, substituting %1..%n and %* placeholders.
    while args[0] in self.aliases:
        expansion = self.aliases[args[0]]
        for position, substitution in enumerate(args[1:], start=1):
            expansion = expansion.replace("%" + str(position), substitution)
        expansion = expansion.replace("%*", ' '.join(args[1:]))
        line = expansion
        args = line.split()

    # Split off ';;'-separated follow-up commands, unless this is the
    # alias command itself (whose body may legitimately contain ';;').
    if args[0] != 'alias':
        marker = line.find(';;')
        if marker >= 0:
            # Queue up everything after the marker for later execution.
            self.cmdqueue.append(line[marker + 2:].lstrip())
            line = line[:marker].rstrip()
    return line
Handle alias expansion and ';;' separator.
def tidyHTML(dirtyHTML):
    """ Runs an arbitrary HTML string through Tidy. """
    try:
        from tidylib import tidy_document
    except ImportError as e:
        raise ImportError(("%s\nYou need to install pytidylib.\n" +
                           "e.g. sudo pip install pytidylib") % e)

    tidy_options = {
        'output-xhtml': 1,
        # add_xml_decl=1,  # option in tidy but not pytidylib
        'indent': 1,
        'tidy-mark': 1,
        # 'char-encoding': 'utf8',
        'char-encoding': 'raw',
    }
    html, errors = tidy_document(dirtyHTML, options=tidy_options)
    return html
Runs an arbitrary HTML string through Tidy.
def quoted(s):
    """ quotes a string if necessary.

    Strips any surrounding double quotes, then re-quotes the value only
    when it contains a space or a slash.  Values starting with '-' or a
    leading space are returned unquoted so option-like tokens pass
    through untouched.
    """
    # strip any existing quotes
    s = s.strip(u'"')
    # BUGFIX: an empty value (e.g. '' or '""') used to raise IndexError
    # on the s[0] check below; it needs no quoting.
    if not s:
        return s
    # don't add quotes for minus or leading space
    if s[0] in (u'-', u' '):
        return s
    if u' ' in s or u'/' in s:
        return u'"%s"' % s
    else:
        return s
quotes a string if necessary.
def mount(self, volume):
    """Performs mount actions on a LVM.

    Scans for active volume groups from the loopback device, activates it
    and fills :attr:`volumes` with the logical volumes.

    :param volume: the volume object whose backing device holds the LVM
        physical volume.
    :raises NoLoopbackAvailableError: when no loopback was available
    :raises IncorrectFilesystemError: when the volume is not a volume group
    """
    # Silence lvm's warnings about inherited file descriptors.
    os.environ['LVM_SUPPRESS_FD_WARNINGS'] = '1'

    # find free loopback device
    volume._find_loopback()
    # Brief pause so the loopback device settles before pvscan sees it
    # (presumably a udev race — TODO confirm).
    time.sleep(0.2)

    try:
        # Scan for new lvm volumes
        result = _util.check_output_(["lvm", "pvscan"])
        for l in result.splitlines():
            # Match either our loopback device, or the raw path when the
            # volume starts at offset 0 (no loopback offset was needed).
            if volume.loopback in l or (volume.offset == 0 and volume.get_raw_path() in l):
                for vg in re.findall(r'VG (\S+)', l):
                    volume.info['volume_group'] = vg

        if not volume.info.get('volume_group'):
            logger.warning("Volume is not a volume group. (Searching for %s)", volume.loopback)
            raise IncorrectFilesystemError()

        # Enable lvm volumes
        _util.check_call_(["lvm", "vgchange", "-a", "y", volume.info['volume_group']],
                          stdout=subprocess.PIPE)
    except Exception:
        # Release the loopback device on any failure before re-raising.
        volume._free_loopback()
        raise

    volume.volumes.vstype = 'lvm'
    # fills it up: detect_volumes populates volume.volumes as a side effect.
    for _ in volume.volumes.detect_volumes('lvm'):
        pass
Performs mount actions on a LVM. Scans for active volume groups from the loopback device, activates it and fills :attr:`volumes` with the logical volumes. :raises NoLoopbackAvailableError: when no loopback was available :raises IncorrectFilesystemError: when the volume is not a volume group
def repl_member_add(self, params):
    """Create a new mongod instance and add it to the replica set.

    Args:
        params - mongod params

    Returns the ``_id`` of the newly added member.

    Raises:
        ReplicaSetError: if the replica set could not be reconfigured;
            the freshly created member is removed before raising.
    """
    repl_config = self.config
    next_id = 1 + max(member['_id'] for member in repl_config['members'])
    new_member = self.member_create(params, next_id)
    repl_config['members'].append(new_member)
    if self.repl_update(repl_config):
        return next_id
    # Reconfiguration failed: roll back the member we just created.
    self.member_del(next_id, reconfig=True)
    raise ReplicaSetError("Could not add member to ReplicaSet.")
Create a new mongod instance and add it to the replica set. Args: params - mongod params Returns the member id of the newly added member; raises ReplicaSetError if the member could not be added
def ub_to_str(string):
    """Convert a py2 unicode / py3 byte string into ``str``.

    Args:
        string (unicode, byte_string): string to be converted

    Returns:
        (str)
    """
    if isinstance(string, str):
        return string
    # Not a native str: unicode under py2, bytes under py3.
    if six.PY2:
        return str(string)
    return string.decode()
converts py2 unicode / py3 bytestring into str Args: string (unicode, byte_string): string to be converted Returns: (str)
def unsupported_media_type(content_type):
    """Creates a Lambda Service UnsupportedMediaType Response

    Parameters
    ----------
    content_type str
        Content Type of the request that was made

    Returns
    -------
    Flask.Response
        A response object representing the UnsupportedMediaType Error
    """
    error_type, status_code = LambdaErrorResponses.UnsupportedMediaTypeException
    body = LambdaErrorResponses._construct_error_response_body(
        LambdaErrorResponses.USER_ERROR,
        "Unsupported content type: {}".format(content_type))
    headers = LambdaErrorResponses._construct_headers(error_type)
    return BaseLocalService.service_response(body, headers, status_code)
Creates a Lambda Service UnsupportedMediaType Response Parameters ---------- content_type str Content Type of the request that was made Returns ------- Flask.Response A response object representing the UnsupportedMediaType Error
def change_state_id(self, state_id=None):
    """Change the id of the state to a new id.

    Replaces the old state id with the new one in all data flows and
    transitions of this container.

    :param state_id: The new state id of the state
    """
    previous_id = self.state_id
    super(ContainerState, self).change_state_id(state_id)

    # Private attributes are written directly to bypass validity checks
    # while ids are being rewritten.
    for transition in self.transitions.values():
        if transition.from_state == previous_id:
            transition._from_state = self.state_id
        if transition.to_state == previous_id:
            transition._to_state = self.state_id

    for data_flow in self.data_flows.values():
        if data_flow.from_state == previous_id:
            data_flow._from_state = self.state_id
        if data_flow.to_state == previous_id:
            data_flow._to_state = self.state_id
Changes the id of the state to a new id. This functions replaces the old state_id with the new state_id in all data flows and transitions. :param state_id: The new state id of the state
def _trim(self):
    """Remove the oldest backups exceeding the configured backup_count.

    Does not write back to the file system; make sure to call
    self._write() afterwards.
    """
    keep = config().backup_count() - 1
    # Entries beyond the limit are the oldest; drop each without writing.
    for entry in self._get_index()[keep:]:
        self.delete(entry[0], p_write=False)
Removes oldest backups that exceed the limit configured in backup_count option. Does not write back to file system, make sure to call self._write() afterwards.
def __parseResponseServer(self):
    """Parse the project-list response of the server.

    Appends one ``Project`` per returned data item to ``self.__projects``.

    Exception
    ---------
    A Sitools2Exception is raised when the server does not send back a
    success.
    """
    # Hoist the base URL: it was previously recomputed for the log line,
    # the request and every Project construction.
    base_url = Sitools2Abstract.getBaseUrl(self)
    url = base_url + SITools2Instance.PROJECTS_URI
    self.__logger.debug(url)
    result = Util.retrieveJsonResponseFromServer(url)
    if not result['success']:
        raise Sitools2Exception("Error when loading the server response")
    data = result['data']
    self.__logger.debug(data)
    # Plain iteration: the enumerate index was never used.
    for dataItem in data:
        self.__projects.append(Project(base_url, dataItem))
Parses the response of the server. Exception --------- A Sitools2Exception is raised when the server does not send back a success.
async def AddCharm(self, channel, url):
    '''
    channel : str
    url : str
    Returns -> None
    '''
    # Build the rpc message with its typed parameters.
    params = {'channel': channel, 'url': url}
    msg = {'type': 'Client',
           'request': 'AddCharm',
           'version': 1,
           'params': params}
    reply = await self.rpc(msg)
    return reply
channel : str url : str Returns -> None
def _check_endings(self):
    """Check begin/end of slug, raises Error if malformed."""
    leading = self.slug.startswith("/")
    trailing = self.slug.endswith("/")
    if not (leading or trailing):
        return
    suggestion = self.slug.strip("/")
    if leading and trailing:
        raise InvalidSlugError(
            _("Invalid slug. Did you mean {}, without the leading and trailing slashes?".format(suggestion)))
    if leading:
        raise InvalidSlugError(
            _("Invalid slug. Did you mean {}, without the leading slash?".format(suggestion)))
    raise InvalidSlugError(
        _("Invalid slug. Did you mean {}, without the trailing slash?".format(suggestion)))
Check begin/end of slug, raises Error if malformed.
def get_missing_bins(original, trimmed):
    """Retrieve indices of a trimmed matrix with respect to the original
    matrix.

    Fairly fast but is only correct if diagonal values are different,
    which is always the case in practice.
    """
    orig_diag = np.diag(original)
    trim_diag = np.diag(trimmed)
    # Safety bound on the forward scan, as in the original implementation.
    limit = 2 * min(original.shape)
    indices = []
    for pos in range(min(trimmed.shape)):
        offset = 0
        # Walk forward until the original diagonal matches this trimmed bin.
        while orig_diag[pos + offset] != trim_diag[pos] and offset < limit:
            offset += 1
        indices.append(pos + offset)
    return np.array(indices)
Retrieve indices of a trimmed matrix with respect to the original matrix. Fairly fast but is only correct if diagonal values are different, which is always the case in practice.