positive
stringlengths
100
30.3k
anchor
stringlengths
1
15k
def get_or_create_environment(self, repo: str, branch: str, git_repo: Repo, repo_path: Path) -> str:
    """
    Returns the path to the current Python executable.

    NOTE(review): despite the name and the ``repo``/``branch``/``git_repo``/
    ``repo_path`` parameters, this implementation creates nothing and ignores
    every argument -- it simply returns ``sys.executable``. Presumably a
    placeholder for per-repository virtualenv support; confirm before
    relying on environment isolation here.
    """
    # All parameters are currently unused.
    return sys.executable
Returns the path to the current Python executable.
def commit(
    self,
    confirm=False,
    confirm_delay=None,
    check=False,
    comment="",
    and_quit=False,
    delay_factor=1,
):
    """
    Commit the candidate configuration.

    Automatically enters configuration mode if necessary and raises
    ``ValueError`` (including the device output) if the commit fails.

    Supported argument combinations:

    * default:            sends ``commit``
    * ``check=True``:     sends ``commit check`` (must not be combined with
                          confirm / confirm_delay / comment)
    * ``confirm=True``:   sends ``commit confirmed`` or
                          ``commit confirmed <confirm_delay>``
    * ``comment``:        appends ``comment "<comment>"``
    * ``and_quit=True``:  appends ``and-quit`` (leaves configuration mode)

    ``confirm_delay`` is only valid together with ``confirm``.
    """
    delay_factor = self.select_delay_factor(delay_factor)
    if check and (confirm or confirm_delay or comment):
        raise ValueError("Invalid arguments supplied with commit check")
    if confirm_delay and not confirm:
        # NOTE(review): the message text says "confirm and check", but this
        # branch actually fires when confirm_delay is given without confirm.
        raise ValueError(
            "Invalid arguments supplied to commit method both confirm and check"
        )
    # Select proper command string based on arguments provided
    command_string = "commit"
    # Marker searched for in the device output to decide whether the
    # commit succeeded.
    commit_marker = "Commit complete."
    if check:
        command_string = "commit check"
        commit_marker = "Validation complete"
    elif confirm:
        if confirm_delay:
            command_string = "commit confirmed " + str(confirm_delay)
        else:
            command_string = "commit confirmed"
        commit_marker = "commit confirmed will be automatically rolled back in"
    # wrap the comment in quotes; an embedded double quote would break the CLI
    if comment:
        if '"' in comment:
            raise ValueError("Invalid comment contains double quote")
        comment = '"{0}"'.format(comment)
        command_string += " comment " + comment
    if and_quit:
        command_string += " and-quit"
    # Enter config mode (if necessary)
    output = self.config_mode()
    # and_quit will get out of config mode on commit, so the prompt changes;
    # expect the base prompt rather than the config prompt in that case.
    if and_quit:
        prompt = self.base_prompt
        output += self.send_command_expect(
            command_string,
            expect_string=prompt,
            strip_prompt=True,
            strip_command=True,
            delay_factor=delay_factor,
        )
    else:
        output += self.send_command_expect(
            command_string,
            strip_prompt=True,
            strip_command=True,
            delay_factor=delay_factor,
        )
    if commit_marker not in output:
        raise ValueError(
            "Commit failed with the following errors:\n\n{0}".format(output)
        )
    return output
Commit the candidate configuration. Commit the entered configuration. Raise an error and return the failure if the commit fails. Automatically enters configuration mode default: command_string = commit check and (confirm or confirm_delay or comment): Exception confirm_delay and no confirm: Exception confirm: confirm_delay option comment option command_string = commit confirmed or commit confirmed <confirm_delay> check: command_string = commit check
def _do_load(
    self, data,
    many=None, partial=None, unknown=None,
    postprocess=True,
):
    """Deserialize `data`, returning the deserialized result.

    :param data: The data to deserialize.
    :param bool many: Whether to deserialize `data` as a collection.
        If `None`, the value for `self.many` is used.
    :param bool|tuple partial: Whether to validate required fields. If its
        value is an iterable, only fields listed in that iterable will be
        allowed to be missing. If `True`, all fields will be allowed missing.
        If `None`, the value for `self.partial` is used.
    :param unknown: Whether to exclude, include, or raise an error for unknown
        fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`.
        If `None`, the value for `self.unknown` is used.
    :param bool postprocess: Whether to run post_load methods.
    :return: A dict of deserialized data
    :rtype: dict
    """
    error_store = ErrorStore()
    errors = {}
    many = self.many if many is None else bool(many)
    # NOTE(review): falsy `unknown` values fall back to self.unknown here.
    unknown = unknown or self.unknown
    if partial is None:
        partial = self.partial
    # Run preprocessors
    if self._has_processors(PRE_LOAD):
        try:
            processed_data = self._invoke_load_processors(
                PRE_LOAD,
                data,
                many,
                original_data=data,
            )
        except ValidationError as err:
            errors = err.normalized_messages()
            result = None
    else:
        processed_data = data
    if not errors:
        # Deserialize data
        result = self._deserialize(
            processed_data,
            self.fields,
            error_store,
            many=many,
            partial=partial,
            unknown=unknown,
            dict_class=self.dict_class,
            index_errors=self.opts.index_errors,
        )
        # Run field-level validation
        self._invoke_field_validators(error_store, data=result, many=many)
        # Run schema-level validation
        if self._has_processors(VALIDATES_SCHEMA):
            # Remember whether field-level validation already produced
            # errors before the schema validators run.
            field_errors = bool(error_store.errors)
            self._invoke_schema_validators(
                error_store,
                pass_many=True,
                data=result,
                original_data=data,
                many=many,
                field_errors=field_errors,
            )
            self._invoke_schema_validators(
                error_store,
                pass_many=False,
                data=result,
                original_data=data,
                many=many,
                field_errors=field_errors,
            )
        errors = error_store.errors
    # Run post processors
    if not errors and postprocess and self._has_processors(POST_LOAD):
        try:
            result = self._invoke_load_processors(
                POST_LOAD,
                result,
                many,
                original_data=data,
            )
        except ValidationError as err:
            errors = err.normalized_messages()
    if errors:
        exc = ValidationError(
            errors,
            data=data,
            valid_data=result,
        )
        # Give subclasses a chance to react before raising.
        self.handle_error(exc, data)
        raise exc
    return result
Deserialize `data`, returning the deserialized result. :param data: The data to deserialize. :param bool many: Whether to deserialize `data` as a collection. If `None`, the value for `self.many` is used. :param bool|tuple partial: Whether to validate required fields. If its value is an iterable, only fields listed in that iterable will be allowed to be missing. If `True`, all fields will be allowed missing. If `None`, the value for `self.partial` is used. :param unknown: Whether to exclude, include, or raise an error for unknown fields in the data. Use `EXCLUDE`, `INCLUDE` or `RAISE`. If `None`, the value for `self.unknown` is used. :param bool postprocess: Whether to run post_load methods. :return: A dict of deserialized data :rtype: dict
def _where(self, filter_fn):
    '''
    Filter this VList with a predicate.

    ``filter_fn`` is applied to each found object (note: each element is
    called, i.e. the predicate receives ``element()``), and a new VList of
    the matching elements is returned.
    '''
    assert callable(filter_fn), 'filter_fn needs to be callable'
    return VList(filter(lambda element: filter_fn(element()), self))
use this to filter VLists, simply provide a filter function to filter the current found objects
def dump_model(self, num_iteration=None, start_iteration=0):
    """Dump Booster to JSON format.

    Parameters
    ----------
    num_iteration : int or None, optional (default=None)
        Index of the iteration that should be dumped.
        If None, if the best iteration exists, it is dumped; otherwise, all iterations are dumped.
        If <= 0, all iterations are dumped.
    start_iteration : int, optional (default=0)
        Start index of the iteration that should be dumped.

    Returns
    -------
    json_repr : dict
        JSON format of Booster.
    """
    if num_iteration is None:
        # Fall back to the best iteration recorded on this Booster.
        num_iteration = self.best_iteration
    # First attempt with a fixed 1 MiB buffer; the C call reports the
    # actual required length in tmp_out_len.
    buffer_len = 1 << 20
    tmp_out_len = ctypes.c_int64(0)
    string_buffer = ctypes.create_string_buffer(buffer_len)
    ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
    _safe_call(_LIB.LGBM_BoosterDumpModel(
        self.handle,
        ctypes.c_int(start_iteration),
        ctypes.c_int(num_iteration),
        ctypes.c_int64(buffer_len),
        ctypes.byref(tmp_out_len),
        ptr_string_buffer))
    actual_len = tmp_out_len.value
    # if buffer length is not long enough, reallocate a buffer and dump again
    if actual_len > buffer_len:
        string_buffer = ctypes.create_string_buffer(actual_len)
        ptr_string_buffer = ctypes.c_char_p(*[ctypes.addressof(string_buffer)])
        _safe_call(_LIB.LGBM_BoosterDumpModel(
            self.handle,
            ctypes.c_int(start_iteration),
            ctypes.c_int(num_iteration),
            ctypes.c_int64(actual_len),
            ctypes.byref(tmp_out_len),
            ptr_string_buffer))
    ret = json.loads(string_buffer.value.decode())
    # Round-trip pandas_categorical through JSON so numpy scalars are
    # converted to plain Python types.
    ret['pandas_categorical'] = json.loads(json.dumps(self.pandas_categorical,
                                                      default=json_default_with_numpy))
    return ret
Dump Booster to JSON format. Parameters ---------- num_iteration : int or None, optional (default=None) Index of the iteration that should be dumped. If None, if the best iteration exists, it is dumped; otherwise, all iterations are dumped. If <= 0, all iterations are dumped. start_iteration : int, optional (default=0) Start index of the iteration that should be dumped. Returns ------- json_repr : dict JSON format of Booster.
def replace(oldEl, newEl):
    # type: (Union[Rule, _RuleConnectable], Union[Rule, _RuleConnectable]) -> Union[Rule, _RuleConnectable]
    """
    Replace element in the parsed tree. Can be nonterminal, terminal or rule.

    :param oldEl: Element already in the tree.
    :param newEl: Element to replace with.
    :return: New element attached to the tree.
    """
    # Pick the manipulation routine matching the element's kind; anything
    # else falls through and returns None, as before.
    handler = None
    if isinstance(oldEl, Rule):
        handler = Manipulations.replaceRule
    elif isinstance(oldEl, (Nonterminal, Terminal)):
        handler = Manipulations.replaceNode
    if handler is not None:
        return handler(oldEl, newEl)
Replace element in the parsed tree. Can be nonterminal, terminal or rule. :param oldEl: Element already in the tree. :param newEl: Element to replace with. :return: New element attached to the tree.
def _set_flag(self, flag):
    """Turns the specified flag on.

    Rebuilds this message's filename with ``flag`` appended after the
    ``:2,`` info separator and renames the file on disk accordingly
    (maildir-style flag storage).
    """
    # The folder caches its listing; invalidate it before renaming files.
    self.folder._invalidate_cache()
    # TODO::: turn the flag off when it's already on
    def replacer(m):
        # Recompose: <base>/<folder>/cur/<key>.<hostname>:2,<existing flags + new flag>
        return "%s/%s.%s%s" % (
            joinpath(self.folder.base, self.folder.folder, "cur"),
            m.group("key"),
            m.group("hostname"),
            ":2,%s" % (
                "%s%s" % (m.group("flags"), flag) if m.group("flags")
                else flag
            )
        )
    newfilename = self.msgpathre.sub(replacer, self.filename)
    self.filesystem.rename(self.filename, newfilename)
    self.filename = newfilename
Turns the specified flag on
def nucleotide_linkage(residues):
    """Support for DNA/RNA ligands by finding missing covalent linkages to stitch DNA/RNA together.

    :param residues: iterable of ``(resname, chain, pos)`` triples.
    :return: list of ``covlinkage`` namedtuples connecting consecutive
        nucleotides within each chain (conf fields left empty).
    """
    covlinkage = namedtuple("covlinkage", "id1 chain1 pos1 conf1 id2 chain2 pos2 conf2")
    # Residue names recognised as RNA (A, C, T, G, U) or DNA (D-prefixed).
    nucleotides = ['A', 'C', 'T', 'G', 'U', 'DA', 'DC', 'DT', 'DG', 'DU']

    # Collect nucleotide residues per chain, preserving input order.
    chains = {}
    for resname, chain, pos in residues:
        if resname in nucleotides:
            chains.setdefault(chain, []).append((resname, pos))

    # Link every consecutive pair within a chain.
    links = []
    for chain, nuc_list in chains.items():
        for (name, pos), (nextname, nextpos) in zip(nuc_list, nuc_list[1:]):
            links.append(covlinkage(id1=name, chain1=chain, pos1=pos, conf1='',
                                    id2=nextname, chain2=chain, pos2=nextpos, conf2=''))
    return links
Support for DNA/RNA ligands by finding missing covalent linkages to stitch DNA/RNA together.
def _xread(self, streams, timeout=0, count=None, latest_ids=None): """Wraps up common functionality between ``xread()`` and ``xread_group()`` You should probably be using ``xread()`` or ``xread_group()`` directly. """ if latest_ids is None: latest_ids = ['$'] * len(streams) if len(streams) != len(latest_ids): raise ValueError( 'The streams and latest_ids parameters must be of the ' 'same length' ) count_args = [b'COUNT', count] if count else [] if timeout is None: block_args = [] elif not isinstance(timeout, int): raise TypeError( "timeout argument must be int, not {!r}".format(timeout)) else: block_args = [b'BLOCK', timeout] return block_args + count_args + [b'STREAMS'] + streams + latest_ids
Wraps up common functionality between ``xread()`` and ``xread_group()`` You should probably be using ``xread()`` or ``xread_group()`` directly.
def decompress_from_curve(self, x, flag):
    """
    Calculate the y coordinate given only the x value.

    There are 2 possible solutions (y and p - y); ``flag`` selects which
    one by matching the parity (least significant bit) of the square root.

    :param x: the x coordinate (converted to a field element internally).
    :param flag: 0 or 1, the desired parity of the y coordinate.
    :return: the curve point at (x, y) with the selected y.
    """
    cq = self.field.p
    x = self.field.value(x)
    # Right-hand side of the Weierstrass equation: y^2 = x^3 + a*x + b.
    ysquare = x ** 3 + self.a * x + self.b
    ysquare_root = sqrtCQ(ysquare.value, cq)
    # BUG FIX: the original tested `ysquare_root % 2 is not 0`, relying on
    # CPython small-int interning; identity comparison with int literals is
    # undefined behavior (and a SyntaxWarning on Python 3.8+). Use parity
    # directly instead.
    bit0 = ysquare_root % 2
    if bit0 != flag:
        # Take the other root, p - y (mod p).
        beta = (cq - ysquare_root) % cq
    else:
        beta = ysquare_root
    return self.point(x, beta)
calculate the y coordinate given only the x value. there are 2 possible solutions, use 'flag' to select.
def int2str(num, radix=10, alphabet=BASE85):
    """Helper function for quick base conversions from integers to strings."""
    converter = NumConv(radix, alphabet)
    return converter.int2str(num)
helper function for quick base conversions from integers to strings
def im_json_to_graph(im_json):
    """Return networkx graph from Kappy's influence map JSON.

    Parameters
    ----------
    im_json : dict
        A JSON dict which contains an influence map generated by Kappy.

    Returns
    -------
    graph : networkx.MultiDiGraph
        A graph representing the influence map.
    """
    imap_data = im_json['influence map']['map']
    # Initialize the graph
    graph = MultiDiGraph()
    id_node_dict = {}
    # Add each node to the graph
    for node_dict in imap_data['nodes']:
        # There is always just one entry here with the node type e.g. "rule"
        # as key, and all the node data as the value
        node_type, node = list(node_dict.items())[0]
        # Add the node to the graph with its label and type
        # (rules render as blue boxes, other node types as green ovals).
        attrs = {'fillcolor': '#b7d2ff' if node_type == 'rule' else '#cdffc9',
                 'shape': 'box' if node_type == 'rule' else 'oval',
                 'style': 'filled'}
        graph.add_node(node['label'], node_type=node_type, **attrs)
        # Save the key of the node to refer to it later
        new_key = '%s%s' % (node_type, node['id'])
        id_node_dict[new_key] = node['label']

    def add_edges(link_list, edge_sign):
        # Positive influences: green, normal arrowhead; negative: red, tee.
        attrs = {'sign': edge_sign,
                 'color': 'green' if edge_sign == 1 else 'red',
                 'arrowhead': 'normal' if edge_sign == 1 else 'tee'}
        for link_dict in link_list:
            source = link_dict['source']
            for target_dict in link_dict['target map']:
                target = target_dict['target']
                # Rebuild the "<type><id>" keys used when the nodes were added.
                src_id = '%s%s' % list(source.items())[0]
                tgt_id = '%s%s' % list(target.items())[0]
                graph.add_edge(id_node_dict[src_id],
                               id_node_dict[tgt_id], **attrs)

    # Add all the edges from the positive and negative influences
    add_edges(imap_data['wake-up map'], 1)
    add_edges(imap_data['inhibition map'], -1)
    return graph
Return networkx graph from Kappy's influence map JSON. Parameters ---------- im_json : dict A JSON dict which contains an influence map generated by Kappy. Returns ------- graph : networkx.MultiDiGraph A graph representing the influence map.
def install(cls):
    """Create the required directories in the home directory.

    Creates ``<cls.home>/<path>`` for every entry in ``cls.dirs``.
    Raises ``OSError`` if a directory already exists (same behavior as
    before).
    """
    # FIX: the original abused a list comprehension for its side effects and
    # built paths with manual '/' concatenation; use a plain loop and
    # os.path.join instead.
    for key in cls.dirs:
        os.makedirs(os.path.join(cls.home, cls.dirs[key]))
Create the required directories in the home directory
def toggle_wrap_mode(self, checked):
    """Toggle wrap mode.

    :param checked: whether line wrapping should be enabled.
    """
    self.plain_text.editor.toggle_wrap_mode(checked)
    # Record the new state in the plugin's option store so editor widget
    # and saved configuration stay in sync.
    self.set_option('wrap', checked)
Toggle wrap mode
def join_json_files(prefix):
    """Join different REACH output JSON files into a single JSON object.

    The output of REACH is broken into three files that need to be joined
    before processing. Specifically, there will be three files of the form:
    `<prefix>.uaz.<subcategory>.json`.

    Parameters
    ----------
    prefix : str
        The absolute path up to the extensions that reach will add.

    Returns
    -------
    json_obj : dict
        The result of joining the files, keyed by the three subcategories,
        or None if any of the files could not be opened.
    """
    contents = {}
    try:
        # Load the three REACH output pieces in a fixed order.
        for subcategory in ('entities', 'events', 'sentences'):
            with open('{}.uaz.{}.json'.format(prefix, subcategory), 'rt') as fh:
                contents[subcategory] = json.load(fh)
    except IOError as e:
        logger.error(
            'Failed to open JSON files for %s; REACH error?' % prefix
        )
        logger.exception(e)
        return None
    return {'events': contents['events'],
            'entities': contents['entities'],
            'sentences': contents['sentences']}
Join different REACH output JSON files into a single JSON object. The output of REACH is broken into three files that need to be joined before processing. Specifically, there will be three files of the form: `<prefix>.uaz.<subcategory>.json`. Parameters ---------- prefix : str The absolute path up to the extensions that reach will add. Returns ------- json_obj : dict The result of joining the files, keyed by the three subcategories.
def _readsie(self, pos): """Return interpretation of next bits as a signed interleaved exponential-Golomb code. Advances position to after the read code. Raises ReadError if the end of the bitstring is encountered while reading the code. """ codenum, pos = self._readuie(pos) if not codenum: return 0, pos try: if self[pos]: return -codenum, pos + 1 else: return codenum, pos + 1 except IndexError: raise ReadError("Read off end of bitstring trying to read code.")
Return interpretation of next bits as a signed interleaved exponential-Golomb code. Advances position to after the read code. Raises ReadError if the end of the bitstring is encountered while reading the code.
def onUserError(self, fail, message):
    """Handle user errors by logging them.

    :param fail: the failure object to log (treated as opaque here;
        only passed to the logger).
    :param message: accompanying error message to log.
    """
    self.log.error(fail)
    self.log.error(message)
Handle user errors
def load_obj(fn):
    """Load 3d mesh from '.obj' file.

    Args:
      fn: Input file name or file-like object.

    Returns:
      dictionary with the following keys (some of which may be missing):
        position: np.float32, (n, 3) array, vertex positions
        uv: np.float32, (n, 2) array, vertex uv coordinates
        normal: np.float32, (n, 3) array, vertex normals
        face: np.int32, (k*3,) triangular face indices
    """
    # Index 0 of each attribute list is a dummy entry so that the 1-based
    # (and 0-meaning-absent) OBJ indices can be used directly.
    position = [np.zeros(3, dtype=np.float32)]
    normal = [np.zeros(3, dtype=np.float32)]
    uv = [np.zeros(2, dtype=np.float32)]
    # Maps each distinct (pos, uv, normal) index tuple to one output vertex.
    tuple2idx = OrderedDict()
    trinagle_indices = []
    input_file = open(fn) if isinstance(fn, str) else fn
    for line in input_file:
        line = line.strip()
        # Skip blank lines and comments.
        if not line or line[0] == '#':
            continue
        # Split into the tag ('v', 'vt', 'vn', 'f', ...) and the payload.
        line = line.split(' ', 1)
        tag = line[0]
        if len(line) > 1:
            line = line[1]
        else:
            line = ''
        if tag == 'v':
            position.append(np.fromstring(line, sep=' '))
        elif tag == 'vt':
            uv.append(np.fromstring(line, sep=' '))
        elif tag == 'vn':
            normal.append(np.fromstring(line, sep=' '))
        elif tag == 'f':
            output_face_indices = []
            for chunk in line.split():
                # tuple order: pos_idx, uv_idx, normal_idx
                vt = _parse_vertex_tuple(chunk)
                if vt not in tuple2idx:  # create a new output vertex?
                    tuple2idx[vt] = len(tuple2idx)
                output_face_indices.append(tuple2idx[vt])
            # generate face triangles (fan triangulation of the polygon)
            for i in range(1, len(output_face_indices)-1):
                for vi in [0, i, i+1]:
                    trinagle_indices.append(output_face_indices[vi])
    outputs = {}
    outputs['face'] = np.int32(trinagle_indices)
    # Columns of the collected vertex tuples: one index array per attribute.
    pos_idx, uv_idx, normal_idx = np.int32(list(tuple2idx)).T
    # Only emit an attribute if at least one face referenced it (index != 0).
    if np.any(pos_idx):
        outputs['position'] = _unify_rows(position)[pos_idx]
    if np.any(uv_idx):
        outputs['uv'] = _unify_rows(uv)[uv_idx]
    if np.any(normal_idx):
        outputs['normal'] = _unify_rows(normal)[normal_idx]
    return outputs
Load 3d mesh from '.obj' file. Args: fn: Input file name or file-like object. Returns: dictionary with the following keys (some of which may be missing): position: np.float32, (n, 3) array, vertex positions uv: np.float32, (n, 2) array, vertex uv coordinates normal: np.float32, (n, 3) array, vertex normals face: np.int32, (k*3,) triangular face indices
def get_model(sender, model_name, model_inst, model_info, model_config):
    """Return the (possibly cached) dynamically-created model class for
    ``model_name``.

    Looks up the model definition in the model-config table (``get_mc()``),
    recreates the model class via ``orm.create_model`` when the cached copy
    is missing or its uuid is stale, and returns the class. Returns None if
    no model-config store or no matching row exists.

    #todo Add objcache support
    """
    MC = get_mc()
    if MC:
        # Fetch the stored definition for this model (must have a uuid).
        model = MC.get((MC.c.model_name==model_name) & (MC.c.uuid!=''))
        if model:
            cached_inst = __cache__.get(model_name)
            # Rebuild when not cached yet or the stored uuid changed.
            if not cached_inst or (cached_inst and cached_inst[1]!=model.uuid):
                model_inst = model.get_instance()
                # SECURITY NOTE(review): eval() on stored field/index
                # definitions executes arbitrary code from the database;
                # ensure this table is trusted or switch to a safe parser.
                M = orm.create_model(model_name,
                                     fields=eval(model_inst.fields or '[]'),
                                     indexes=eval(model_inst.indexes or '[]'),
                                     basemodel=model_inst.basemodel,
                                     __replace__=True)
                __cache__[model_name] = (M, model.uuid)
                #process extension model
                if model_inst.has_extension:
                    ext_model_name = model_name + '_extension'
                    fields = eval(model_inst.extension_fields or '[]')
                    # The extension rows link back to the base model 1:1.
                    fields.insert(0, {'name':'_parent', 'type':'OneToOne',
                                      'reference_class':model_name,
                                      'collection_name':'ext'})
                    ME = orm.create_model(ext_model_name,
                                          fields=fields,
                                          indexes=eval(model_inst.extension_indexes or '[]'),
                                          basemodel=model_inst.extension_model,
                                          __replace__=True)
            else:
                # Cache hit with matching uuid: reuse the cached class.
                M = cached_inst[0]
            return M
#todo Add objcache support
def bind_key(pymux, variables):
    """
    Bind a key sequence.
    -n: Not necessary to use the prefix.
    """
    # Without the -n flag the binding only fires after the prefix key.
    needs_prefix = not variables['-n']
    key = variables['<key>']
    try:
        pymux.key_bindings_manager.add_custom_binding(
            key,
            variables['<command>'],
            variables['<arguments>'],
            needs_prefix=needs_prefix)
    except ValueError:
        raise CommandException('Invalid key: %r' % (key, ))
Bind a key sequence. -n: Not necessary to use the prefix.
def create_es(self):
    """Create an ES (intermediate) file for this BAM file.

    This is the function which assesses if an alignment is correct.
    The output file is gzip-compressed when
    ``self.compress_intermediate_files`` is set.
    """
    # Choose a gzip text stream or a plain text stream for the ES output.
    with (gzip.open(self._es_fn, "tw+")
            if self.compress_intermediate_files
            else open(self._es_fn, "w+")) as es_fo:
        self.bam2es(
            bam_fn=self._bam_fn,
            es_fo=es_fo,
            allowed_delta=self.report.allowed_delta,
        )
Create an ES (intermediate) file for this BAM file. This is the function which assesses if an alignment is correct
def get_artist(self, id_):
    """Data for a specific artist.

    :param id_: the artist identifier used to build the endpoint.
    :return: whatever ``self._make_request`` returns for that endpoint.
    """
    return self._make_request("artists/{id}".format(id=id_))
Data for a specific artist.
def objects_to_record(self, preference=None):
    """Create file records from objects.

    Not implemented: the previous implementation depended on the obsolete
    ``file_info_map`` and was unreachable dead code sitting behind this
    raise (it iterated file_info_map, resolved each file's preference and
    called ``objects_to_record`` per file). It has been removed; reimplement
    against the current file registry when reviving this method.

    :param preference: optional File.PREFERENCE override applied to every
        file (was honored by the removed implementation).
    :raises NotImplementedError: always.
    """
    raise NotImplementedError("Still uses obsolete file_info_map")
Create file records from objects.
def restore_defaults_ratio(self): """Restore InaSAFE default ratio.""" # Set the flag to true because user ask to. self.is_restore_default = True # remove current default ratio for i in reversed(list(range(self.container_layout.count()))): widget = self.container_layout.itemAt(i).widget() if widget is not None: widget.setParent(None) # reload default ratio self.restore_default_values_page()
Restore InaSAFE default ratio.
def _get_suffix(path): """ Return suffix from `path`. ``/home/xex/somefile.txt`` --> ``txt``. Args: path (str): Full file path. Returns: str: Suffix. Raises: UserWarning: When ``/`` is detected in suffix. """ suffix = os.path.basename(path).split(".")[-1] if "/" in suffix: raise UserWarning("Filename can't contain '/' in suffix (%s)!" % path) return suffix
Return suffix from `path`. ``/home/xex/somefile.txt`` --> ``txt``. Args: path (str): Full file path. Returns: str: Suffix. Raises: UserWarning: When ``/`` is detected in suffix.
def log_action(self, instance, action, action_date=None, url="",
               update_parent=True):
    """
    Store an action in the database using the CMSLog model.

    The following attributes are calculated and set on the log entry:

    * **model_repr** - A unicode representation of the instance.
    * **object_repr** - The verbose_name of the instance model class.
    * **section** - The name of ancestor bundle that is directly \
    attached to the admin site.

    :param instance: The instance that this action was performed \
    on.
    :param action: The action type. Must be one of the options \
    in CMSLog.ACTIONS.
    :param action_date: The datetime the action occurred.
    :param url: The url that the log entry should point to, \
    Defaults to an empty string.
    :param update_parent: If true this will update the last saved time \
    on the object pointed to by this bundle's object_view. \
    Defaults to True.
    """
    # Walk up to the root bundle; its name becomes the log "section".
    section = None
    if self.bundle:
        bundle = self.bundle
        while bundle.parent:
            bundle = bundle.parent
        section = bundle.name
    # if we have a object view that comes from somewhere else
    # save it too to update it.
    changed_object = instance
    bundle = self.bundle
    # Climb until we find the bundle that actually owns the object view.
    while bundle.object_view == bundle.parent_attr:
        bundle = bundle.parent
    if update_parent and changed_object.__class__ != bundle._meta.model:
        object_view, name = bundle.get_initialized_view_and_name(
            bundle.object_view, kwargs=self.kwargs)
        changed_object = object_view.get_object()
        # Re-save the parent object so its "last saved" timestamp updates.
        changed_object.save()
    if not section:
        section = ""
    # Store only the path component of the supplied url.
    if url:
        url = urlparse.urlparse(url).path
    # NOTE(review): unicode() implies this code targets Python 2.
    rep = unicode(instance)
    if rep:
        # Truncate to fit the CMSLog field.
        rep = rep[:255]
    log = CMSLog(action=action, url=url, section=section,
                 model_repr=instance._meta.verbose_name,
                 object_repr=rep, user_name=self.request.user.username,
                 action_date=action_date)
    log.save()
Store an action in the database using the CMSLog model. The following attributes are calculated and set on the log entry: * **model_repr** - A unicode representation of the instance. * **object_repr** - The verbose_name of the instance model class. * **section** - The name of ancestor bundle that is directly \ attached to the admin site. :param instance: The instance that this action was performed \ on. :param action: The action type. Must be one of the options \ in CMSLog.ACTIONS. :param action_date: The datetime the action occurred. :param url: The url that the log entry should point to, \ Defaults to an empty string. :param update_parent: If true this will update the last saved time \ on the object pointed to by this bundle's object_view. \ Defaults to True.
def _xml_escape_attr(attr, skip_single_quote=True): """Escape the given string for use in an HTML/XML tag attribute. By default this doesn't bother with escaping `'` to `&#39;`, presuming that the tag attribute is surrounded by double quotes. """ escaped = (attr .replace('&', '&amp;') .replace('"', '&quot;') .replace('<', '&lt;') .replace('>', '&gt;')) if not skip_single_quote: escaped = escaped.replace("'", "&#39;") return escaped
Escape the given string for use in an HTML/XML tag attribute. By default this doesn't bother with escaping `'` to `&#39;`, presuming that the tag attribute is surrounded by double quotes.
def focusOutEvent(self, event):
    """Reimplement Qt method to close the widget when losing focus."""
    event.ignore()
    # Inspired from CompletionWidget.focusOutEvent() in file
    # widgets/sourcecode/base.py line 212
    if sys.platform == "darwin":
        # On macOS, only close for in-application focus changes; switching
        # to another application (ActiveWindowFocusReason) keeps it open.
        if event.reason() != Qt.ActiveWindowFocusReason:
            self.close()
    else:
        self.close()
Reimplement Qt method to close the widget when losing focus.
def sieve(cache, segment=None):
    """Filter the cache to find those entries that overlap ``segment``

    Parameters
    ----------
    cache : `list`
        Input list of file paths

    segment : `~gwpy.segments.Segment`
        The ``[start, stop)`` interval to match against.
    """
    def overlaps(entry):
        return segment.intersects(file_segment(entry))

    # Preserve the container type of the input cache.
    return type(cache)(filter(overlaps, cache))
Filter the cache to find those entries that overlap ``segment`` Parameters ---------- cache : `list` Input list of file paths segment : `~gwpy.segments.Segment` The ``[start, stop)`` interval to match against.
def write_quick(self):
    """Send only the read / write bit.

    Issues a "quick" transaction (presumably SMBus quick command -- confirm
    against the bus implementation) to the device at ``self.address``;
    no data payload is transferred.
    """
    self.bus.write_quick(self.address)
    self.log.debug("write_quick: Sent the read / write bit")
Send only the read / write bit
def present(profile='pagerduty', subdomain=None, api_key=None, **kwargs): ''' Ensure that a pagerduty schedule exists. This method accepts as args everything defined in https://developer.pagerduty.com/documentation/rest/schedules/create. This means that most arguments are in a dict called "schedule." User id's can be pagerduty id, or name, or email address. ''' # for convenience, we accept id, name, or email as the user id. kwargs['schedule']['name'] = kwargs['name'] # match PD API structure for schedule_layer in kwargs['schedule']['schedule_layers']: for user in schedule_layer['users']: u = __salt__['pagerduty_util.get_resource']('users', user['user']['id'], ['email', 'name', 'id'], profile=profile, subdomain=subdomain, api_key=api_key) if u is None: raise Exception('unknown user: {0}'.format(user)) user['user']['id'] = u['id'] r = __salt__['pagerduty_util.resource_present']('schedules', ['name', 'id'], _diff, profile, subdomain, api_key, **kwargs) return r
Ensure that a pagerduty schedule exists. This method accepts as args everything defined in https://developer.pagerduty.com/documentation/rest/schedules/create. This means that most arguments are in a dict called "schedule." User id's can be pagerduty id, or name, or email address.
def bovy_ars(domain, isDomainFinite, abcissae, hx, hpx, nsamples=1,
             hxparams=(), maxn=100):
    """bovy_ars: Implementation of the Adaptive-Rejection Sampling
    algorithm by Gilks & Wild (1992): Adaptive Rejection Sampling
    for Gibbs Sampling, Applied Statistics, 41, 337
    Based on Wild & Gilks (1993), Algorithm AS 287: Adaptive Rejection
    Sampling from Log-concave Density Functions, Applied Statistics, 42, 701

    Input:
       domain         - [.,.] upper and lower limit to the domain
       isDomainFinite - [.,.] is there a lower/upper limit to the domain?
       abcissae       - initial list of abcissae (must lie on either side
                        of the peak in hx if the domain is unbounded)
       hx             - function that evaluates h(x) = ln g(x)
       hpx            - function that evaluates hp(x) = d h(x) / d x
       nsamples       - (optional) number of desired samples (default=1)
       hxparams       - (optional) a tuple of parameters for h(x) and h'(x)
       maxn           - (optional) maximum number of updates to the hull
                        (default=100)

    Output: list with nsamples of samples from exp(h(x))

    External dependencies: math, scipy, scipy.stats

    History: 2009-05-21 - Written - Bovy (NYU)
    """
    # First set-up the upper and lower hulls.
    hull = setup_hull(domain, isDomainFinite, abcissae, hx, hpx, hxparams)
    # Then draw samples one at a time; the hull and the update counter are
    # threaded through each call so the envelope keeps improving.
    samples = []
    nupdates = 0
    for _ in range(int(nsamples)):
        sample, hull, nupdates = sampleone(hull, hx, hpx, domain,
                                           isDomainFinite, maxn, nupdates,
                                           hxparams)
        samples.append(sample)
    return samples
bovy_ars: Implementation of the Adaptive-Rejection Sampling algorithm by Gilks & Wild (1992): Adaptive Rejection Sampling for Gibbs Sampling, Applied Statistics, 41, 337 Based on Wild & Gilks (1993), Algorithm AS 287: Adaptive Rejection Sampling from Log-concave Density Functions, Applied Statistics, 42, 701 Input: domain - [.,.] upper and lower limit to the domain isDomainFinite - [.,.] is there a lower/upper limit to the domain? abcissae - initial list of abcissae (must lie on either side of the peak in hx if the domain is unbounded hx - function that evaluates h(x) = ln g(x) hpx - function that evaluates hp(x) = d h(x) / d x nsamples - (optional) number of desired samples (default=1) hxparams - (optional) a tuple of parameters for h(x) and h'(x) maxn - (optional) maximum number of updates to the hull (default=100) Output: list with nsamples of samples from exp(h(x)) External dependencies: math scipy scipy.stats History: 2009-05-21 - Written - Bovy (NYU)
def meta_group(self, meta, meta_aggregates=None):
    """
    *Wrapper of* ``GROUP``

    Group operation only for metadata. For further information check
    :meth:`~.group`

    :param meta: the metadata attributes to group on.
    :param meta_aggregates: optional metadata aggregate functions,
        forwarded unchanged to :meth:`~.group`.
    """
    # Pure delegation: region-data grouping parameters are left unset.
    return self.group(meta=meta, meta_aggregates=meta_aggregates)
*Wrapper of* ``GROUP`` Group operation only for metadata. For further information check :meth:`~.group`
def GetAttachmentCollection(self, _id): """Get Attachments for given List Item ID""" # Build Request soap_request = soap('GetAttachmentCollection') soap_request.add_parameter('listName', self.listName) soap_request.add_parameter('listItemID', _id) self.last_request = str(soap_request) # Send Request response = self._session.post(url=self._url('Lists'), headers=self._headers('GetAttachmentCollection'), data=str(soap_request), verify=False, timeout=self.timeout) # Parse Request if response.status_code == 200: envelope = etree.fromstring(response.text.encode('utf-8'), parser=etree.XMLParser(huge_tree=self.huge_tree)) attaches = envelope[0][0][0][0] attachments = [] for attachment in attaches.getchildren(): attachments.append(attachment.text) return attachments else: return response
Get Attachments for given List Item ID
def log_likelihood(self, y, _const=math.log(2.0*math.pi), quiet=False): """ Compute the marginalized likelihood of the GP model The factorized matrix from the previous call to :func:`GP.compute` is used so ``compute`` must be called first. Args: y (array[n]): The observations at coordinates ``x`` from :func:`GP.compute`. quiet (bool): If true, return ``-numpy.inf`` for non-positive definite matrices instead of throwing an error. Returns: float: The marginalized likelihood of the GP model. Raises: ValueError: For mismatched dimensions. solver.LinAlgError: For non-positive definite matrices. """ y = self._process_input(y) resid = y - self.mean.get_value(self._t) try: self._recompute() except solver.LinAlgError: if quiet: return -np.inf raise if len(y.shape) > 1: raise ValueError("dimension mismatch") logdet = self.solver.log_determinant() if not np.isfinite(logdet): return -np.inf loglike = -0.5*(self.solver.dot_solve(resid)+logdet+len(y)*_const) if not np.isfinite(loglike): return -np.inf return loglike
Compute the marginalized likelihood of the GP model The factorized matrix from the previous call to :func:`GP.compute` is used so ``compute`` must be called first. Args: y (array[n]): The observations at coordinates ``x`` from :func:`GP.compute`. quiet (bool): If true, return ``-numpy.inf`` for non-positive definite matrices instead of throwing an error. Returns: float: The marginalized likelihood of the GP model. Raises: ValueError: For mismatched dimensions. solver.LinAlgError: For non-positive definite matrices.
def set_mode(self, anchor_id, mode): """ Send a packet to set the anchor mode. If the anchor receive the packet, it will change mode and resets. """ data = struct.pack('<BB', LoPoAnchor.LPP_TYPE_MODE, mode) self.crazyflie.loc.send_short_lpp_packet(anchor_id, data)
Send a packet to set the anchor mode. If the anchor receive the packet, it will change mode and resets.
def sendUssd(self, ussdString, responseTimeout=15): """ Starts a USSD session by dialing the the specified USSD string, or \ sends the specified string in the existing USSD session (if any) :param ussdString: The USSD access number to dial :param responseTimeout: Maximum time to wait a response, in seconds :raise TimeoutException: if no response is received in time :return: The USSD response message/session (as a Ussd object) :rtype: gsmmodem.modem.Ussd """ self._ussdSessionEvent = threading.Event() try: cusdResponse = self.write('AT+CUSD=1,"{0}",15'.format(ussdString), timeout=responseTimeout) # Should respond with "OK" except Exception: self._ussdSessionEvent = None # Cancel the thread sync lock raise # Some modems issue the +CUSD response before the acknowledgment "OK" - check for that if len(cusdResponse) > 1: cusdResponseFound = lineStartingWith('+CUSD', cusdResponse) != None if cusdResponseFound: self._ussdSessionEvent = None # Cancel thread sync lock return self._parseCusdResponse(cusdResponse) # Wait for the +CUSD notification message if self._ussdSessionEvent.wait(responseTimeout): self._ussdSessionEvent = None return self._ussdResponse else: # Response timed out self._ussdSessionEvent = None raise TimeoutException()
Starts a USSD session by dialing the the specified USSD string, or \ sends the specified string in the existing USSD session (if any) :param ussdString: The USSD access number to dial :param responseTimeout: Maximum time to wait a response, in seconds :raise TimeoutException: if no response is received in time :return: The USSD response message/session (as a Ussd object) :rtype: gsmmodem.modem.Ussd
def screensaver(): ''' Launch the screensaver. CLI Example: .. code-block:: bash salt '*' desktop.screensaver ''' cmd = 'open /System/Library/Frameworks/ScreenSaver.framework/Versions/A/Resources/ScreenSaverEngine.app' call = __salt__['cmd.run_all']( cmd, output_loglevel='debug', python_shell=False ) _check_cmd(call) return True
Launch the screensaver. CLI Example: .. code-block:: bash salt '*' desktop.screensaver
def _margtimephase_loglr(self, mf_snr, opt_snr): """Returns the log likelihood ratio marginalized over time and phase. """ return special.logsumexp(numpy.log(special.i0(mf_snr)), b=self._deltat) - 0.5*opt_snr
Returns the log likelihood ratio marginalized over time and phase.
def _prepare_discharge_hook(req, client): ''' Return the hook function (called when the response is received.) This allows us to intercept the response and do any necessary macaroon discharge before returning. ''' class Retry: # Define a local class so that we can use its class variable as # mutable state accessed by the closures below. count = 0 def hook(response, *args, **kwargs): ''' Requests hooks system, this is the hook for the response. ''' status_code = response.status_code if status_code != 407 and status_code != 401: return response if (status_code == 401 and response.headers.get('WWW-Authenticate') != 'Macaroon'): return response if response.headers.get('Content-Type') != 'application/json': return response errorJSON = response.json() if errorJSON.get('Code') != ERR_DISCHARGE_REQUIRED: return response error = Error.from_dict(errorJSON) Retry.count += 1 if Retry.count >= MAX_DISCHARGE_RETRIES: raise BakeryException('too many ({}) discharge requests'.format( Retry.count) ) client.handle_error(error, req.url) req.headers.pop('Cookie', None) req.prepare_cookies(client.cookies) req.headers[BAKERY_PROTOCOL_HEADER] = \ str(bakery.LATEST_VERSION) with requests.Session() as s: return s.send(req) return hook
Return the hook function (called when the response is received.) This allows us to intercept the response and do any necessary macaroon discharge before returning.
def _safe_output(line): ''' Looks for rabbitmqctl warning, or general formatting, strings that aren't intended to be parsed as output. Returns a boolean whether the line can be parsed as rabbitmqctl output. ''' return not any([ line.startswith('Listing') and line.endswith('...'), line.startswith('Listing') and '\t' not in line, '...done' in line, line.startswith('WARNING:') ])
Looks for rabbitmqctl warning, or general formatting, strings that aren't intended to be parsed as output. Returns a boolean whether the line can be parsed as rabbitmqctl output.
def set_coupl_old(self): """ Using the adjacency matrix, sample a coupling matrix. """ if self.model == 'krumsiek11' or self.model == 'var': # we already built the coupling matrix in set_coupl20() return self.Coupl = np.zeros((self.dim,self.dim)) for i in range(self.Adj.shape[0]): for j,a in enumerate(self.Adj[i]): # if there is a 1 in Adj, specify co and antiregulation # and strength of regulation if a != 0: co_anti = np.random.randint(2) # set a lower bound for the coupling parameters # they ought not to be smaller than 0.1 # and not be larger than 0.4 self.Coupl[i,j] = 0.0*np.random.rand() + 0.1 # set sign for coupling if co_anti == 1: self.Coupl[i,j] *= -1 # enforce certain requirements on models if self.model == 1: self.coupl_model1() elif self.model == 5: self.coupl_model5() elif self.model in [6,7]: self.coupl_model6() elif self.model in [8,9,10]: self.coupl_model8() # output if self.verbosity > 1: settings.m(0,self.Coupl)
Using the adjacency matrix, sample a coupling matrix.
def table_ensure(cls, rr): ''' Creates the table if it doesn't exist. ''' dbs = rr.db_list().run() if not rr.dbname in dbs: logging.info('creating rethinkdb database %s', repr(rr.dbname)) rr.db_create(rr.dbname).run() tables = rr.table_list().run() if not cls.table in tables: logging.info( 'creating rethinkdb table %s in database %s', repr(cls.table), repr(rr.dbname)) cls.table_create(rr)
Creates the table if it doesn't exist.
def get_project(id=None, name=None): """ Get a specific Project by ID or name """ content = get_project_raw(id, name) if content: return utils.format_json(content)
Get a specific Project by ID or name
def ReadGRRUser(self, username, cursor=None): """Reads a user object corresponding to a given name.""" cursor.execute( "SELECT username, password, ui_mode, canary_mode, user_type " "FROM grr_users WHERE username_hash = %s", [mysql_utils.Hash(username)]) row = cursor.fetchone() if row is None: raise db.UnknownGRRUserError(username) return self._RowToGRRUser(row)
Reads a user object corresponding to a given name.
def refresh(self)->None: "Apply any logit, flow, or affine transfers that have been sent to the `Image`." if self._logit_px is not None: self._px = self._logit_px.sigmoid_() self._logit_px = None if self._affine_mat is not None or self._flow is not None: self._px = _grid_sample(self._px, self.flow, **self.sample_kwargs) self.sample_kwargs = {} self._flow = None return self
Apply any logit, flow, or affine transfers that have been sent to the `Image`.
def enable_host_svc_notifications(self, host): """Enable services notifications for a host Format of the line that triggers function call:: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None """ for service_id in host.services: if service_id in self.daemon.services: service = self.daemon.services[service_id] self.enable_svc_notifications(service) self.send_an_element(service.get_update_status_brok())
Enable services notifications for a host Format of the line that triggers function call:: ENABLE_HOST_SVC_NOTIFICATIONS;<host_name> :param host: host to edit :type host: alignak.objects.host.Host :return: None
def compile_action_bound_constraints(self, state: Sequence[tf.Tensor]) -> Dict[str, Bounds]: '''Compiles all actions bounds for the given `state`. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from action names to a pair of :obj:`rddl2tf.fluent.TensorFluent` representing its lower and upper bounds. ''' scope = self.action_precondition_scope(state) lower_bounds = self.rddl.domain.action_lower_bound_constraints upper_bounds = self.rddl.domain.action_upper_bound_constraints with self.graph.as_default(): with tf.name_scope('action_bound_constraints'): bounds = {} for name in self.rddl.domain.action_fluent_ordering: lower_expr = lower_bounds.get(name) lower = None if lower_expr is not None: with tf.name_scope('lower_bound'): lower = self._compile_expression(lower_expr, scope) upper_expr = upper_bounds.get(name) upper = None if upper_expr is not None: with tf.name_scope('upper_bound'): upper = self._compile_expression(upper_expr, scope) bounds[name] = (lower, upper) return bounds
Compiles all actions bounds for the given `state`. Args: state (Sequence[tf.Tensor]): The current state fluents. Returns: A mapping from action names to a pair of :obj:`rddl2tf.fluent.TensorFluent` representing its lower and upper bounds.
def iscontainer(*items): """ Checks whether all the provided items are containers (i.e of class list, dict, tuple, etc...) """ return all(isinstance(i, Iterable) and not isinstance(i, basestring) for i in items)
Checks whether all the provided items are containers (i.e of class list, dict, tuple, etc...)
def minimum_geometries(self, n=None, symmetry_measure_type=None, max_csm=None): """ Returns a list of geometries with increasing continuous symmetry measure in this ChemicalEnvironments object :param n: Number of geometries to be included in the list :return: list of geometries with increasing continuous symmetry measure in this ChemicalEnvironments object :raise: ValueError if no coordination geometry is found in this ChemicalEnvironments object """ cglist = [cg for cg in self.coord_geoms] if symmetry_measure_type is None: csms = np.array([self.coord_geoms[cg]['other_symmetry_measures']['csm_wcs_ctwcc'] for cg in cglist]) else: csms = np.array([self.coord_geoms[cg]['other_symmetry_measures'][symmetry_measure_type] for cg in cglist]) csmlist = [self.coord_geoms[cg] for cg in cglist] isorted = np.argsort(csms) if max_csm is not None: if n is None: return [(cglist[ii], csmlist[ii]) for ii in isorted if csms[ii] <= max_csm] else: return [(cglist[ii], csmlist[ii]) for ii in isorted[:n] if csms[ii] <= max_csm] else: if n is None: return [(cglist[ii], csmlist[ii]) for ii in isorted] else: return [(cglist[ii], csmlist[ii]) for ii in isorted[:n]]
Returns a list of geometries with increasing continuous symmetry measure in this ChemicalEnvironments object :param n: Number of geometries to be included in the list :return: list of geometries with increasing continuous symmetry measure in this ChemicalEnvironments object :raise: ValueError if no coordination geometry is found in this ChemicalEnvironments object
def close_window(self, window_name=None, title=None, url=None): """ WebDriver implements only closing current window. If you want to close some window without having to switch to it, use this method. """ main_window_handle = self.current_window_handle self.switch_to_window(window_name, title, url) self.close() self.switch_to_window(main_window_handle)
WebDriver implements only closing current window. If you want to close some window without having to switch to it, use this method.
def backdoor_handler(clientsock, namespace=None): """start an interactive python interpreter on an existing connection .. note:: this function will block for as long as the connection remains alive. :param sock: the socket on which to serve the interpreter :type sock: :class:`Socket<greenhouse.io.sockets.Socket>` :param namespace: the local namespace dict for the interpreter, or None to have the function create its own empty namespace :type namespace: dict or None """ namespace = {} if namespace is None else namespace.copy() console = code.InteractiveConsole(namespace) multiline_statement = [] stdout, stderr = StringIO(), StringIO() clientsock.sendall(PREAMBLE + "\n" + PS1) for input_line in _produce_lines(clientsock): input_line = input_line.rstrip() if input_line: input_line = '\n' + input_line source = '\n'.join(multiline_statement) + input_line response = '' with _wrap_stdio(stdout, stderr): result = console.runsource(source) response += stdout.getvalue() err = stderr.getvalue() if err: response += err if err or not result: multiline_statement = [] response += PS1 else: multiline_statement.append(input_line) response += PS2 clientsock.sendall(response)
start an interactive python interpreter on an existing connection .. note:: this function will block for as long as the connection remains alive. :param sock: the socket on which to serve the interpreter :type sock: :class:`Socket<greenhouse.io.sockets.Socket>` :param namespace: the local namespace dict for the interpreter, or None to have the function create its own empty namespace :type namespace: dict or None
def _removeContentPanels(cls, remove): """ Remove the panels and so hide the fields named. """ if type(remove) is str: remove = [remove] cls.content_panels = [panel for panel in cls.content_panels if getattr(panel, "field_name", None) not in remove]
Remove the panels and so hide the fields named.
def disassociate_eip_address(public_ip=None, association_id=None, region=None, key=None, keyid=None, profile=None): ''' Disassociate an Elastic IP address from a currently running instance. This requires exactly one of either 'association_id' or 'public_ip', depending on whether you’re dealing with a VPC or EC2 Classic address. public_ip (string) – Public IP address, for EC2 Classic allocations. association_id (string) – Association ID for a VPC-bound EIP. returns (bool) - True on success, False on failure. CLI Example: .. code-block:: bash salt myminion boto_ec2.disassociate_eip_address association_id=eipassoc-e3ba2d16 .. versionadded:: 2016.3.0 ''' conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) try: return conn.disassociate_address(public_ip, association_id) except boto.exception.BotoServerError as e: log.error(e) return False
Disassociate an Elastic IP address from a currently running instance. This requires exactly one of either 'association_id' or 'public_ip', depending on whether you’re dealing with a VPC or EC2 Classic address. public_ip (string) – Public IP address, for EC2 Classic allocations. association_id (string) – Association ID for a VPC-bound EIP. returns (bool) - True on success, False on failure. CLI Example: .. code-block:: bash salt myminion boto_ec2.disassociate_eip_address association_id=eipassoc-e3ba2d16 .. versionadded:: 2016.3.0
def add_copy_spec_scl(self, scl, copyspecs): """Same as add_copy_spec, except that it prepends path to SCL root to "copyspecs". """ if isinstance(copyspecs, six.string_types): copyspecs = [copyspecs] scl_copyspecs = [] for copyspec in copyspecs: scl_copyspecs.append(self.convert_copyspec_scl(scl, copyspec)) self.add_copy_spec(scl_copyspecs)
Same as add_copy_spec, except that it prepends path to SCL root to "copyspecs".
def confidence_interval_hazard_(self): """ The confidence interval of the hazard. """ return self._compute_confidence_bounds_of_transform(self._hazard, self.alpha, self._ci_labels)
The confidence interval of the hazard.
def remove_edge_fun(graph): """ Returns a function that removes an edge from the `graph`. ..note:: The out node is removed if this is isolate. :param graph: A directed graph. :type graph: networkx.classes.digraph.DiGraph :return: A function that remove an edge from the `graph`. :rtype: callable """ # Namespace shortcut for speed. rm_edge, rm_node = graph.remove_edge, graph.remove_node from networkx import is_isolate def remove_edge(u, v): rm_edge(u, v) # Remove the edge. if is_isolate(graph, v): # Check if v is isolate. rm_node(v) # Remove the isolate out node. return remove_edge
Returns a function that removes an edge from the `graph`. ..note:: The out node is removed if this is isolate. :param graph: A directed graph. :type graph: networkx.classes.digraph.DiGraph :return: A function that remove an edge from the `graph`. :rtype: callable
def package_hidden(self): """ Flattens the hidden state from all LSTM layers into one tensor (for the sequence generator). """ if self.inference: hidden = torch.cat(tuple(itertools.chain(*self.next_hidden))) else: hidden = None return hidden
Flattens the hidden state from all LSTM layers into one tensor (for the sequence generator).
def map_to_matype(self, matype): """ Convert to the alpha vantage math type integer. It returns an integer correspondent to the type of math to apply to a function. It raises ValueError if an integer greater than the supported math types is given. Keyword Arguments: matype: The math type of the alpha vantage api. It accepts integers or a string representing the math type. * 0 = Simple Moving Average (SMA), * 1 = Exponential Moving Average (EMA), * 2 = Weighted Moving Average (WMA), * 3 = Double Exponential Moving Average (DEMA), * 4 = Triple Exponential Moving Average (TEMA), * 5 = Triangular Moving Average (TRIMA), * 6 = T3 Moving Average, * 7 = Kaufman Adaptive Moving Average (KAMA), * 8 = MESA Adaptive Moving Average (MAMA) """ # Check if it is an integer or a string try: value = int(matype) if abs(value) > len(AlphaVantage._ALPHA_VANTAGE_MATH_MAP): raise ValueError("The value {} is not supported".format(value)) except ValueError: value = AlphaVantage._ALPHA_VANTAGE_MATH_MAP.index(matype) return value
Convert to the alpha vantage math type integer. It returns an integer correspondent to the type of math to apply to a function. It raises ValueError if an integer greater than the supported math types is given. Keyword Arguments: matype: The math type of the alpha vantage api. It accepts integers or a string representing the math type. * 0 = Simple Moving Average (SMA), * 1 = Exponential Moving Average (EMA), * 2 = Weighted Moving Average (WMA), * 3 = Double Exponential Moving Average (DEMA), * 4 = Triple Exponential Moving Average (TEMA), * 5 = Triangular Moving Average (TRIMA), * 6 = T3 Moving Average, * 7 = Kaufman Adaptive Moving Average (KAMA), * 8 = MESA Adaptive Moving Average (MAMA)
def open_like(a, path, **kwargs): """Open a persistent array like `a`.""" _like_args(a, kwargs) if isinstance(a, Array): kwargs.setdefault('fill_value', a.fill_value) return open_array(path, **kwargs)
Open a persistent array like `a`.
def toLily(self): ''' Method which converts the object instance, its attributes and children to a string of lilypond code :return: str of lilypond code ''' self.CheckDivisions() self.CheckTotals() staves = self.GetChildrenIndexes() name = "" shortname = "" if hasattr(self.item, "name"): name = self.item.name name = helpers.SplitString(name) if hasattr(self.item, "shortname"): shortname = helpers.SplitString(self.item.shortname) variables = self.CalculateVariable(str(self.index), staves) first_part = "" for staff, variable in zip(staves, variables): staffstring = variable if hasattr( self.GetChild(staff), "tab") and self.GetChild(staff).tab: staffstring += " = \\new TabStaff" elif hasattr(self.GetChild(staff), "drum") and self.GetChild(staff).drum: staffstring += " = \\drums" else: staffstring += " = \\new Staff" if len(staves) == 1: if name != "": staffstring += " \with {\n" staffstring += "instrumentName = " + name + " \n" if shortname != "": staffstring += "shortInstrumentName = " + \ shortname + " \n" staffstring += " }" staffstring += "{" + self.GetChild(staff).toLily() + " }\n\n" first_part += staffstring second_part = "" if len(variables) > 1: second_part += "\\new StaffGroup " if name != "": second_part += "\with {\n" second_part += "instrumentName = " + name + " \n" second_part += " }" second_part += "<<" second_part += "\n".join(["\\" + var for var in variables]) if len(variables) > 1: second_part += ">>" return [first_part, second_part]
Method which converts the object instance, its attributes and children to a string of lilypond code :return: str of lilypond code
def _load_plugin(self, plugin_script, args=None, config=None): """Load the plugin (script), init it and add to the _plugin dict.""" # The key is the plugin name # for example, the file glances_xxx.py # generate self._plugins_list["xxx"] = ... name = plugin_script[len(self.header):-3].lower() try: # Import the plugin plugin = __import__(plugin_script[:-3]) # Init and add the plugin to the dictionary if name in ('help', 'amps', 'ports', 'folders'): self._plugins[name] = plugin.Plugin(args=args, config=config) else: self._plugins[name] = plugin.Plugin(args=args) # Set the disable_<name> to False by default if self.args is not None: setattr(self.args, 'disable_' + name, getattr(self.args, 'disable_' + name, False)) except Exception as e: # If a plugin can not be log, display a critical message # on the console but do not crash logger.critical("Error while initializing the {} plugin ({})".format(name, e)) logger.error(traceback.format_exc())
Load the plugin (script), init it and add to the _plugin dict.
def resolve(self, token): """Attempts to resolve the :class:`SymbolToken` against the current table. If the ``text`` is not None, the token is returned, otherwise, a token in the table is attempted to be retrieved. If not token is found, then this method will raise. """ if token.text is not None: return token resolved_token = self.symbol_table.get(token.sid, None) if resolved_token is None: raise IonException('Out of range SID: %d' % token.sid) return resolved_token
Attempts to resolve the :class:`SymbolToken` against the current table. If the ``text`` is not None, the token is returned, otherwise, a token in the table is attempted to be retrieved. If not token is found, then this method will raise.
def map_constructor(self, loader, node, deep=False): """ Walk the mapping, recording any duplicate keys. """ mapping = {} for key_node, value_node in node.value: key = loader.construct_object(key_node, deep=deep) value = loader.construct_object(value_node, deep=deep) if key in mapping: raise ValueError(f"Duplicate key: \"{key}\"") mapping[key] = value return mapping
Walk the mapping, recording any duplicate keys.
def group_citation_edges(edges: Iterable[EdgeTuple]) -> Iterable[Tuple[str, Iterable[EdgeTuple]]]: """Return an iterator over pairs of citation values and their corresponding edge iterators.""" return itt.groupby(edges, key=_citation_sort_key)
Return an iterator over pairs of citation values and their corresponding edge iterators.
def set_webhook(self, *args, **kwargs): """See :func:`set_webhook`""" return set_webhook(*args, **self._merge_overrides(**kwargs)).run()
See :func:`set_webhook`
def html(self): """ Returns ``innerHTML`` of whole page. On page have to be tag ``body``. .. versionadded:: 2.2 """ try: body = self.get_elm(tag_name='body') except selenium_exc.NoSuchElementException: return None else: return body.get_attribute('innerHTML')
Returns ``innerHTML`` of whole page. On page have to be tag ``body``. .. versionadded:: 2.2
def no_new_errors(new_data, old_data, strict=False): """ Pylint Validator that will fail any review if there are new Pylint errors in it (Pylint message starts with 'E:') :param new_data: :param old_data: :return: """ success = True score = 0 message = '' if new_data['errors'] > old_data['errors']: success = False message = "Failed, More errors than prior runs!({} > {})\n" \ "Average Score: {}".format(new_data['errors'], old_data['errors'], new_data['average']) score = -1 return success, score, message
Pylint Validator that will fail any review if there are new Pylint errors in it (Pylint message starts with 'E:') :param new_data: :param old_data: :return:
def _set_anycast_gateway_mac(self, v, load=False): """ Setter method for anycast_gateway_mac, mapped from YANG variable /rbridge_id/ipv6/static_ag_ipv6_config/anycast_gateway_mac (container) If this variable is read-only (config: false) in the source YANG file, then _set_anycast_gateway_mac is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_anycast_gateway_mac() directly. YANG Description: Anycast gateway MAC address. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=anycast_gateway_mac.anycast_gateway_mac, is_container='container', presence=False, yang_name="anycast-gateway-mac", rest_name="anycast-gateway-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Anycast gateway MAC address.'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """anycast_gateway_mac must be of a type compatible with container""", 'defined-type': "container", 'generated-type': """YANGDynClass(base=anycast_gateway_mac.anycast_gateway_mac, is_container='container', presence=False, yang_name="anycast-gateway-mac", rest_name="anycast-gateway-mac", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Anycast gateway MAC address.'}}, namespace='urn:brocade.com:mgmt:brocade-vrrp', defining_module='brocade-vrrp', yang_type='container', is_config=True)""", }) self.__anycast_gateway_mac = t if hasattr(self, '_set'): self._set()
Setter method for anycast_gateway_mac, mapped from YANG variable /rbridge_id/ipv6/static_ag_ipv6_config/anycast_gateway_mac (container) If this variable is read-only (config: false) in the source YANG file, then _set_anycast_gateway_mac is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_anycast_gateway_mac() directly. YANG Description: Anycast gateway MAC address.
def update_params_for_auth(self, headers, querys, auth_settings): """ Updates header and query params based on authentication setting. :param headers: Header parameters dict to be updated. :param querys: Query parameters tuple list to be updated. :param auth_settings: Authentication setting identifiers list. """ if not auth_settings: return for auth in auth_settings: auth_setting = self.configuration.auth_settings().get(auth) if auth_setting: if not auth_setting['value']: continue elif auth_setting['in'] == 'header': headers[auth_setting['key']] = auth_setting['value'] elif auth_setting['in'] == 'query': querys.append((auth_setting['key'], auth_setting['value'])) else: raise ValueError( 'Authentication token must be in `query` or `header`' )
Updates header and query params based on authentication setting. :param headers: Header parameters dict to be updated. :param querys: Query parameters tuple list to be updated. :param auth_settings: Authentication setting identifiers list.
def _compute_counts_from_intensity(intensity, bexpcube): """ Make the counts map from the intensity """ data = intensity.data * np.sqrt(bexpcube.data[1:] * bexpcube.data[0:-1]) return HpxMap(data, intensity.hpx)
Make the counts map from the intensity
def get_pidfile(pidfile): ''' Return the pid from a pidfile as an integer ''' try: with salt.utils.files.fopen(pidfile) as pdf: pid = pdf.read().strip() return int(pid) except (OSError, IOError, TypeError, ValueError): return -1
Return the pid from a pidfile as an integer
def obj(self): """Returns the value of :meth:`ObjectMixin.get_object` and sets a private property called _obj. This property ensures the logic around allow_none is enforced across Endpoints using the Object interface. :raises: :class:`werkzeug.exceptions.BadRequest` :returns: The result of :meth:ObjectMixin.get_object` """ if not getattr(self, '_obj', None): self._obj = self.get_object() if self._obj is None and not self.allow_none: self.return_error(404) return self._obj
Returns the value of :meth:`ObjectMixin.get_object` and sets a private property called _obj. This property ensures the logic around allow_none is enforced across Endpoints using the Object interface. :raises: :class:`werkzeug.exceptions.BadRequest` :returns: The result of :meth:ObjectMixin.get_object`
def try_sort_fmt_opts(rdf_format_opts_list, uri): """reorder fmt options based on uri file type suffix - if available - so to test most likely serialization first when parsing some RDF NOTE this is not very nice as it is hardcoded and assumes the origin serializations to be this: ['turtle', 'xml', 'n3', 'nt', 'json-ld', 'rdfa'] """ filename, file_extension = os.path.splitext(uri) # print(filename, file_extension) if file_extension == ".ttl" or file_extension == ".turtle": return ['turtle', 'n3', 'nt', 'json-ld', 'rdfa', 'xml'] elif file_extension == ".xml" or file_extension == ".rdf": return ['xml', 'turtle', 'n3', 'nt', 'json-ld', 'rdfa'] elif file_extension == ".nt" or file_extension == ".n3": return ['n3', 'nt', 'turtle', 'xml', 'json-ld', 'rdfa'] elif file_extension == ".json" or file_extension == ".jsonld": return [ 'json-ld', 'rdfa', 'n3', 'nt', 'turtle', 'xml', ] elif file_extension == ".rdfa": return [ 'rdfa', 'json-ld', 'n3', 'nt', 'turtle', 'xml', ] else: return rdf_format_opts_list
reorder fmt options based on uri file type suffix - if available - so to test most likely serialization first when parsing some RDF NOTE this is not very nice as it is hardcoded and assumes the origin serializations to be this: ['turtle', 'xml', 'n3', 'nt', 'json-ld', 'rdfa']
def applySiiRanking(siiContainer, specfile):
    """Iterates over all Sii entries of a specfile in siiContainer and sorts
    Sii elements of the same spectrum according to the score attribute
    specified in ``siiContainer.info[specfile]['rankAttr']``. Sorted Sii
    elements are then ranked according to their sorted position; if multiple
    Sii have the same score, all get the same rank and the next entry's rank
    is its list position.

    :param siiContainer: instance of :class:`maspy.core.SiiContainer`
    :param specfile: unambiguous identifier of a ms-run file. Is also used as
        a reference to other MasPy file containers.
    """
    attr = siiContainer.info[specfile]['rankAttr']
    reverse = siiContainer.info[specfile]['rankLargerBetter']
    for itemList in siiContainer.container[specfile].values():
        # Sort by the score attribute only. The previous implementation sorted
        # (score, sii) tuples, which raises TypeError on score ties in
        # Python 3 because Sii objects define no ordering.
        sortedItems = sorted(itemList, key=lambda sii: getattr(sii, attr),
                             reverse=reverse)

        # Rank Sii according to their position; ties share the same rank.
        lastValue = None
        rank = None
        for itemPosition, item in enumerate(sortedItems, 1):
            score = getattr(item, attr)
            if score != lastValue:
                rank = itemPosition
            item.rank = rank
            lastValue = score
Iterates over all Sii entries of a specfile in siiContainer and sorts Sii elements of the same spectrum according to the score attribute specified in ``siiContainer.info[specfile]['rankAttr']``. Sorted Sii elements are then ranked according to their sorted position; if multiple Sii have the same score, all get the same rank and the next entry's rank is its list position. :param siiContainer: instance of :class:`maspy.core.SiiContainer` :param specfile: unambiguous identifier of a ms-run file. Is also used as a reference to other MasPy file containers.
def _unpack_bin(self, packed):
    """Internal. Decode 16-bit RGB565 bytes into a python list [R, G, B]."""
    (value,) = struct.unpack('H', packed)
    # Split the 5-6-5 bit fields, then scale each channel up to 8 bits.
    red5 = value >> 11
    green6 = (value >> 5) & 0x3F
    blue5 = value & 0x1F
    return [red5 << 3, green6 << 2, blue5 << 3]
Internal. Decodes 16 bit RGB565 into python list [R,G,B]
def database(
    state, host, name,
    present=True, owner=None, template=None, encoding=None,
    lc_collate=None, lc_ctype=None, tablespace=None,
    connection_limit=None,
    # Details for speaking to PostgreSQL via `psql` CLI
    postgresql_user=None, postgresql_password=None,
    postgresql_host=None, postgresql_port=None,
):
    '''
    Add/remove PostgreSQL databases.

    + name: name of the database
    + present: whether the database should exist or not
    + owner: the PostgreSQL role that owns the database
    + template: name of the PostgreSQL template to use
    + encoding: encoding of the database
    + lc_collate: lc_collate of the database
    + lc_ctype: lc_ctype of the database
    + tablespace: the tablespace to use for the template
    + connection_limit: the connection limit to apply to the database
    + postgresql_*: global module arguments, see above

    Updates:
        pyinfra will not attempt to change existing databases - it will
        either create or drop databases, but not alter them (if the db exists
        this operation will make no changes).
    '''
    existing_databases = host.fact.postgresql_databases(
        postgresql_user, postgresql_password,
        postgresql_host, postgresql_port,
    )
    psql_kwargs = {
        'user': postgresql_user,
        'password': postgresql_password,
        'host': postgresql_host,
        'port': postgresql_port,
    }

    if not present:
        # Only drop when the database actually exists.
        if name in existing_databases:
            yield make_execute_psql_command(
                'DROP DATABASE {0}'.format(name),
                **psql_kwargs
            )
        return

    # Database wanted and already there: nothing to do.
    if name in existing_databases:
        return

    clauses = ['CREATE DATABASE {0}'.format(name)]
    options = (
        ('OWNER', owner),
        ('TEMPLATE', template),
        ('ENCODING', encoding),
        ('LC_COLLATE', lc_collate),
        ('LC_CTYPE', lc_ctype),
        ('TABLESPACE', tablespace),
        ('CONNECTION LIMIT', connection_limit),
    )
    clauses.extend(
        '{0} {1}'.format(keyword, value)
        for keyword, value in options
        if value
    )

    yield make_execute_psql_command(' '.join(clauses), **psql_kwargs)
Add/remove PostgreSQL databases. + name: name of the database + present: whether the database should exist or not + owner: the PostgreSQL role that owns the database + template: name of the PostgreSQL template to use + encoding: encoding of the database + lc_collate: lc_collate of the database + lc_ctype: lc_ctype of the database + tablespace: the tablespace to use for the template + connection_limit: the connection limit to apply to the database + postgresql_*: global module arguments, see above Updates: pyinfra will not attempt to change existing databases - it will either create or drop databases, but not alter them (if the db exists this operation will make no changes).
def to(self, unit):
    """Convert this distance to the given AstroPy unit."""
    # Imported lazily so astropy is only required when conversion is used.
    from astropy import units
    return (self.au * units.au).to(unit)
Convert this distance to the given AstroPy unit.
def project_branches(self):
    """List all branches associated with a repository.

    :return: the ``branches`` entry of the API response
    """
    url = "{}git/branches".format(self.create_basic_url())
    response = self._call_api(url)
    return response['branches']
List all branches associated with a repository. :return:
def ob_is_tty(ob):
    """Check whether an object (e.g. a file-like object) is attached to a tty.

    :param ob: the object to probe; its descriptor is resolved via
        ``get_fileno``.
    :return: True when the resolved descriptor is a tty, False otherwise.
    """
    fileno = get_fileno(ob)
    # Bug fix: file descriptor 0 (stdin) is falsy but perfectly valid, so a
    # plain truthiness check wrongly reported stdin as not-a-tty. Compare
    # against None instead (assumes get_fileno returns None when no
    # descriptor is available — TODO confirm against get_fileno).
    if fileno is not None:
        return os.isatty(fileno)
    return False
checks if an object (like a file-like object) is a tty.
def add_argument(self, parser, permissive=False, **override_kwargs):
    """Add an option to an argparse parser.

    :param parser: the ``argparse`` parser (or subparser) to add this
        option to.
    :keyword permissive: when true, build a parser that does not validate
        required arguments.
    :param override_kwargs: extra keyword arguments merged over the stored
        kwargs before calling ``parser.add_argument`` (only on the
        non-grouped code path — see NOTE below).
    """
    kwargs = {}
    required = None
    if self.kwargs:
        # Work on a copy so the stored kwargs are never mutated.
        kwargs = copy.copy(self.kwargs)
        # Surface the backing environment variable in the help text.
        if 'env' in kwargs and 'help' in kwargs:
            kwargs['help'] = "%s (or set %s)" % (kwargs['help'],
                                                 kwargs['env'])
        if permissive:
            # Strip 'required' so a permissive parser won't enforce it.
            required = kwargs.pop('required', None)
        # 'env' and 'ini_section' are config-layer keys, not argparse
        # keywords — drop them before they reach add_argument().
        try:
            del kwargs['env']
        except KeyError:
            pass
        try:
            del kwargs['ini_section']
        except KeyError:
            pass

    # allow custom and/or exclusive argument groups
    if kwargs.get('group') or kwargs.get('mutually_exclusive'):
        # Fall back to the option's dest as the group name.
        groupname = kwargs.pop('group', None) or kwargs.get('dest')
        mutually_exclusive = kwargs.pop('mutually_exclusive', None)
        if not groupname:
            raise NoGroupForOption(
                "%s requires either 'group' or 'dest'." % self)
        description = kwargs.pop('group_description', None)
        # Reuse an existing group of the same title rather than creating
        # a duplicate section in the help output.
        exists = [grp for grp in parser._action_groups
                  if grp.title == groupname]
        if exists:
            group = exists[0]
            if description and not group.description:
                group.description = description
        else:
            group = parser.add_argument_group(title=groupname,
                                              description=description)
        if mutually_exclusive:
            if not required:
                required = kwargs.pop('required', None)
            mutexg_title = '%s mutually-exclusive-group' % groupname
            # Likewise reuse a matching mutually-exclusive subgroup.
            exists = [grp for grp in group._mutually_exclusive_groups
                      if grp.title == mutexg_title]
            if exists:
                group = exists[0]
            else:
                # extend parent group
                group = group.add_mutually_exclusive_group(
                    required=required)
                group.title = mutexg_title
            # if any in the same group are required, then the
            # mutually exclusive group should be set to required
            if required and not group.required:
                group.required = required
        self._mutexgroup = group
        self._action = group.add_argument(*self.args, **kwargs)
        # NOTE(review): this early return means override_kwargs are never
        # applied on the grouped path — confirm whether that is intended.
        return

    kwargs.update(override_kwargs)
    self._action = parser.add_argument(*self.args, **kwargs)
Add an option to an argparse parser. :keyword permissive: when true, build a parser that does not validate required arguments.
def get_entry_type(hosts_entry=None):
    """Classify a single line from a hosts file.

    :param hosts_entry: A line from the hosts file
    :return: 'comment' | 'blank' | 'ipv4' | 'ipv6' | None when the line
        cannot be classified
    """
    if not hosts_entry or not isinstance(hosts_entry, str):
        return None
    stripped = hosts_entry.strip()
    if not stripped:
        return 'blank'
    if stripped.startswith('#'):
        return 'comment'
    # Classify by the address in the first whitespace-separated field.
    first_field = stripped.split()[0]
    if is_ipv6(first_field):
        return 'ipv6'
    if is_ipv4(first_field):
        return 'ipv4'
Return the type of entry for the line of hosts file passed :param hosts_entry: A line from the hosts file :return: 'comment' | 'blank' | 'ipv4' | 'ipv6'
def level_at_index(self, index):
    """Return the list of nodes at level ``index``, in DFS order.

    :param int index: the index
    :rtype: list of :class:`~aeneas.tree.Tree`
    :raises: ValueError if the given ``index`` is not valid
    """
    if not isinstance(index, int):
        self.log_exc(u"Index is not an integer", None, True, TypeError)
    all_levels = self.levels
    # Reject out-of-range levels before indexing.
    if not (0 <= index < len(all_levels)):
        self.log_exc(u"The given level index '%d' is not valid" % (index),
                     None, True, ValueError)
    return self.levels[index]
Return the list of nodes at level ``index``, in DFS order. :param int index: the index :rtype: list of :class:`~aeneas.tree.Tree` :raises: ValueError if the given ``index`` is not valid
def _parse_timeframe_line(self, line):
    """Parse a cue timing line and return its (start, end) timestamps."""
    match = self._validate_timeframe_line(line)
    if not match:
        raise MalformedCaptionError('Invalid time format')
    start, end = match.group(1), match.group(2)
    return start, end
Parse timeframe line and return start and end timestamps.
def status(self):
    """
    The current status of the event (started, finished or pending).

    Returns "started" or "finished" explicitly; any other case falls
    through and returns None — presumably callers treat None as
    "pending" (TODO confirm against callers).
    """
    myNow = timezone.localtime(timezone=self.tz)
    # Multi-day events span num_days; daysDelta reaches back to the first day.
    daysDelta = dt.timedelta(days=self.num_days - 1)
    # NB: postponements can be created after the until date
    # so ignore that
    todayStart = getAwareDatetime(myNow.date(), dt.time.min, self.tz)
    eventStart, event = self.__afterOrPostponedTo(todayStart - daysDelta)
    if eventStart is None:
        return "finished"
    eventFinish = getAwareDatetime(eventStart.date() + daysDelta,
                                   event.time_to, self.tz)
    if event.time_from is None:
        # No start time: treat the event as starting the following day.
        eventStart += _1day
    if eventStart < myNow < eventFinish:
        # if there are two occurences on the same day then we may miss
        # that one of them has started
        return "started"
    if (self.repeat.until and eventFinish < myNow and
            self.__afterOrPostponedTo(myNow)[0] is None):
        # only just wound up, the last occurence was earlier today
        return "finished"
The current status of the event (started, finished or pending).
def in_date(objet, pattern):
    """abstractSearch within a datetime.date value.

    Renders the date and matches the whitespace-stripped pattern against it.
    """
    if not objet:
        return False
    cleaned_pattern = pattern.replace(" ", "")
    rendered = abstractRender.date(objet)
    return bool(re.search(cleaned_pattern, rendered))
abstractSearch within a datetime.date value
def get_mol_filename(chebi_id):
    '''Write the mol data for chebi_id to a temporary file and return its path.

    :param chebi_id: the ChEBI identifier to fetch mol data for.
    :return: path to a temporary ``<chebi_id>_XXXXXX.mol`` file, or None
        when no mol data exists for the id.
    '''
    mol = get_mol(chebi_id)

    if mol is None:
        return None

    # Bug fix: tempfile.mkstemp's signature is (suffix, prefix, ...); the
    # original call passed them swapped, producing names like
    # ".molXXXXXX1234_" instead of "1234_XXXXXX.mol".
    file_descriptor, mol_filename = tempfile.mkstemp(
        suffix='.mol', prefix=str(chebi_id) + '_')

    # Write through the descriptor mkstemp returned (also closes it),
    # instead of opening the path a second time and leaking the original fd.
    with os.fdopen(file_descriptor, 'w') as mol_file:
        mol_file.write(mol.get_structure())

    return mol_filename
Returns mol file
def __validate_definitions(self, definitions, field):
    """ Validate a field's value against its defined rules.

    Builds an ordered queue of rule names (priority rules first, then
    mandatory rules, then the remaining schema rules) and runs each
    rule's validator against the field's value.

    :param definitions: the rules set (or reference) for this field.
    :param field: the name of the field in ``self.document`` to validate.
    """

    def validate_rule(rule):
        # Resolve and invoke the 'validate' handler for a single rule.
        validator = self.__get_rule_handler('validate', rule)
        return validator(definitions.get(rule, None), field, value)

    definitions = self._resolve_rules_set(definitions)
    value = self.document[field]

    # Priority rules run first, but only if they apply to this field.
    rules_queue = [
        x for x in self.priority_validations
        if x in definitions or x in self.mandatory_validations
    ]
    # Then the mandatory rules that were not already queued.
    rules_queue.extend(
        x for x in self.mandatory_validations if x not in rules_queue
    )
    # Finally the remaining schema rules, skipping normalization rules and
    # meta-keys that are not validation rules.
    rules_queue.extend(
        x for x in definitions
        if x not in rules_queue
        and x not in self.normalization_rules
        and x not in ('allow_unknown', 'require_all', 'meta', 'required')
    )
    self._remaining_rules = rules_queue
    # Handlers may mutate _remaining_rules, so consume it as a live queue.
    while self._remaining_rules:
        rule = self._remaining_rules.pop(0)
        try:
            result = validate_rule(rule)
            # TODO remove on next breaking release
            if result:
                break
        except _SchemaRuleTypeError:
            # A type-level schema error makes further rules meaningless.
            break
    self._drop_remaining_rules()
Validate a field's value against its defined rules.
def autobuild_shiparchive(src_file):
    """Create a ship file archive containing a yaml_file and its dependencies.

    If yaml_file depends on any build products as external files, it must
    be a jinja2 template that references the file using the find_product
    filter so that we can figure out where those build products are going
    and create the right dependency graph.

    Args:
        src_file (str): The path to the input yaml file template. This
            file path must end .yaml.tpl and is rendered into a .yaml
            file and then packaged into a .ship file along with any
            products that are referenced in it.
    """
    if not src_file.endswith('.tpl'):
        raise BuildError("You must pass a .tpl file to autobuild_shiparchive",
                         src_file=src_file)

    env = Environment(tools=[])
    family = ArchitectureGroup('module_settings.json')
    target = family.platform_independent_target()
    resolver = ProductResolver.Create()

    # Parse through build_step products to see what needs to be imported.
    custom_steps = []
    for build_step in family.tile.find_products('build_step'):
        # build_step entries look like "<file path>:<class name>".
        full_file_name = build_step.split(":")[0]
        basename = os.path.splitext(os.path.basename(full_file_name))[0]
        folder = os.path.dirname(full_file_name)

        # Dynamically load the module that declares the custom build step.
        fileobj, pathname, description = imp.find_module(basename, [folder])
        mod = imp.load_module(basename, fileobj, pathname, description)

        full_file_name, class_name = build_step.split(":")
        custom_steps.append((class_name, getattr(mod, class_name)))

    env['CUSTOM_STEPS'] = custom_steps
    env["RESOLVER"] = resolver

    # Derive the intermediate and final artifact names from the template
    # name: foo.yaml.tpl -> foo.yaml -> foo.ship.
    base_name, tpl_name = _find_basename(src_file)
    yaml_name = tpl_name[:-4]
    ship_name = yaml_name[:-5] + ".ship"

    output_dir = target.build_dirs()['output']
    build_dir = os.path.join(target.build_dirs()['build'], base_name)

    tpl_path = os.path.join(build_dir, tpl_name)
    yaml_path = os.path.join(build_dir, yaml_name)
    ship_path = os.path.join(build_dir, ship_name)
    output_path = os.path.join(output_dir, ship_name)

    # We want to build up all related files in
    # <build_dir>/<ship archive_folder>/
    # - First copy the template yaml over
    # - Then render the template yaml
    # - Then find all products referenced in the template yaml and copy them
    # - over
    # - Then build a .ship archive
    # - Then copy that archive into output_dir
    ship_deps = [yaml_path]

    env.Command([tpl_path], [src_file], Copy("$TARGET", "$SOURCE"))

    prod_deps = _find_product_dependencies(src_file, resolver)

    env.Command([yaml_path], [tpl_path],
                action=Action(template_shipfile_action, "Rendering $TARGET"))

    # Copy every referenced build product next to the rendered yaml so the
    # archive step can pick them all up from one folder.
    for prod in prod_deps:
        dest_file = os.path.join(build_dir, prod.short_name)
        ship_deps.append(dest_file)
        env.Command([dest_file], [prod.full_path], Copy("$TARGET", "$SOURCE"))

    env.Command([ship_path], [ship_deps],
                action=Action(create_shipfile, "Archiving Ship Recipe $TARGET"))

    env.Command([output_path], [ship_path], Copy("$TARGET", "$SOURCE"))
Create a ship file archive containing a yaml_file and its dependencies. If yaml_file depends on any build products as external files, it must be a jinja2 template that references the file using the find_product filter so that we can figure out where those build products are going and create the right dependency graph. Args: src_file (str): The path to the input yaml file template. This file path must end .yaml.tpl and is rendered into a .yaml file and then packaged into a .ship file along with any products that are referenced in it.
def ascend_bip32(bip32_pub_node, secret_exponent, child):
    """Recover the parent's secret exponent from a child's.

    Given a BIP32Node with public derivation child ``child`` whose private
    key is known, return the secret exponent for ``bip32_pub_node``.
    """
    child_index_bytes = struct.pack(">l", child)
    sec = public_pair_to_sec(bip32_pub_node.public_pair(), compressed=True)
    # Recompute the HMAC-SHA512 used in public child derivation.
    digest = hmac.HMAC(
        key=bip32_pub_node._chain_code,
        msg=sec + child_index_bytes,
        digestmod=hashlib.sha512,
    ).digest()
    left_exponent = from_bytes_32(digest[:32])
    curve_order = bip32_pub_node._generator.order()
    # Invert the derivation: child = (parent + I_L) mod n.
    return (secret_exponent - left_exponent) % curve_order
Given a BIP32Node with public derivation child "child" with a known private key, return the secret exponent for the bip32_pub_node.
def run_pyxbgen(self, args):
    """Invoke the ``pyxbgen`` command-line tool.

    Args:
        args: iterable of command-line argument strings passed to pyxbgen.
    """
    # NOTE(review): building a shell string from args is injection-prone;
    # consider subprocess.run with a list. Kept as-is to preserve behavior.
    command = 'pyxbgen {}'.format(' '.join(args))
    print(command)
    os.system(command)
Args: args:
def link_bus(self, bus_idx):
    """
    Return the indices of elements linking the given buses.

    :param bus_idx: a single bus index (int/float/str) or a list of them.
    :return: a list with one ``(idx, key)`` tuple per requested bus, where
        ``idx`` is the list of matching element indices and ``key`` the
        corresponding foreign-key field names; both are None when no
        element links that bus.
    """
    ret = []

    # Only series-connected models have per-terminal bus links.
    if not self._config['is_series']:
        self.log(
            'link_bus function is not valid for non-series model <{}>'.
            format(self.name))
        return []

    # Normalize a scalar argument into a one-element list.
    if isinstance(bus_idx, (int, float, str)):
        bus_idx = [bus_idx]

    # Foreign keys into the AC system, excluding the plain 'bus' key.
    fkey = list(self._ac.keys())
    if 'bus' in fkey:
        fkey.remove('bus')

    nfkey = len(fkey)
    fkey_val = [self.__dict__[i] for i in fkey]

    for item in bus_idx:
        idx = []
        key = []

        for i in range(self.n):
            for j in range(nfkey):
                if fkey_val[j][i] == item:
                    idx.append(self.idx[i])
                    key.append(fkey[j])
                    # <= 1 terminal should connect to the same bus
                    break

        # Use None rather than empty lists for "no match".
        if len(idx) == 0:
            idx = None
        if len(key) == 0:
            key = None

        ret.append((idx, key))

    return ret
Return the indices of elements linking the given buses :param bus_idx: :return:
def availability_set_present(name, resource_group, tags=None, platform_update_domain_count=None,
                             platform_fault_domain_count=None, virtual_machines=None, sku=None,
                             connection_auth=None, **kwargs):
    '''
    .. versionadded:: 2019.2.0

    Ensure an availability set exists.

    :param name: Name of the availability set.

    :param resource_group: The resource group assigned to the availability set.

    :param tags: A dictionary of strings can be passed as tag metadata to the availability set object.

    :param platform_update_domain_count: An optional parameter which indicates groups of virtual machines and
        underlying physical hardware that can be rebooted at the same time.

    :param platform_fault_domain_count: An optional parameter which defines the group of virtual machines that share
        a common power source and network switch.

    :param virtual_machines: A list of names of existing virtual machines to be included in the availability set.

    :param sku: The availability set SKU, which specifies whether the availability set is managed or not. Possible
        values are 'Aligned' or 'Classic'. An 'Aligned' availability set is managed, 'Classic' is not.

    :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the
        Azure Resource Manager API.

    Example usage:

    .. code-block:: yaml

        Ensure availability set exists:
            azurearm_compute.availability_set_present:
                - name: aset1
                - resource_group: group1
                - platform_update_domain_count: 5
                - platform_fault_domain_count: 3
                - sku: aligned
                - tags:
                    contact_name: Elmer Fudd Gantry
                - connection_auth: {{ profile }}
                - require:
                  - azurearm_resource: Ensure resource group exists
    '''
    ret = {
        'name': name,
        'result': False,
        'comment': '',
        'changes': {}
    }

    if not isinstance(connection_auth, dict):
        ret['comment'] = 'Connection information must be specified via connection_auth dictionary!'
        return ret

    # Normalize the SKU string into the API's expected {'name': ...} shape.
    if sku:
        sku = {'name': sku.capitalize()}

    aset = __salt__['azurearm_compute.availability_set_get'](
        name,
        resource_group,
        azurearm_log_level='info',
        **connection_auth
    )

    # Existing availability set: diff each field into ret['changes'].
    if 'error' not in aset:
        tag_changes = __utils__['dictdiffer.deep_diff'](aset.get('tags', {}), tags or {})
        if tag_changes:
            ret['changes']['tags'] = tag_changes

        if platform_update_domain_count and (int(platform_update_domain_count) != aset.get('platform_update_domain_count')):
            ret['changes']['platform_update_domain_count'] = {
                'old': aset.get('platform_update_domain_count'),
                'new': platform_update_domain_count
            }

        if platform_fault_domain_count and (int(platform_fault_domain_count) != aset.get('platform_fault_domain_count')):
            ret['changes']['platform_fault_domain_count'] = {
                'old': aset.get('platform_fault_domain_count'),
                'new': platform_fault_domain_count
            }

        if sku and (sku['name'] != aset.get('sku', {}).get('name')):
            ret['changes']['sku'] = {
                'old': aset.get('sku'),
                'new': sku
            }

        if virtual_machines:
            if not isinstance(virtual_machines, list):
                ret['comment'] = 'Virtual machines must be supplied as a list!'
                return ret

            aset_vms = aset.get('virtual_machines', [])
            # NOTE(review): "'id' in aset_vms" tests membership in the LIST,
            # not in each vm dict — this looks like it should be
            # "'id' in vm". Confirm before relying on the VM diff.
            remote_vms = sorted([vm['id'].split('/')[-1].lower() for vm in aset_vms if 'id' in aset_vms])
            local_vms = sorted([vm.lower() for vm in virtual_machines or []])

            if local_vms != remote_vms:
                ret['changes']['virtual_machines'] = {
                    'old': aset_vms,
                    'new': virtual_machines
                }

        if not ret['changes']:
            ret['result'] = True
            ret['comment'] = 'Availability set {0} is already present.'.format(name)
            return ret

        if __opts__['test']:
            ret['result'] = None
            ret['comment'] = 'Availability set {0} would be updated.'.format(name)
            return ret

    else:
        # Availability set does not exist yet: everything is a new change.
        ret['changes'] = {
            'old': {},
            'new': {
                'name': name,
                'virtual_machines': virtual_machines,
                'platform_update_domain_count': platform_update_domain_count,
                'platform_fault_domain_count': platform_fault_domain_count,
                'sku': sku,
                'tags': tags
            }
        }

        if __opts__['test']:
            ret['comment'] = 'Availability set {0} would be created.'.format(name)
            ret['result'] = None
            return ret

    aset_kwargs = kwargs.copy()
    aset_kwargs.update(connection_auth)

    aset = __salt__['azurearm_compute.availability_set_create_or_update'](
        name=name,
        resource_group=resource_group,
        virtual_machines=virtual_machines,
        platform_update_domain_count=platform_update_domain_count,
        platform_fault_domain_count=platform_fault_domain_count,
        sku=sku,
        tags=tags,
        **aset_kwargs
    )

    if 'error' not in aset:
        ret['result'] = True
        ret['comment'] = 'Availability set {0} has been created.'.format(name)
        return ret

    ret['comment'] = 'Failed to create availability set {0}! ({1})'.format(name, aset.get('error'))
    return ret
.. versionadded:: 2019.2.0 Ensure an availability set exists. :param name: Name of the availability set. :param resource_group: The resource group assigned to the availability set. :param tags: A dictionary of strings can be passed as tag metadata to the availability set object. :param platform_update_domain_count: An optional parameter which indicates groups of virtual machines and underlying physical hardware that can be rebooted at the same time. :param platform_fault_domain_count: An optional parameter which defines the group of virtual machines that share a common power source and network switch. :param virtual_machines: A list of names of existing virtual machines to be included in the availability set. :param sku: The availability set SKU, which specifies whether the availability set is managed or not. Possible values are 'Aligned' or 'Classic'. An 'Aligned' availability set is managed, 'Classic' is not. :param connection_auth: A dict with subscription and authentication parameters to be used in connecting to the Azure Resource Manager API. Example usage: .. code-block:: yaml Ensure availability set exists: azurearm_compute.availability_set_present: - name: aset1 - resource_group: group1 - platform_update_domain_count: 5 - platform_fault_domain_count: 3 - sku: aligned - tags: contact_name: Elmer Fudd Gantry - connection_auth: {{ profile }} - require: - azurearm_resource: Ensure resource group exists
def add_f90_to_env(env):
    """Add Builders and construction variables for f90 to an Environment."""
    # Fall back to defaults when the Environment does not define the keys.
    try:
        suffixes = env['F90FILESUFFIXES']
    except KeyError:
        suffixes = ['.f90']

    try:
        pp_suffixes = env['F90PPFILESUFFIXES']
    except KeyError:
        pp_suffixes = []

    DialectAddToEnv(env, "F90", suffixes, pp_suffixes, support_module=1)
Add Builders and construction variables for f90 to an Environment.
def query_flag(ifo, name, start_time, end_time,
               source='any', server="segments.ligo.org",
               veto_definer=None, cache=False):
    """Return the times where the flag is active

    Parameters
    ----------
    ifo: string
        The interferometer to query (H1, L1).
    name: string
        The status flag to query from LOSC.
    start_time: int
        The starting gps time to begin querying from LOSC
    end_time: int
        The end gps time of the query
    source: str, Optional
        Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option may
        also be given. The default is to try GWOSC first then try dqsegdb.
    server: str, Optional
        The server path. Only used with dqsegdb atm.
    veto_definer: str, Optional
        The path to a veto definer to define groups of flags which
        themselves define a set of segments.
    cache: bool
        If true cache the query. Default is not to cache

    Returns
    ---------
    segments: glue.segments.segmentlist
        List of segments
    """
    # A flag may be given as "NAME:VERSION" or just "NAME" (version 1).
    info = name.split(':')
    if len(info) == 2:
        segment_name, version = info
    elif len(info) == 1:
        segment_name = info[0]
        version = 1

    flag_segments = segmentlist([])

    if source in ['GWOSC', 'any']:
        # Special cases as the LOSC convention is backwards from normal
        # LIGO / Virgo operation!!!!
        if (('_HW_INJ' in segment_name and 'NO' not in segment_name) or
                'VETO' in segment_name):
            # Derive the flag as DATA minus its negated counterpart.
            data = query_flag(ifo, 'DATA', start_time, end_time)

            if '_HW_INJ' in segment_name:
                name = 'NO_' + segment_name
            else:
                name = segment_name.replace('_VETO', '')

            negate = query_flag(ifo, name, start_time, end_time, cache=cache)
            return (data - negate).coalesce()

        duration = end_time - start_time
        url = GWOSC_URL.format(get_run(start_time + duration/2),
                               ifo, segment_name,
                               int(start_time), int(duration))

        try:
            fname = download_file(url, cache=cache)
            data = json.load(open(fname, 'r'))
            if 'segments' in data:
                flag_segments = data['segments']
        except Exception as e:
            msg = "Unable to find segments in GWOSC, check flag name or times"
            print(e)
            if source != 'any':
                raise ValueError(msg)
            else:
                # 'any' mode: fall back to a direct dqsegdb query.
                print("Tried and failed GWOSC {}, trying dqsegdb", name)
                return query_flag(ifo, segment_name, start_time, end_time,
                                  source='dqsegdb', server=server,
                                  veto_definer=veto_definer)

    elif source == 'dqsegdb':
        # Let's not hard require dqsegdb to be installed if we never get here.
        try:
            from dqsegdb.apicalls import dqsegdbQueryTimes as query
        except ImportError:
            raise ValueError("Could not query flag. Install dqsegdb"
                             ":'pip install dqsegdb'")

        # The veto definer will allow the use of MACRO names
        # These directly correspond the name defined in the veto definer file.
        if veto_definer is not None:
            veto_def = parse_veto_definer(veto_definer)

        # We treat the veto definer name as if it were its own flag and
        # a process the flags in the veto definer
        if veto_definer is not None and segment_name in veto_def[ifo]:
            for flag in veto_def[ifo][segment_name]:
                segs = query("https", server, ifo, flag['name'],
                             flag['version'], 'active',
                             int(start_time), int(end_time))[0]['active']

                # Apply padding to each segment
                for rseg in segs:
                    seg_start = rseg[0] + flag['start_pad']
                    seg_end = rseg[1] + flag['end_pad']
                    flag_segments.append(segment(seg_start, seg_end))

            # Apply start / end of the veto definer segment
            send = segmentlist([segment([veto_def['start'], veto_def['end']])])
            flag_segments = (flag_segments.coalesce() & send)

        else:
            # Standard case just query directly.
            try:
                segs = query("https", server, ifo, name, version, 'active',
                             int(start_time), int(end_time))[0]['active']
                for rseg in segs:
                    flag_segments.append(segment(rseg[0], rseg[1]))
            except Exception as e:
                print("Could not query flag, check name "
                      " (%s) or times" % segment_name)
                raise e

    else:
        raise ValueError("Source must be dqsegdb or GWOSC."
                         " Got {}".format(source))

    return segmentlist(flag_segments).coalesce()
Return the times where the flag is active Parameters ---------- ifo: string The interferometer to query (H1, L1). name: string The status flag to query from LOSC. start_time: int The starting gps time to begin querying from LOSC end_time: int The end gps time of the query source: str, Optional Choice between "GWOSC" or "dqsegdb". If dqsegdb, the server option may also be given. The default is to try GWOSC first then try dqsegdb. server: str, Optional The server path. Only used with dqsegdb atm. veto_definer: str, Optional The path to a veto definer to define groups of flags which themselves define a set of segments. cache: bool If true cache the query. Default is not to cache Returns --------- segments: glue.segments.segmentlist List of segments
def build_src(ctx, dest=None):
    """Build a source archive (sdist), optionally into ``dest``."""
    # Resolve a relative dest against the caller's cwd BEFORE changing
    # directory, matching the original call-time semantics.
    if dest and not dest.startswith('/'):
        dest = os.path.join(os.getcwd(), dest)
    os.chdir(PROJECT_DIR)
    command = 'python setup.py sdist'
    if dest:
        command += ' --dist-dir {0}'.format(dest)
    ctx.run(command)
build source archive
def polynomial(img, mask, inplace=False, replace_all=False,
               max_dev=1e-5, max_iter=20, order=2):
    '''
    Replace all masked values with a 2d-polynomial fit, iteratively
    re-masking high-gradient areas of the fitted image until the result
    converges (mean change below ``max_dev``) or ``max_iter`` is reached.

    Returns the fitted array, clipped to [0, 1].
    (NOTE(review): the original docstring claimed four return values, but
    the code returns only the fitted array — confirm with callers.)
    '''
    if inplace:
        out = img
    else:
        out = img.copy()
    lastm = 0
    for _ in range(max_iter):
        out2 = polyfit2dGrid(out, mask, order=order, copy=not inplace,
                             replace_all=replace_all)
        if replace_all:
            # Full replacement: a single fit pass is all that is needed.
            out = out2
            break
        # Mean absolute change between successive fits = convergence metric.
        res = (np.abs(out2 - out)).mean()
        print('residuum: ', res)
        if res < max_dev:
            out = out2
            break
        out = out2
        # Re-mask the high-gradient regions of the new fit for the next pass.
        mask = _highGrad(out)
        m = mask.sum()
        # Stop when the mask stops changing or covers the whole image.
        if m == lastm or m == img.size:
            break
        lastm = m
    out = np.clip(out, 0, 1, out=out)  # if inplace else None)
    return out
replace all masked values calculate flatField from 2d-polynomal fit filling all high gradient areas within averaged fit-image returns flatField, average background level, fitted image, valid indices mask
def _k_value_tapered_reduction(ent_pipe_id, exit_pipe_id, fitting_angle, re, f):
    """Returns the minor loss coefficient for a tapered reducer.

    Parameters:
        ent_pipe_id: Entrance pipe's inner diameter.
        exit_pipe_id: Exit pipe's inner diameter.
        fitting_angle: Fitting angle between entrance and exit pipes.
        re: Reynold's number.
        f: Darcy friction factor.

    Raises:
        ValueError: if fitting_angle is not in (0, 180].
    """
    # Validate the angle before doing any work.
    # Bug fix: the original error path concatenated a str with a number,
    # raising TypeError instead of the intended ValueError.
    if not 0 < fitting_angle <= 180:
        raise ValueError('k_value_tapered_reduction: The reducer angle ({0}) '
                         'cannot be outside of [0,180].'.format(fitting_angle))

    k_value_square_reduction = _k_value_square_reduction(ent_pipe_id,
                                                         exit_pipe_id,
                                                         re, f)
    # NOTE(review): np.sin is applied to fitting_angle / 2 directly, which is
    # only correct for radians, yet the branch bounds (45, 180) read like
    # degrees — confirm the expected unit with callers.
    if fitting_angle > 45:
        return k_value_square_reduction * np.sqrt(np.sin(fitting_angle / 2))
    return k_value_square_reduction * 1.6 * np.sin(fitting_angle / 2)
Returns the minor loss coefficient for a tapered reducer. Parameters: ent_pipe_id: Entrance pipe's inner diameter. exit_pipe_id: Exit pipe's inner diameter. fitting_angle: Fitting angle between entrance and exit pipes. re: Reynold's number. f: Darcy friction factor.
def _kalman_prediction_step_SVD(k, p_m, p_P, p_dyn_model_callable,
                                calc_grad_log_likelihood=False,
                                p_dm=None, p_dP=None):
    """
    Discrete prediction step (SVD-based Kalman filter).

    Input:
    -------------------------
    k: int
        Iteration No. Starts at 0. Total number of iterations equal to the
        number of measurements.

    p_m: matrix of size (state_dim, time_series_no)
        Mean value from the previous step. For "multiple time series mode"
        it is a matrix whose second dimension corresponds to different
        time series.

    p_P: tuple (Prev_cov, S, V)
        Covariance matrix from the previous step and its SVD decomposition.
        Prev_cov = V * S * V.T

    p_dyn_model_callable: object
        Supplies the dynamic model: Ak, Qk, Q_srk, f_a, dAk, dQk.

    calc_grad_log_likelihood: boolean
        Whether to calculate gradient of the marginal likelihood of the
        state-space model. If true then p_dm and p_dP must be provided.

    p_dm: 3D array (state_dim, time_series_no, parameters_no)
        Mean derivatives from the previous step.

    p_dP: 3D array (state_dim, state_dim, parameters_no)
        Covariance derivatives from the previous step.

    Output:
    ----------------------------
    m_pred, P_pred, dm_pred, dP_pred: matrices, 3D objects
        Results of the prediction step; dm_pred and dP_pred are None when
        calc_grad_log_likelihood is False.
    """
    # covariance from the previous step and its SVD decomposition
    # p_prev_cov = v * S * V.T
    Prev_cov, S_old, V_old = p_P
    #p_prev_cov_tst = np.dot(p_V, (p_S * p_V).T) # reconstructed covariance from the previous step

    # index correspond to values from previous iteration.
    A = p_dyn_model_callable.Ak(k, p_m, Prev_cov)  # state transition matrix (or Jacobian)
    Q = p_dyn_model_callable.Qk(k)  # state noise matrix
    # Square root of Q is necessary for the SVD-based covariance update below.
    Q_sr = p_dyn_model_callable.Q_srk(k)

    # Prediction step ->
    m_pred = p_dyn_model_callable.f_a(k, p_m, A)  # predicted mean

    # Covariance prediction via SVD: stack the propagated square root of the
    # old covariance with the square root of the process noise, and take the
    # SVD of the stacked matrix instead of forming A P A^T + Q directly.
    svd_1_matr = np.vstack(((np.sqrt(S_old) * np.dot(A, V_old)).T, Q_sr.T))
    (U, S, Vh) = sp.linalg.svd(svd_1_matr,
                               full_matrices=False, compute_uv=True,
                               overwrite_a=False, check_finite=True)

    # predicted variance computed by the regular method. For testing
    #P_pred_tst = A.dot(Prev_cov).dot(A.T) + Q

    V_new = Vh.T
    S_new = S**2  # singular values of the stack are sqrt of the eigenvalues

    P_pred = np.dot(V_new * S_new, V_new.T)  # prediction covariance
    P_pred = (P_pred, S_new, Vh.T)
    # Prediction step <-

    # derivatives
    if calc_grad_log_likelihood:
        dA_all_params = p_dyn_model_callable.dAk(k)  # derivatives of A wrt parameters
        dQ_all_params = p_dyn_model_callable.dQk(k)  # derivatives of Q wrt parameters

        param_number = p_dP.shape[2]

        # p_dm, p_dP - derivatives from the previous step
        dm_pred = np.empty(p_dm.shape)
        dP_pred = np.empty(p_dP.shape)

        for j in range(param_number):
            dA = dA_all_params[:, :, j]
            dQ = dQ_all_params[:, :, j]

            #dP = p_dP[:,:,j]
            #dm = p_dm[:,:,j]

            # prediction step derivatives for current parameter:
            dm_pred[:, :, j] = np.dot(dA, p_m) + np.dot(A, p_dm[:, :, j])

            # d(A P A^T)/dtheta = dA P A^T + (dA P A^T)^T + A dP A^T + dQ
            dP_pred[:, :, j] = np.dot(dA, np.dot(Prev_cov, A.T))
            dP_pred[:, :, j] += dP_pred[:, :, j].T
            dP_pred[:, :, j] += np.dot(A, np.dot(p_dP[:, :, j], A.T)) + dQ

            dP_pred[:, :, j] = 0.5*(dP_pred[:, :, j] + dP_pred[:, :, j].T)  #symmetrize
    else:
        dm_pred = None
        dP_pred = None

    return m_pred, P_pred, dm_pred, dP_pred
Desctrete prediction function Input: k:int Iteration No. Starts at 0. Total number of iterations equal to the number of measurements. p_m: matrix of size (state_dim, time_series_no) Mean value from the previous step. For "multiple time series mode" it is matrix, second dimension of which correspond to different time series. p_P: tuple (Prev_cov, S, V) Covariance matrix from the previous step and its SVD decomposition. Prev_cov = V * S * V.T The tuple is (Prev_cov, S, V) p_dyn_model_callable: object calc_grad_log_likelihood: boolean Whether to calculate gradient of the marginal likelihood of the state-space model. If true then the next parameter must provide the extra parameters for gradient calculation. p_dm: 3D array (state_dim, time_series_no, parameters_no) Mean derivatives from the previous step. For "multiple time series mode" it is 3D array, second dimension of which correspond to different time series. p_dP: 3D array (state_dim, state_dim, parameters_no) Mean derivatives from the previous step Output: ---------------------------- m_pred, P_pred, dm_pred, dP_pred: metrices, 3D objects Results of the prediction steps.
def evaluate_trace_request(data, tracer=tracer):
    """
    Evaluate given string trace request.

    Usage::

        Umbra -t "{'umbra.engine' : ('.*', 0), 'umbra.preferences' : (r'.*', 0)}"
        Umbra -t "['umbra.engine', 'umbra.preferences']"
        Umbra -t "'umbra.engine, umbra.preferences"

    :param data: Trace request.
    :type data: unicode
    :param tracer: Tracer.
    :type tracer: object
    :return: Definition success.
    :rtype: bool
    :raises ValueError: when the evaluated request is not a str, list or dict.
    """
    data = ast.literal_eval(data)

    if isinstance(data, str):
        # Comma-separated module names; default (pattern, flags) per module.
        modules = dict.fromkeys(map(lambda x: x.strip(), data.split(",")),
                                (None, None))
    elif isinstance(data, list):
        modules = dict.fromkeys(data, (None, None))
    elif isinstance(data, dict):
        modules = data
    else:
        # Bug fix: previously fell through with 'modules' unbound, raising a
        # confusing NameError instead of a clear error.
        raise ValueError(
            "Unsupported trace request type: '{0}'.".format(
                type(data).__name__))

    # 'items' works on both Python 2 and 3 (the original 'iteritems' is
    # Python-2-only).
    for module, (pattern, flags) in modules.items():
        __import__(module)
        pattern = pattern if pattern is not None else r".*"
        flags = flags if flags is not None else re.IGNORECASE
        trace_module(sys.modules[module], tracer, pattern, flags)
    return True
Evaluate given string trace request. Usage:: Umbra -t "{'umbra.engine' : ('.*', 0), 'umbra.preferences' : (r'.*', 0)}" Umbra -t "['umbra.engine', 'umbra.preferences']" Umbra -t "'umbra.engine, umbra.preferences" :param data: Trace request. :type data: unicode :param tracer: Tracer. :type tracer: object :return: Definition success. :rtype: bool