code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
async def async_run_command(self, command, retry=False):
    """Run a command through the SSH connection.

    Connects first if there is no current connection. On a closed
    channel the connection is re-established once and the command is
    retried; on timeout the client is dropped.

    :param command: shell command to execute on the host
    :param retry: internal flag marking the single retry attempt
    :return: list of output lines, or an empty list on failure
    """
    if not self.is_connected:
        await self.async_connect()
    try:
        # Prefix the PATH export so binaries resolve consistently on
        # every host; hard 9 second timeout for the remote command.
        result = await asyncio.wait_for(self._client.run(
            "%s && %s" % (_PATH_EXPORT_COMMAND, command)), 9)
    except asyncssh.misc.ChannelOpenError:
        if not retry:
            await self.async_connect()
            # BUG FIX: the retried call must be awaited; previously a
            # coroutine object was returned instead of the output list.
            return await self.async_run_command(command, retry=True)
        self._connected = False
        _LOGGER.error("No connection to host")
        return []
    # BUG FIX: asyncio.wait_for raises asyncio.TimeoutError, which is
    # not the builtin TimeoutError before Python 3.11.
    except asyncio.TimeoutError:
        del self._client
        self._connected = False
        _LOGGER.error("Host timeout.")
        return []
    self._connected = True
    return result.stdout.split('\n')
Run commands through an SSH connection. Connect to the SSH server if not currently connected, otherwise use the existing connection.
def get_by_code(self, code):
    """Retrieve a language by ISO code or culture code.

    :param code: iso_639_1 (2 chars), iso_639_2/3 (3 chars), or a
        culture code containing '_' or '-' (e.g. "en_US")
    :return: a Language object
    :raises ValueError: if ``code`` has an unsupported length
    """
    # A '_' or '-' marks a culture code such as "en_US"; normalise the
    # separator to '-' before the lookup.
    if any(x in code for x in ('_', '-')):
        cc = CultureCode.objects.get(code=code.replace('_', '-'))
        return cc.language
    elif len(code) == 2:
        return self.get(iso_639_1=code)
    elif len(code) == 3:
        # A 3-letter code may match any of the three ISO-639 variants.
        return self.get(Q(iso_639_2T=code) |
                        Q(iso_639_2B=code) |
                        Q(iso_639_3=code))
    raise ValueError(
        'Code must be either 2, or 3 characters: "%s" is %s' % (code, len(code)))
Retrieve a language by a code. :param code: iso code (any of the three) or its culture code :return: a Language object
def get_dependencies(self, recursive=False):
    """Collect the cells referenced by this cell.

    Parameters
    ----------
    recursive : bool
        If True, also include cascading (indirect) dependencies.

    Returns
    -------
    out : set of ``Cell``
        Cells referenced by this cell.
    """
    found = set()
    for item in self.elements:
        if isinstance(item, (CellReference, CellArray)):
            if recursive:
                found.update(item.ref_cell.get_dependencies(True))
            found.add(item.ref_cell)
    return found
Returns the set of cells included in this cell as references. Parameters ---------- recursive : bool If True returns cascading dependencies. Returns ------- out : set of ``Cell`` Set of the cells referenced by this cell.
def common(self, other):
    """Return the shared part of two multipliers.

    This is the largest multiplier that can safely be subtracted from
    both originals; it may be the "zero" multiplier.
    """
    shared_mandatory = min(self.mandatory, other.mandatory)
    shared_optional = min(self.optional, other.optional)
    return multiplier(shared_mandatory,
                      shared_mandatory + shared_optional)
Find the shared part of two multipliers. This is the largest multiplier which can be safely subtracted from both the originals. This may return the "zero" multiplier.
def get_task_cls(cls, name):
    """Return the unambiguous task class registered under ``name``.

    :raises TaskClassNotFoundException: when the name is unknown
    :raises TaskClassAmbigiousException: when several classes share it
    """
    found = cls._get_reg().get(name)
    if not found:
        raise TaskClassNotFoundException(cls._missing_task_msg(name))
    if found == cls.AMBIGUOUS_CLASS:
        raise TaskClassAmbigiousException('Task %r is ambiguous' % name)
    return found
Returns an unambiguous class or raises an exception.
def load_meta_data(self, path=None, recursively=True):
    """Load meta data of the state machine model from the file system.

    Existing meta data is replaced when the loaded data is non-empty,
    and observers are notified via the meta signal.

    :param str path: Optional path to the meta data file. If not given,
        the path is derived from the state machine's file system path.
    :param bool recursively: If True, also load the meta data of the
        root state and its children.
    """
    meta_data_path = path if path is not None else self.state_machine.file_system_path
    if meta_data_path:
        path_meta_data = os.path.join(meta_data_path, storage.FILE_NAME_META_DATA)
        try:
            tmp_meta = storage.load_data_file(path_meta_data)
        except ValueError:
            # Missing or unreadable file: fall back to empty meta data.
            tmp_meta = {}
    else:
        tmp_meta = {}
    tmp_meta = Vividict(tmp_meta)
    if recursively:
        root_state_path = None if not path else os.path.join(path, self.root_state.state.state_id)
        self.root_state.load_meta_data(root_state_path)
    if tmp_meta:
        self.meta = tmp_meta
        # Notify observers that all meta data has been (re)loaded.
        self.meta_signal.emit(MetaSignalMsg("load_meta_data", "all", True))
Load meta data of state machine model from the file system The meta data of the state machine model is loaded from the file system and stored in the meta property of the model. Existing meta data is removed. Also the meta data of root state and children is loaded. :param str path: Optional path to the meta data file. If not given, the path will be derived from the state machine's path on the filesystem
def regroup(target, expression):
    """Regroup a list of alike objects by a common attribute.

    ``target`` must already be sorted by the grouping key, as with
    Django's ``{% regroup %}`` tag (itertools.groupby only groups
    adjacent equal keys).

    :param target: iterable of objects to group
    :param expression: resolvable whose ``resolve(obj, True)`` yields
        the grouping key for each object
    :return: list of ``{'grouper': key, 'list': [...]}`` dicts, or
        ``''`` when ``target`` is empty
    """
    if not target:
        return ''
    # BUG FIX: the original iterated an undefined name ``obj_list``;
    # the intended iterable is ``target``.
    return [
        {'grouper': key, 'list': list(val)}
        for key, val in
        groupby(target, lambda v, f=expression.resolve: f(v, True))
    ]
Regroups a list of alike objects by a common attribute. This complex tag is best illustrated by use of an example: say that ``people`` is a list of ``Person`` objects that have ``first_name``, ``last_name``, and ``gender`` attributes, and you'd like to display a list that looks like: * Male: * George Bush * Bill Clinton * Female: * Margaret Thatcher * Colendeeza Rice * Unknown: * Pat Smith The following snippet of template code would accomplish this dubious task:: {% regroup people by gender as grouped %} <ul> {% for group in grouped %} <li>{{ group.grouper }} <ul> {% for item in group.list %} <li>{{ item }}</li> {% endfor %} </ul> {% endfor %} </ul> As you can see, ``{% regroup %}`` populates a variable with a list of objects with ``grouper`` and ``list`` attributes. ``grouper`` contains the item that was grouped by; ``list`` contains the list of objects that share that ``grouper``. In this case, ``grouper`` would be ``Male``, ``Female`` and ``Unknown``, and ``list`` is the list of people with those genders. Note that ``{% regroup %}`` does not work when the list to be grouped is not sorted by the key you are grouping by! This means that if your list of people was not sorted by gender, you'd need to make sure it is sorted before using it, i.e.:: {% regroup people|dictsort:"gender" by gender as grouped %}
def describe(DomainName, region=None, key=None, keyid=None, profile=None):
    """Describe the configuration of an Elasticsearch domain.

    Returns the interesting option groups under the ``domain`` key, or
    ``{'domain': None}`` when no config exists; boto errors are
    reported under the ``error`` key.

    CLI Example:

    .. code-block:: bash

        salt myminion boto_elasticsearch_domain.describe mydomain
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    try:
        response = conn.describe_elasticsearch_domain_config(DomainName=DomainName)
        if not (response and 'DomainConfig' in response):
            return {'domain': None}
        config = response['DomainConfig']
        wanted = ('ElasticsearchClusterConfig', 'EBSOptions',
                  'AccessPolicies', 'SnapshotOptions', 'AdvancedOptions')
        return {'domain': {k: config.get(k, {}).get('Options')
                           for k in wanted if k in config}}
    except ClientError as e:
        return {'error': __utils__['boto3.get_error'](e)}
Given a domain name describe its properties. Returns a dictionary of interesting properties. CLI Example: .. code-block:: bash salt myminion boto_elasticsearch_domain.describe mydomain
def vars_class(cls):
    """Return a dict of vars for the given class, including ancestors.

    Unlike plain `vars`, attributes inherited from base classes are
    included; classes earlier in the MRO override their ancestors.
    """
    merged = {}
    for klass in reversed(cls.__mro__):
        merged.update(vars(klass))
    return merged
Return a dict of vars for the given class, including all ancestors. This differs from the usual behaviour of `vars` which returns attributes belonging to the given class and not its ancestors.
def InterpolateValue(self,
                     value,
                     type_info_obj=type_info.String(),
                     default_section=None,
                     context=None):
    """Interpolate the value and parse it with the appropriate type.

    Strings are run through the interpolator then converted by
    ``type_info_obj``; lists are interpolated element-wise.

    :raises InterpolationError: (same subtype) with the offending value
        appended to the message.
    """
    # NOTE(review): the default ``type_info.String()`` is created once
    # at definition time and shared between calls — confirm intended.
    if isinstance(value, Text):
        try:
            value = StringInterpolator(
                value,
                self,
                default_section=default_section,
                parameter=type_info_obj.name,
                context=context).Parse()
        except InterpolationError as e:
            # Re-raise the same exception type with the offending value
            # appended for easier debugging.
            message = "{cause}: {detail}".format(cause=e, detail=value)
            raise type(e)(message)
        value = type_info_obj.FromString(value)
    if isinstance(value, list):
        value = [
            self.InterpolateValue(
                v, default_section=default_section, context=context)
            for v in value
        ]
    return value
Interpolate the value and parse it with the appropriate type.
def interstore(self, dest, *others):
    """Store the intersection of this set and ``others`` under ``dest``.

    :param dest: the name of the key to store the intersection
    :param others: one or more :py:class:`Set` instances
    :returns: a :py:class:`Set` referencing ``dest``
    """
    keys = [self.key] + [other.key for other in others]
    self.database.sinterstore(dest, keys)
    return self.database.Set(dest)
Store the intersection of the current set and one or more others in a new key. :param dest: the name of the key to store intersection :param others: One or more :py:class:`Set` instances :returns: A :py:class:`Set` referencing ``dest``.
def extended_fade_out(self, segment, duration):
    """Add a fade-out that extends the end of the segment.

    :param segment: Segment to fade out
    :type segment: :py:class:`radiotool.composer.Segment`
    :param duration: Duration of fade-out (in seconds)
    :returns: The fade that has been added to the composition
    :rtype: :py:class:`Fade`
    :raises Exception: if the extended segment would run past the end
        of its source track
    """
    # Fade length in samples.
    dur = int(duration * segment.track.samplerate)
    if segment.start + segment.duration + dur < segment.track.duration:
        # Lengthen the segment so the fade has material to play over.
        segment.duration += dur
    else:
        raise Exception(
            "Cannot create fade-out that extends past the track's end")
    # The fade covers the newly appended tail of the segment.
    score_loc_in_seconds = segment.comp_location_in_seconds + segment.duration_in_seconds - duration
    f = Fade(segment.track, score_loc_in_seconds, duration, 1.0, 0.0)
    self.add_dynamic(f)
    return f
Add a fade-out to a segment that extends the end of the segment. :param segment: Segment to fade out :type segment: :py:class:`radiotool.composer.Segment` :param duration: Duration of fade-out (in seconds) :returns: The fade that has been added to the composition :rtype: :py:class:`Fade`
def happybirthday(person):
    """Sing "Happy Birthday" to *person*, one line every two seconds."""
    verse = 'Happy Birthday To You'
    print(verse)
    time.sleep(2)
    print(verse)
    time.sleep(2)
    capitalised = str(person[0].upper()) + str(person[1:])
    print('Happy Birthday Dear ' + capitalised)
    time.sleep(2)
    print(verse)
Sing Happy Birthday
def cylinder(target, throat_length='throat.length', throat_diameter='throat.diameter'):
    r"""Calculate throat volume assuming a cylindrical shape.

    Parameters
    ----------
    target : OpenPNM Object
        The object this model is associated with; controls the length
        of the calculated array and gives access to other properties.
    throat_length, throat_diameter : strings
        Dictionary keys of the throat length and diameter arrays.

    Notes
    -----
    Does NOT account for the volume of the throat/pore intersection.
    """
    length = target[throat_length]
    diameter = target[throat_diameter]
    # V = pi/4 * L * D^2
    return _sp.pi / 4 * length * diameter ** 2
r""" Calculate throat volume assuming a cylindrical shape Parameters ---------- target : OpenPNM Object The object which this model is associated with. This controls the length of the calculated array, and also provides access to other necessary properties. throat_length and throat_diameter : strings The dictionary keys containing the arrays with the throat diameter and length values. Notes ----- At present this model does NOT account for the volume represented by the intersection of the throat with a spherical pore body.
def sequence(self):
    """Return the sequence of the `Polynucleotide` as a string.

    Returns
    -------
    sequence : str
        Space-separated monomer codes.
    """
    codes = (monomer.mol_code for monomer in self._monomers)
    return ' '.join(codes)
Returns the sequence of the `Polynucleotide` as a string. Returns ------- sequence : str String of the monomer sequence of the `Polynucleotide`.
def disconnect(self, reason):
    """Disconnect this client connection for the given ``reason``."""
    server = self._server
    server._disconnect_client(self._conn_key, self, reason)
Disconnect this client connection for specified reason
def rule(self):
    """Return the finite differencing rule for a nominal unit step.

    The rule must later be scaled to the actual local step size.
    Computed rules are memoised in the module-level ``FD_RULES`` cache
    keyed on (step_ratio, parity, num_terms).
    """
    step_ratio = self.step_ratio
    method = self.method
    # Multicomplex differentiation and the 0th derivative need no
    # differencing weights.
    if method in ('multicomplex', ) or self.n == 0:
        return np.ones((1,))
    order, method_order = self.n - 1, self._method_order
    parity = self._parity(method, order, method_order)
    step = self._richardson_step()
    num_terms, ix = (order + method_order) // step, order // step
    fd_rules = FD_RULES.get((step_ratio, parity, num_terms))
    if fd_rules is None:
        # Build and pseudo-invert the FD matrix once, then cache it.
        fd_mat = self._fd_matrix(step_ratio, parity, num_terms)
        fd_rules = linalg.pinv(fd_mat)
        FD_RULES[(step_ratio, parity, num_terms)] = fd_rules
    if self._flip_fd_rule:
        return -fd_rules[ix]
    return fd_rules[ix]
Return finite differencing rule. The rule is for a nominal unit step size, and must be scaled later to reflect the local step size. Member methods used ------------------- _fd_matrix Member variables used --------------------- n order method
def next_task(self):
    """Return the next task to be executed, or None when nothing is ready.

    Asks for the next Node to be evaluated and wraps it in the Task
    subclass this taskmaster was initialized with.
    """
    node = self._find_next_ready_node()
    if node is None:
        return None
    executor = node.get_executor()
    if executor is None:
        return None
    tlist = executor.get_all_targets()
    task = self.tasker(self, tlist, node in self.original_top, node)
    try:
        task.make_ready()
    except Exception as e :
        # Record whatever went wrong so it can be attached to the task.
        self.ready_exc = sys.exc_info()
    if self.ready_exc:
        # Hand the pending exception to the task and clear it.
        task.exception_set(self.ready_exc)
        self.ready_exc = None
    return task
Returns the next task to be executed. This simply asks for the next Node to be evaluated, and then wraps it in the specific Task subclass with which we were initialized.
def find_outputs_in_range(self, ifo, current_segment, useSplitLists=False):
    """Return the Files most appropriate for the supplied time range.

    That is, the Files whose coverage time has the largest overlap
    with the supplied time range.

    Parameters
    ----------
    ifo : string
        Name of the ifo (or ifos) that the File should correspond to.
    current_segment : glue.segment.segment
        The segment of time that files must intersect.

    Returns
    -------
    FileList
        The Files most appropriate for the time range.
    """
    currsegment_list = segments.segmentlist([current_segment])
    overlap_files = self.find_all_output_in_range(ifo, current_segment,
                                                  useSplitLists=useSplitLists)
    # Overlap duration of each candidate with the requested segment.
    overlap_windows = [abs(i.segment_list & currsegment_list) for i in overlap_files]
    if not overlap_windows:
        return []
    # Pick the segment list with the maximal overlap, then keep every
    # file sharing exactly that segment list.
    overlap_windows = numpy.array(overlap_windows, dtype = int)
    segmentLst = overlap_files[overlap_windows.argmax()].segment_list
    output_files = [f for f in overlap_files if f.segment_list==segmentLst]
    return output_files
Return the list of Files that is most appropriate for the supplied time range. That is, the Files whose coverage time has the largest overlap with the supplied time range. Parameters ----------- ifo : string Name of the ifo (or ifos) that the File should correspond to current_segment : glue.segment.segment The segment of time that files must intersect. Returns -------- FileList class The list of Files that are most appropriate for the time range
def add_connection_score(self, node):
    """Score *node* for receiving a new connection.

    :returns: a negative number of seconds while the node is in its
        connect blackout, ``-inf`` when it is at the connection limit,
        and otherwise a large score that decreases with the node's
        current connection count (less-loaded nodes score higher).
    """
    conntime = node.seconds_until_connect_ok()
    if conntime > 0:
        self.log("not considering %r for new connection; has %r left on "
                 "connect blackout" % (node, conntime))
        return -conntime
    numconns = self.num_connectors_to(node)
    if numconns >= self.max_connections_per_node:
        return float('-Inf')
    # BUG FIX: ``sys.maxint`` was removed in Python 3; ``sys.maxsize``
    # is the equivalent "largest score" sentinel.
    return sys.maxsize - numconns
Return a numeric value that determines this node's score for adding a new connection. A negative value indicates that no connections should be made to this node for at least that number of seconds. A value of -inf indicates no connections should be made to this node for the foreseeable future. This score should ideally take into account the connectedness of available nodes, so that those with less current connections will get more.
def get_average_along_axis(self, ind):
    """Average of the volumetric "total" data along one axis.

    Useful e.g. for visualizing Hartree potentials from a LOCPOT file.

    Args:
        ind (int): Index of the axis (0, 1 or 2).

    Returns:
        1-D array of the average total along that axis.
    """
    grid = self.data["total"]
    dims = self.dim
    # Sum over the two axes other than ``ind``.
    if ind == 0:
        collapsed = np.sum(np.sum(grid, axis=1), 1)
    elif ind == 1:
        collapsed = np.sum(np.sum(grid, axis=0), 1)
    else:
        collapsed = np.sum(np.sum(grid, axis=0), 0)
    # Normalise by the number of grid points that were summed over.
    return collapsed / dims[(ind + 1) % 3] / dims[(ind + 2) % 3]
Get the averaged total of the volumetric data along a certain axis direction. For example, useful for visualizing Hartree Potentials from a LOCPOT file. Args: ind (int): Index of axis. Returns: Average total along axis
def load(self):
    """Load and return the Python object.

    Loads via ``loader(filename)`` when the cache is stale (``dirty``),
    refreshing the pickled cache; otherwise reads the pickled cache
    file. The result is memoised on the instance.
    """
    if self._dict is None:
        if self.dirty:
            self._dict = self._loader(self.filename)
            self.cache()
        else:
            # NOTE(review): ``cPickle`` suggests Python 2 or an alias
            # import — confirm how it is bound at module level.
            with open(self.cachename, 'rb') as stream:
                self._dict = cPickle.load(stream)
    return self._dict
Loads the Python object Loads the Python object, either via loader(filename) or the pickled cache file, whichever was modified most recently.
def text(self):
    """Render the text contents inside this element, without HTML tags."""
    parts = []
    for node in self.childs:
        if isinstance(node, Tag):
            parts.append(node.text())
        elif isinstance(node, Content):
            parts.append(node.render())
        else:
            # Plain strings are included verbatim.
            parts.append(node)
    return " ".join(parts)
Renders the contents inside this element, without html tags.
def userlogin(self, event):
    """Replay any ongoing alerts to a newly connected client."""
    client_uuid = event.clientuuid
    self.log(event.user, pretty=True, lvl=verbose)
    self.log('Adding client')
    # Remember the client, then send every active alert to it.
    self.clients[client_uuid] = event.user
    for _topic, alert in self.alerts.items():
        self.alert(client_uuid, alert)
Checks if an alert is ongoing and alerts the newly connected client, if so.
def setSystemProperty(cls, key, value):
    """Set a Java system property, such as ``spark.executor.memory``.

    Must be invoked before instantiating :class:`SparkContext`.
    """
    SparkContext._ensure_initialized()
    jvm = SparkContext._jvm
    jvm.java.lang.System.setProperty(key, value)
Set a Java system property, such as spark.executor.memory. This must be invoked before instantiating SparkContext.
def _effectupdate_raise_line_padding_on_focus(self, time_passed):
    """Gradually enlarge the line padding of the focused option.

    :param time_passed: seconds elapsed since the previous update tick
    """
    data = self._effects['raise-line-padding-on-focus']
    # Padding change rate, in pixels per second.
    pps = data['padding_pps']
    for i, option in enumerate(self.options):
        if i == self.option:
            # Focused line: grow towards the target, clamping at it.
            if option['padding_line'] < data['padding']:
                option['padding_line'] += pps * time_passed
            elif option['padding_line'] > data['padding']:
                option['padding_line'] = data['padding']
        elif option['padding_line']:
            # Unfocused lines shrink back towards zero; an overshoot
            # below zero is clamped on the next tick.
            if option['padding_line'] > 0:
                option['padding_line'] -= pps * time_passed
            elif option['padding_line'] < 0:
                option['padding_line'] = 0
Gradually enlarge the padding of the focused line.
def relieve_all_models(self):
    """Relieve all registered models, then forget them.

    BUG FIX: the original used ``map(...)``, which is lazy in
    Python 3, so ``relieve_model`` was never actually invoked.
    """
    # Iterate a snapshot, since relieve_model may mutate the registry.
    for model in list(self.__registered_models):
        self.relieve_model(model)
    self.__registered_models.clear()
Relieve all registered models The method uses the set of registered models to relieve them.
def bk_light(cls):
    """Make the current background color light."""
    attrs = cls._get_text_attributes()
    attrs |= win32.BACKGROUND_INTENSITY
    cls._set_text_attributes(attrs)
Make the current background color light.
def _is_accepted(self, element_tag): element_tag = element_tag.lower() if self._ignored_tags is not None \ and element_tag in self._ignored_tags: return False if self._followed_tags is not None: return element_tag in self._followed_tags else: return True
Return if the link is accepted by the filters.
def GetNodeStorageUnits(r, node, storage_type, output_fields):
    """Get the storage units for a node.

    @type node: str
    @param node: the node whose storage units to return
    @type storage_type: str
    @param storage_type: storage type whose units to return
    @type output_fields: str
    @param output_fields: storage type fields to return
    @rtype: int
    @return: job id where results can be retrieved
    """
    params = {
        "storage_type": storage_type,
        "output_fields": output_fields,
    }
    endpoint = "/2/nodes/%s/storage" % node
    return r.request("get", endpoint, query=params)
Gets the storage units for a node. @type node: str @param node: the node whose storage units to return @type storage_type: str @param storage_type: storage type whose units to return @type output_fields: str @param output_fields: storage type fields to return @rtype: int @return: job id where results can be retrieved
def get_fields(self, strip_labels=False):
    """Hook to dynamically change the fields that will be displayed.

    :param strip_labels: when True, (field, label) pairs are collapsed
        to just the field name
    """
    if not strip_labels:
        return self.fields
    stripped = []
    for field in self.fields:
        # Same check as the original: only exact tuple/list entries
        # are collapsed to their first element.
        if type(field) in (tuple, list):
            stripped.append(field[0])
        else:
            stripped.append(field)
    return stripped
Hook to dynamically change the fields that will be displayed
def second_textx_model(self, model_parser):
    """Cross-reference resolving for the parser model (second pass)."""
    if self.grammar_parser.debug:
        self.grammar_parser.dprint("RESOLVING MODEL PARSER: second_pass")
    # Resolve rule references, then infer rule types, then resolve
    # class references against the metamodel.
    self._resolve_rule_refs(self.grammar_parser, model_parser)
    self._determine_rule_types(model_parser.metamodel)
    self._resolve_cls_refs(self.grammar_parser, model_parser)
    return model_parser
Cross reference resolving for parser model.
def __calculate_audio_frames(self):
    """Align audio playback with the video clock.

    Should be called after a seeking operation or when resuming from
    a pause.
    """
    if self.audioformat is None:
        return
    start_frame = self.clock.current_frame
    totalsize = int(self.clip.audio.fps*self.clip.audio.duration)
    # Chunk boundaries of the audio stream in samples; the final
    # (possibly partial) chunk ends exactly at totalsize.
    self.audio_times = list(range(0, totalsize, self.audioformat['buffersize'])) + [totalsize]
    # NOTE(review): dropping ``start_frame`` entries assumes one audio
    # chunk per video frame — confirm against the clock's units.
    del(self.audio_times[0:start_frame])
Aligns audio with video. This should be called for instance after a seeking operation or resuming from a pause.
def update_nb_metadata(nb_path=None, title=None, summary=None, keywords='fastai', overwrite=True, **kwargs):
    """Create jekyll metadata for the notebook at ``nb_path``.

    Only non-None values (plus any extra ``kwargs``) are written into
    the notebook's ``metadata.jekyll`` section; the notebook is then
    saved and re-signed so Jupyter still trusts it.
    """
    # NOTE(review): the ``overwrite`` parameter is currently unused.
    nb = read_nb(nb_path)
    data = {'title': title, 'summary': summary, 'keywords': keywords, **kwargs}
    # Drop unset entries so nothing is clobbered with None.
    data = {k:v for (k,v) in data.items() if v is not None}
    if not data: return
    nb['metadata']['jekyll'] = data
    write_nb(nb, nb_path)
    # Re-sign so the modified notebook remains trusted.
    NotebookNotary().sign(nb)
Creates jekyll metadata for given notebook path.
def contains_info(self, key, value):
    """Count the cards in the deck whose info ``key`` equals ``value``.

    Requires a library to be stored in the deck instance.

    :returns: number of matching cards, or ``None`` when the deck has
        no library (as the original docstring promises).
    """
    if self.library is None:
        # BUG FIX: the documented contract is to return None when no
        # library is attached, but the code returned 0.
        return None
    load = self.library.load_card
    matches = 0
    for code in self.cards:
        if load(code).get_info(key) == value:
            matches += 1
    return matches
Returns how many cards in the deck have the specified value under the specified key in their info data. This method requires a library to be stored in the deck instance and will return `None` if there is no library.
def _format_type(lines, element, spacer=""):
    """Format a derived type for full documentation output.

    Appends the formatted text to ``lines``, prefixing every produced
    line with ``spacer``.
    """
    rlines = []
    rlines.append(element.signature)
    _format_summary(rlines, element)
    rlines.append("")
    # Everything except the summary, which was already emitted above.
    _format_generic(rlines, element, ["summary"])
    if len(element.executables) > 0:
        rlines.append("\nEMBEDDED PROCEDURES")
        for key, value in list(element.executables.items()):
            rlines.append(" {}".format(value.__str__()))
            target = value.target
            if target is not None:
                _format_executable(rlines, target, " ")
    if len(element.members) > 0:
        rlines.append("\nEMBEDDED MEMBERS")
        for key, value in list(element.members.items()):
            _format_value_element(rlines, value, " ")
    lines.extend([spacer + l for l in rlines])
Formats a derived type for full documentation output.
def split(str, pattern, limit=-1):
    """Split ``str`` around matches of the regex ``pattern``.

    :param str: a string expression to split
    :param pattern: a Java regular expression string
    :param limit: controls how many times ``pattern`` is applied;
        ``limit <= 0`` means as many times as possible
    """
    sc = SparkContext._active_spark_context
    jcol = sc._jvm.functions.split(_to_java_column(str), pattern, limit)
    return Column(jcol)
Splits str around matches of the given pattern. :param str: a string expression to split :param pattern: a string representing a regular expression. The regex string should be a Java regular expression. :param limit: an integer which controls the number of times `pattern` is applied. * ``limit > 0``: The resulting array's length will not be more than `limit`, and the resulting array's last entry will contain all input beyond the last matched pattern. * ``limit <= 0``: `pattern` will be applied as many times as possible, and the resulting array can be of any size. .. versionchanged:: 3.0 `split` now takes an optional `limit` field. If not provided, default limit value is -1. >>> df = spark.createDataFrame([('oneAtwoBthreeC',)], ['s',]) >>> df.select(split(df.s, '[ABC]', 2).alias('s')).collect() [Row(s=[u'one', u'twoBthreeC'])] >>> df.select(split(df.s, '[ABC]', -1).alias('s')).collect() [Row(s=[u'one', u'two', u'three', u''])]
def pub(topic_name, json_msg, repeat_rate=None, host=None, pub_port=jps.DEFAULT_PUB_PORT):
    """Publish ``json_msg`` to ``topic_name``.

    :param topic_name: name of the topic
    :param json_msg: data to be published
    :param repeat_rate: when None, publish once; otherwise publish
        repeatedly at this rate in [Hz] until interrupted
    :param host: master host; resolved at call time when None
    """
    # BUG FIX: ``jps.env.get_master_host()`` was previously evaluated
    # as a default argument, i.e. once at import time; resolve it per
    # call instead so environment changes take effect.
    if host is None:
        host = jps.env.get_master_host()
    publisher = jps.Publisher(topic_name, host=host, pub_port=pub_port)
    # Give the socket a moment to establish before publishing.
    time.sleep(0.1)
    if repeat_rate is None:
        publisher.publish(json_msg)
        return
    try:
        while True:
            publisher.publish(json_msg)
            time.sleep(1.0 / repeat_rate)
    except KeyboardInterrupt:
        pass
publishes the data to the topic :param topic_name: name of the topic :param json_msg: data to be published :param repeat_rate: if None, publishes once. if not None, it is used as [Hz].
def _get_has_relation_query(self, relation):
    """Get the "has" relation base query.

    :type relation: str
    :rtype: Builder
    """
    from .relations import Relation

    def make_relation():
        # Resolved lazily so constraints are suppressed during lookup.
        return getattr(self.get_model(), relation)()

    return Relation.no_constraints(make_relation)
Get the "has" relation base query :type relation: str :rtype: Builder
def _validate_cert_format(name): cert_formats = ['cer', 'pfx'] if name not in cert_formats: message = ("Invalid certificate format '{0}' specified. Valid formats:" ' {1}').format(name, cert_formats) raise SaltInvocationError(message)
Ensure that the certificate format, as determined from user input, is valid.
def uniform_crossover(parents):
    """Perform uniform crossover on two parent chromosomes.

    Each gene is taken at random from one parent; the sibling child
    receives the complementary gene.
    Ex. p1 = xxxxx, p2 = yyyyy, child = xyxxy
    """
    num_genes = len(parents[0])
    first, second = [], []
    for gene_index in range(num_genes):
        pick = random.randint(0, 1)
        first.append(parents[pick][gene_index])
        second.append(parents[1 - pick][gene_index])
    return [first, second]
Perform uniform crossover on two parent chromosomes. Randomly take genes from one parent or the other. Ex. p1 = xxxxx, p2 = yyyyy, child = xyxxy
def _rule_compare(rule1, rule2): commonkeys = set(rule1.keys()).intersection(rule2.keys()) for key in commonkeys: if rule1[key] != rule2[key]: return False return True
Compare the common keys between security group rules against eachother
def Param(name, value=None, unit=None, ucd=None, dataType=None, utype=None, ac=True):
    """'Parameter', a general purpose key-value entry in the 'What' section.

    When ``ac`` is true and ``value`` is a non-string without an
    explicit ``dataType``, autoconversion is attempted via the
    ``_datatypes_autoconversion`` table; the value is always stored as
    a string representation, per the VO spec.
    """
    # Snapshot the call arguments; ``ac`` itself is not an XML attribute.
    atts = locals()
    atts.pop('ac')
    temp_dict = {}
    temp_dict.update(atts)
    # Drop unset attributes (iterate the copy while mutating ``atts``).
    for k in temp_dict.keys():
        if atts[k] is None:
            del atts[k]
    if (ac
            and value is not None
            and (not isinstance(value, string_types))
            and dataType is None
            ):
        if type(value) in _datatypes_autoconversion:
            # Derive both the dataType attribute and the string form.
            datatype, func = _datatypes_autoconversion[type(value)]
            atts['dataType'] = datatype
            atts['value'] = func(value)
    return objectify.Element('Param', attrib=atts)
'Parameter', used as a general purpose key-value entry in the 'What' section. May be assembled into a :class:`Group`. NB ``name`` is not mandated by schema, but *is* mandated in full spec. Args: value(str): String representing parameter value. Or, if ``ac`` is true, then 'autoconversion' is attempted, in which case ``value`` can also be an instance of one of the following: * :py:obj:`bool` * :py:obj:`int` * :py:obj:`float` * :py:class:`datetime.datetime` This allows you to create Params without littering your code with string casts, or worrying if the passed value is a float or a string, etc. NB the value is always *stored* as a string representation, as per VO spec. unit(str): Units of value. See :class:`.definitions.units` ucd(str): `unified content descriptor <http://arxiv.org/abs/1110.0525>`_. For a list of valid UCDs, see: http://vocabularies.referata.com/wiki/Category:IVOA_UCD. dataType(str): Denotes type of ``value``; restricted to 3 options: ``string`` (default), ``int`` , or ``float``. (NB *not* to be confused with standard XML Datatypes, which have many more possible values.) utype(str): See http://wiki.ivoa.net/twiki/bin/view/IVOA/Utypes ac(bool): Attempt automatic conversion of passed ``value`` to string, and set ``dataType`` accordingly (only attempted if ``dataType`` is the default, i.e. ``None``). (NB only supports types listed in _datatypes_autoconversion dict)
def build_agency(pfeed):
    """Given a ProtoFeed, return a DataFrame representing ``agency.txt``."""
    meta = pfeed.meta
    agency = {
        'agency_name': meta['agency_name'].iat[0],
        'agency_url': meta['agency_url'].iat[0],
        'agency_timezone': meta['agency_timezone'].iat[0],
    }
    return pd.DataFrame(agency, index=[0])
Given a ProtoFeed, return a DataFrame representing ``agency.txt``
def remove_section(self, section_name):
    """Remove a section; the DEFAULT section cannot be removed.

    :raises Exception: if ``section_name`` is 'DEFAULT' or unknown
    """
    if section_name == "DEFAULT":
        raise Exception("'DEFAULT' is reserved section name.")
    if section_name in self._sections:
        del self._sections[section_name]
    else:
        # BUG FIX: the original message contained a '%s' placeholder
        # but never applied the format argument.
        raise Exception("Error! cannot find section '%s'." % section_name)
Remove a section, it cannot be the DEFAULT section.
def _load(self, scale=0.001):
    """Load the OLCI relative spectral responses for this band.

    :param scale: factor applied to the file's wavelength values
        (presumably nm -> micrometres — TODO confirm)
    """
    ncf = Dataset(self.path, 'r')
    bandnum = OLCI_BAND_NAMES.index(self.bandname)
    resp = ncf.variables[
        'mean_spectral_response_function'][bandnum, :]
    wvl = ncf.variables[
        'mean_spectral_response_function_wavelength'][bandnum, :] * scale
    # NOTE(review): the netCDF dataset is never explicitly closed here.
    self.rsr = {'wavelength': wvl, 'response': resp}
Load the OLCI relative spectral responses
def _get_boolean(self, source, bitarray): raw_value = self._get_raw(source, bitarray) return { source['shortcut']: { 'description': source.get('description'), 'unit': source.get('unit', ''), 'value': True if raw_value else False, 'raw_value': raw_value, } }
Get boolean value, based on the data in XML
def environment(**kv):
    """Run code with a modified UNIX process environment.

    All key/value pairs are added (or changed, for existing variables)
    in ``os.environ`` on entry; on exit, added variables are removed
    and changed ones are restored to their pristine values.

    NOTE(review): expected to be wrapped by
    ``contextlib.contextmanager`` at the definition site — confirm.
    """
    added = []
    changed = {}
    for key, value in kv.items():
        if key not in os.environ:
            added.append(key)
        else:
            changed[key] = os.environ[key]
        os.environ[key] = value
    try:
        yield
    finally:
        # BUG FIX: restoration now also runs when the managed block
        # raises; previously an exception skipped the cleanup entirely.
        for key in added:
            del os.environ[key]
        for key, old_value in changed.items():
            os.environ[key] = old_value
Context manager to run Python code with a modified UNIX process environment. All key/value pairs in the keyword arguments are added (or changed, if the key names an existing environmental variable) in the process environment upon entrance into the context. Changes are undone upon exit: added environmental variables are removed from the environment, and those whose value was changed are reset to their pristine value.
def update(self, iterable):
    """Update with an ordered iterable of items.

    The relative order of the items is respected in the TopoSet
    (in the absence of cycles).
    """
    for edge in pairwise_longest(iterable, fillvalue=_FILL):
        self._edges.append(edge)
    # Invalidate any memoised ordering.
    self._results = None
Update with an ordered iterable of items. Args: iterable: An ordered iterable of items. The relative order of the items in this iterable will be respected in the TopoSet (in the absence of cycles).
def get_dict(self, obj, state=None, base_name='View'):
    """Return the style dict for a view instance.

    Delegates to ``get_dict_for_class`` using the instance's class and
    its current state.
    """
    # NOTE(review): ``class_name`` receives the class object itself,
    # not its name, and the ``state`` argument is ignored in favour of
    # ``obj.state`` — confirm both are intended.
    return self.get_dict_for_class(class_name=obj.__class__,
                                   state=obj.state,
                                   base_name=base_name)
The style dict for a view instance.
def plot_trajectory(*args, **kwargs):
    """Generate and show a plot from a TrajectoryObserver.

    ``interactive=True`` (default) renders with elegans (IPython
    Notebook only); otherwise matplotlib is used.
    """
    use_elegans = kwargs.pop('interactive', True)
    if use_elegans:
        plot_trajectory_with_elegans(*args, **kwargs)
    else:
        plot_trajectory_with_matplotlib(*args, **kwargs)
Generate a plot from received instance of TrajectoryObserver and show it See also plot_trajectory_with_elegans and plot_trajectory_with_matplotlib. Parameters ---------- obs : TrajectoryObserver TrajectoryObserver to render. interactive : bool, default True Choose a visualizer. If False, show the plot with matplotlib. If True (only available on IPython Notebook), show it with elegans. Examples -------- >>> plot_trajectory(obs) >>> plot_trajectory(obs, interactive=False)
def addImagePath(new_path):
    """Add a path (local path or URL) to the image search paths.

    :raises OSError: when the local path does not exist or the URL is
        not reachable
    """
    if os.path.exists(new_path):
        Settings.ImagePaths.append(new_path)
    # BUG FIX: the original used substring containment ("http://" in
    # new_path), which also accepted strings merely *containing* a
    # scheme; a URL must start with it.
    elif new_path.startswith(("http://", "https://")):
        request = requests.get(new_path)
        if request.status_code < 400:
            Settings.ImagePaths.append(new_path)
        else:
            raise OSError("Unable to connect to " + new_path)
    else:
        raise OSError("File not found: " + new_path)
Convenience function. Adds a path to the list of paths to search for images. Can be a URL (but must be accessible).
def _wait(starting_time, first_timestamp, timestamp): target_time = starting_time + (timestamp - first_timestamp) time.sleep(max(target_time - time.time(), 0))
Given that the first timestamp in the trace file is ``first_timestamp`` and we started playing back the file at ``starting_time``, block until the current ``timestamp`` should occur.
def is_optional(attr):
    """Return True when an ``attrs`` attribute is optional.

    An attribute counts as optional when it uses the optional
    validator or carries a concrete default (neither ``None`` nor
    ``NOTHING``).

    :param attr: an attribute definition
    :return: bool
    """
    return isinstance(attr.validator, _OptionalValidator) or (attr.default is not None and attr.default is not NOTHING)
Helper method to find if an attribute is optional :param attr: the attribute to inspect :return: True if the attribute is optional
def shutdown_kernel(self):
    """Shutdown the kernel of the client.

    Issues a DELETE against the notebook server's kernel API and warns
    the user through a message box when the server refuses.
    """
    kernel_id = self.get_kernel_id()
    if kernel_id:
        delete_url = self.add_token(url_path_join(self.server_url,
                                                  'api/kernels/',
                                                  kernel_id))
        delete_req = requests.delete(delete_url)
        # 204 No Content is the expected success response.
        if delete_req.status_code != 204:
            QMessageBox.warning(
                self,
                _("Server error"),
                _("The Jupyter Notebook server "
                  "failed to shutdown the kernel "
                  "associated with this notebook. "
                  "If you want to shut it down, "
                  "you'll have to close Spyder."))
Shutdown the kernel of the client.
def newick(self, tree_format=0):
    """Return the newick representation of the tree in its current state."""
    if self.treenode.children:
        features = {"name", "dist", "support", "height", "idx"}
        testnode = self.treenode.children[0]
        # Collect any extra per-node features beyond the standard set.
        extrafeat = {i for i in testnode.features if i not in features}
        features.update(extrafeat)
        # NOTE(review): ``features`` is built but never passed to
        # ``write`` — confirm whether it should be.
        return self.treenode.write(format=tree_format)
    # NOTE(review): a childless tree falls through to an implicit
    # ``None`` despite the docstring — confirm intended.
Returns newick representation of the tree in its current state.
def compstat(sdat, tstart=None, tend=None):
    """Compute statistics from series output by StagYY.

    Creates a file 'statistics.dat' containing the time-weighted mean
    and RMS deviation of each series over the requested time span.

    Args:
        sdat (StagyyData): a StagyyData instance.
        tstart (float): starting time; None to start at the beginning
            of available data.
        tend (float): ending time; None to stop at the end of
            available data.
    """
    data = sdat.tseries_between(tstart, tend)
    time = data['t'].values
    delta_time = time[-1] - time[0]
    # Drop the time column; keep only the series values.
    data = data.iloc[:, 1:].values
    # Time-weighted mean and RMS via trapezoidal integration.
    mean = np.trapz(data, x=time, axis=0) / delta_time
    rms = np.sqrt(np.trapz((data - mean)**2, x=time, axis=0) / delta_time)
    with open(misc.out_name('statistics.dat'), 'w') as out_file:
        mean.tofile(out_file, sep=' ', format="%10.5e")
        out_file.write('\n')
        rms.tofile(out_file, sep=' ', format="%10.5e")
        out_file.write('\n')
Compute statistics from series output by StagYY. Create a file 'statistics.dat' containing the mean and standard deviation of each series on the requested time span. Args: sdat (:class:`~stagpy.stagyydata.StagyyData`): a StagyyData instance. tstart (float): starting time. Set to None to start at the beginning of available data. tend (float): ending time. Set to None to stop at the end of available data.
def process_composite_comment(self, level, comments, key):
    """Return the rendered comment(s) for a composite (MAP, LAYER, ...).

    A list value yields one formatted comment per entry, joined with
    the configured newline character; a missing key yields ''.
    """
    if key not in comments:
        return ""
    value = comments[key]
    indent = self.whitespace(level, 0)
    if isinstance(value, list):
        formatted = [self.format_comment(indent, entry) for entry in value]
        return self.newlinechar.join(formatted)
    return self.format_comment(indent, value)
Process comments for composites such as MAP, LAYER etc.
def kappa_se_calc(PA, PE, POP):
    """Calculate the kappa standard error.

    :param PA: observed agreement among raters (overall accuracy)
    :param PE: hypothetical probability of chance agreement
    :param POP: population
    :return: standard error as float, or the string "None" on failure
    """
    try:
        variance = (PA * (1 - PA)) / (POP * ((1 - PE) ** 2))
        return math.sqrt(variance)
    except Exception:
        # Mirrors the library-wide convention of returning the string
        # "None" on invalid input (e.g. division by zero).
        return "None"
Calculate kappa standard error. :param PA: observed agreement among raters (overall accuracy) :type PA : float :param PE: hypothetical probability of chance agreement (random accuracy) :type PE : float :param POP: population :type POP:int :return: kappa standard error as float
def get(postcode):
    """Request data associated with ``postcode``.

    Spaces in the postcode are removed before the lookup.

    :param postcode: the postcode to search for
    :returns: a dict of the nearest postcode's data, or None when no
        postcode data is found
    """
    cleaned = quote(postcode.replace(' ', ''))
    url = '%s/postcode/%s.json' % (END_POINT, cleaned)
    return _get_json_resp(url)
Request data associated with `postcode`. :param postcode: the postcode to search for. The postcode may contain spaces (they will be removed). :returns: a dict of the nearest postcode's data or None if no postcode data is found.
def create_payload(self):
    """Build the payload, dropping ``smart_class_parameter_id`` or
    ``smart_variable_id`` when the matching attribute is present."""
    payload = super(OverrideValue, self).create_payload()
    for attr_name, payload_key in (
            ('smart_class_parameter', 'smart_class_parameter_id'),
            ('smart_variable', 'smart_variable_id')):
        if hasattr(self, attr_name):
            del payload[payload_key]
    return payload
Remove ``smart_class_parameter_id`` or ``smart_variable_id``
def curtailment(self): if self._curtailment is not None: result_dict = {} for key, gen_list in self._curtailment.items(): curtailment_df = pd.DataFrame() for gen in gen_list: curtailment_df[gen] = gen.curtailment result_dict[key] = curtailment_df return result_dict else: return None
Holds curtailment assigned to each generator per curtailment target. Returns ------- :obj:`dict` with :pandas:`pandas.DataFrame<dataframe>` Keys of the dictionary are generator types (and weather cell ID) curtailment targets were given for. E.g. if curtailment is provided as a :pandas:`pandas.DataFrame<dataframe>` with :pandas.`pandas.MultiIndex` columns with levels 'type' and 'weather cell ID' the dictionary key is a tuple of ('type','weather_cell_id'). Values of the dictionary are dataframes with the curtailed power in kW per generator and time step. Index of the dataframe is a :pandas:`pandas.DatetimeIndex<datetimeindex>`. Columns are the generators of type :class:`edisgo.grid.components.GeneratorFluctuating`.
def _init_peewee_ext(cls, app, dummy_configuration=None,
                     dummy_configure_args=None):
    """Init the actual PeeWee extension once ``DATABASE`` is configured.

    PeeWee requires the ``DATABASE`` config key at init time, so when the
    key is not present yet this reschedules itself as a run-once
    post-configure callback and returns without doing anything else.

    Args:
        app (flask.Flask): the application to init the PeeWee extension for.
        dummy_configuration (dict): unused; accepted because post-configure
            hooks receive the resulting configuration.
        dummy_configure_args (list[object]): unused; accepted for the same
            reason.
    """
    if 'DATABASE' not in app.config:
        # Not configured yet -- try again after the next configure() call.
        app.add_post_configure_callback(
            partial(cls._init_peewee_ext, app), run_once=True)
        return
    _PEEWEE_EXT.init_app(app)
Init the actual PeeWee extension with the app that was created. Since PeeWee requires the ``DATABASE`` config parameter to be present IMMEDIATELY upon initializing the application, we need to delay this construction. This is because, in standard use, we will create the app and attempt to init this extension BEFORE we configure the app, which is totally fine. To fix this, we just need to set this up to try and run after every call to configure. If there is not ``DATABASE`` config parameter present when run, this method does nothing other than reschedule itself to run in the future. In all cases, this is a Post Configure Hook that should RUN ONCE! Args: app (flask.Flask): The application you want to init the PeeWee Flask extension for. Hint: if you need to use this as a callback, use a partial to provide this. dummy_configuration (dict): The resulting application configuration that the post_configure hook provides to all of it's callbacks. We will NEVER use this, but since we utilize the post_configure system to register this for complicated apps, we gotta accept it. dummy_configure_args (list[object]): The args passed to the :meth:`configure` function that triggered this callback. Just like the above arg, we'll never use it, but we must accept it.
def similarity_by_path(sense1: "wn.Synset", sense2: "wn.Synset",
                       option: str = "path") -> float:
    """Return the maximum path-based similarity between two senses.

    :param sense1: A synset.
    :param sense2: A synset.
    :param option: One of 'path', 'wup', 'lch' (plus common aliases).
    :return: A float similarity; 0 for 'lch' when the POS differ;
        None for an unrecognized option.
    """
    option = option.lower()
    if option in ("path", "path_similarity"):
        # path_similarity is not symmetric in wn; take the max of both orders.
        return max(wn.path_similarity(sense1, sense2, if_none_return=0),
                   wn.path_similarity(sense2, sense1, if_none_return=0))
    if option in ("wup", "wupa", "wu-palmer"):  # duplicate alias removed
        return max(wn.wup_similarity(sense1, sense2, if_none_return=0),
                   wn.wup_similarity(sense2, sense1, if_none_return=0))
    if option in ("lch", "leacock-chordorow"):
        # LCH similarity is undefined across parts of speech.
        if sense1.pos != sense2.pos:
            return 0
        return wn.lch_similarity(sense1, sense2, if_none_return=0)
    # Unknown option: keep the original implicit-None contract, but explicit.
    return None
Returns maximum path similarity between two senses. :param sense1: A synset. :param sense2: A synset. :param option: String, one of ('path', 'wup', 'lch'). :return: A float, similarity measurement.
def _internal_reschedule(callback, retry=3, sleep_time=constants.DEFAULT_SLEEP):
    """Workaround for the internal run-container method.

    Starting an nspawn container sometimes fails (DBus or similar), so the
    start is attempted several times before giving up.

    :param callback: [function, first_arg, args, kwargs] invocation spec
    :param retry: how many times to attempt the start
    :param sleep_time: seconds to wait before polling for early failure
    :return: subprocess object of the running container
    :raises ConuException: if every attempt fails
    """
    func, first_arg, args, kwargs = callback[0], callback[1], callback[2], callback[3]
    for _attempt in range(retry):
        process = func(first_arg, *args, **kwargs)
        time.sleep(sleep_time)
        process.poll()
        if process.returncode is None:
            # Still running after the grace period -> start succeeded.
            return process
    raise ConuException(
        "Unable to start nspawn container - process failed for {}-times".format(retry))
Workaround method for the internal_run_container method. It sometimes fails because of DBus or similar issues, so try to start it more times. :param callback: callback method list :param retry: how many times to try to invoke the command :param sleep_time: how long to wait before subprocess.poll() to find if it failed :return: subprocess object
def create_pos(self, name, pos_type, pos_id, location=None):
    """Create a POS resource.

    Arguments:
        name: Human-readable name of the POS, used for displaying payment
            request origin to the end user
        pos_type: POS type
        pos_id: The ID of the POS to create; must be unique per merchant
        location: Merchant location
    """
    body = {
        'name': name,
        'type': pos_type,
        'id': pos_id,
        'location': location,
    }
    url = self.merchant_api_base_url + '/pos/'
    return self.do_req('POST', url, body).json()
Create POS resource Arguments: name: Human-readable name of the POS, used for displaying payment request origin to end user pos_type: POS type location: Merchant location pos_id: The ID of the POS that is to be created. Has to be unique for the merchant
def verify_response(self, response_json, signed_id_name='transactionid'):
    """Verify the signature of a response message.

    :param response_json: parsed response body containing an ``auth`` block.
    :param signed_id_name: key of the signed id field in the response.
    :return: result of :meth:`verify_signature`.
    """
    auth = response_json.get('auth', {})
    return self.verify_signature(
        signature=binascii.unhexlify(auth.get('signature', '')),
        nonce=auth.get('nonce', ''),
        timestamp=auth.get('timestamp', ''),
        signed_id=response_json.get(signed_id_name, ''),
    )
Verify the response message. :param response_json: :param signed_id_name: :return:
def append_sample(self, v, vartype, _left=False):
    """Add a column showing variable ``v``'s value in each sample."""
    header = str(v).rjust(2)
    width = len(header)
    if vartype is dimod.SPIN:
        def fmt(datum):
            return _spinstr(datum.sample[v], rjust=width)
    else:
        def fmt(datum):
            return _binarystr(datum.sample[v], rjust=width)
    self.append(header, fmt, _left=_left)
Add a sample column
def _populate_common_request(self, request): url_record = self._item_session.url_record if url_record.parent_url and not request.fields.get('Referer'): self._add_referrer(request, url_record) if self._fetch_rule.http_login: request.username, request.password = self._fetch_rule.http_login
Populate the Request with common fields.
def do_selection_reduction_to_one_parent(selection):
    """Find and reduce the selection to children of a single parent state.

    The parent owning the most selected models wins; models with a
    different parent are removed from the selection.

    :param selection: current Selection object
    :return: state model which is parent of the reduced selection, or None
        if the root state is selected
    """
    all_models_selected = selection.get_all()

    # Count how many selected models share each parent.
    parent_m_count_dict = {}
    for model in all_models_selected:
        parent_m_count_dict[model.parent] = parent_m_count_dict.get(model.parent, 0) + 1

    # Pick the most common parent.
    # Bug fix: the running maximum was never updated before, so the last
    # parent iterated always won regardless of its count.
    parent_m = None
    current_count_parent = 0
    for possible_parent_m, count in parent_m_count_dict.items():
        if current_count_parent < count:
            parent_m = possible_parent_m
            current_count_parent = count

    # A selected root state has no parent to reduce to.
    if len(selection.states) == 1 and selection.get_selected_state().state.is_root_state:
        parent_m = None

    if len(all_models_selected) > 1:
        # Collapse the selection to the focused state before re-filtering.
        selection.set(selection.get_selected_state())

    # Drop every model that does not belong to the chosen parent.
    if parent_m is not None:
        for model in all_models_selected:
            if model.parent is not parent_m:
                selection.remove(model)
    return parent_m
Find and reduce selection to one parent state. :param selection: :return: state model which is parent of selection or None if root state
def select_radio_button(self, key):
    """Select the radio button associated with ``key``.

    :param key: The key of the radio button.
    :type key: str
    """
    # Button index follows the insertion order of the options dict.
    index = list(self._parameter.options.keys()).index(key)
    self.input_button_group.button(index).click()
Helper to select a radio button with key. :param key: The key of the radio button. :type key: str
def setArticleThreshold(self, value):
    """Set the minimum total weight an article must have in order to
    appear among the results.

    @param value: threshold to use (non-negative int)
    """
    assert isinstance(value, int)
    assert value >= 0
    # NOTE: key spelling ("Tresh") matches the remote API and must not change.
    self.topicPage["articleTreshWgt"] = value
what is the minimum total weight that an article has to have in order to get it among the results? @param value: threshold to use
def top(self, num, key=None):
    """Get the top N elements from an RDD, sorted in descending order.

    .. note:: Only use when the resulting list is expected to be small,
        as all of it is loaded into the driver's memory.

    >>> sc.parallelize([10, 4, 2, 12, 3]).top(1)
    [12]
    """
    def largest_per_partition(iterator):
        # One heap-selected list per partition.
        yield heapq.nlargest(num, iterator, key=key)

    def combine(left, right):
        return heapq.nlargest(num, left + right, key=key)

    return self.mapPartitions(largest_per_partition).reduce(combine)
Get the top N elements from an RDD. .. note:: This method should only be used if the resulting array is expected to be small, as all the data is loaded into the driver's memory. .. note:: It returns the list sorted in descending order. >>> sc.parallelize([10, 4, 2, 12, 3]).top(1) [12] >>> sc.parallelize([2, 3, 4, 5, 6], 2).top(2) [6, 5] >>> sc.parallelize([10, 4, 2, 12, 3]).top(3, key=str) [4, 3, 2]
def has_child_banks(self, bank_id):
    """Tests if a bank has any children.

    arg: bank_id (osid.id.Id): a ``bank_id``
    return: (boolean) - ``true`` if the ``bank_id`` has children,
        ``false`` otherwise
    raise: NotFound - ``bank_id`` is not found
    raise: NullArgument - ``bank_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    # Prefer the catalog session when one is available.
    if self._catalog_session is None:
        return self._hierarchy_session.has_children(id_=bank_id)
    return self._catalog_session.has_child_catalogs(catalog_id=bank_id)
Tests if a bank has any children. arg: bank_id (osid.id.Id): a ``bank_id`` return: (boolean) - ``true`` if the ``bank_id`` has children, ``false`` otherwise raise: NotFound - ``bank_id`` is not found raise: NullArgument - ``bank_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def __init_configsvrs(self, params):
    """Create and start the config servers for this sharded cluster.

    :param params: iterable of per-server config dicts; recognized keys
        ``server_id`` and ``version`` are popped, the rest is passed to
        the mongod server factory with ``configsvr`` forced on.
    """
    self._configsvrs = []
    for cfg in params:
        # Work on an auth-stripped copy of the server config.
        cfg = self._strip_auth(cfg)
        server_id = cfg.pop('server_id', None)
        version = cfg.pop('version', self._version)
        # Mark the server as a config server.
        cfg.update({'configsvr': True})
        if self.enable_ipv6:
            common.enable_ipv6_single(cfg)
        self._configsvrs.append(Servers().create(
            'mongod', cfg, sslParams=self.sslParams, autostart=True,
            version=version, server_id=server_id))
create and start config servers
def shlex_split(s, **kwargs):
    """Shell-split ``s`` only when it is a string; pass through otherwise."""
    if not isinstance(s, six.string_types):
        return s
    return salt.utils.data.decode(
        shlex.split(salt.utils.stringutils.to_str(s), **kwargs)
    )
Only split if variable is a string
def recursive_cov(self, cov, length, mean, chain, scaling=1, epsilon=0):
    r"""Compute the sample covariance recursively.

    Folds the new samples in ``chain`` into the previous covariance
    ``cov`` (computed over ``length`` samples with mean ``mean``) and
    returns the new covariance and new mean.

    :Parameters:
      - cov : matrix -- previous covariance matrix.
      - length : int -- length of chain used to compute ``cov``.
      - mean : array -- previous mean.
      - chain : array -- sample used to update the covariance.
      - scaling : float -- scaling parameter.
      - epsilon : float -- small jitter to avoid singular matrices.
    """
    n = length + len(chain)
    k = length
    new_mean = self.recursive_mean(mean, length, chain)

    old_outer = k * np.outer(mean, mean)
    chain_gram = np.dot(chain.T, chain)
    new_outer = n * np.outer(new_mean, new_mean)
    jitter = epsilon * np.eye(cov.shape[0])

    new_cov = ((k - 1) / (n - 1.) * cov
               + scaling / (n - 1.) * (old_outer + chain_gram - new_outer + jitter))
    return new_cov, new_mean
r"""Compute the covariance recursively. Return the new covariance and the new mean. .. math:: C_k & = \frac{1}{k-1} (\sum_{i=1}^k x_i x_i^T - k\bar{x_k}\bar{x_k}^T) C_n & = \frac{1}{n-1} (\sum_{i=1}^k x_i x_i^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T) & = \frac{1}{n-1} ((k-1)C_k + k\bar{x_k}\bar{x_k}^T + \sum_{i=k+1}^n x_i x_i^T - n\bar{x_n}\bar{x_n}^T) :Parameters: - cov : matrix Previous covariance matrix. - length : int Length of chain used to compute the previous covariance. - mean : array Previous mean. - chain : array Sample used to update covariance. - scaling : float Scaling parameter - epsilon : float Set to a small value to avoid singular matrices.
def line(self, text=''):
    """Write ``text`` to the output stream, followed by a newline."""
    out = self.out
    out.write(text)
    out.write('\n')
A simple helper that writes a line of text followed by `\n`.
def participant_names(self):
    """The instance names of the RTObjects participating in this context."""
    with self._mutex:
        names = []
        for participant in self._participants:
            names.append(participant.get_component_profile().instance_name)
        return names
The names of the RTObjects participating in this context.
def dict_has_any_keys(self, keys):
    """Create a boolean SArray indicating, per element, whether the
    element's dictionary contains any of the given keys.

    Fails on SArrays whose data type is not ``dict``.

    Parameters
    ----------
    keys : list
        Key values to check each dictionary against.

    Returns
    -------
    out : SArray
        SArray of int type; 1 where the dictionary has any of the keys.
    """
    if not _is_non_string_iterable(keys):
        # Allow a single bare key to be passed.
        keys = [keys]
    with cython_context():
        return SArray(_proxy=self.__proxy__.dict_has_any_keys(keys))
Create a boolean SArray by checking the keys of an SArray of dictionaries. An element of the output SArray is True if the corresponding input element's dictionary has any of the given keys. Fails on SArrays whose data type is not ``dict``. Parameters ---------- keys : list A list of key values to check each dictionary against. Returns ------- out : SArray A SArray of int type, where each element indicates whether the input SArray element contains any key in the input list. See Also -------- dict_has_all_keys Examples -------- >>> sa = turicreate.SArray([{"this":1, "is":5, "dog":7}, {"animal":1}, {"this": 2, "are": 1, "cat": 5}]) >>> sa.dict_has_any_keys(["is", "this", "are"]) dtype: int Rows: 3 [1, 0, 1]
def _read_page_header(file_obj):
    """Read and return a parquet ``PageHeader`` from ``file_obj``."""
    transport = TFileTransport(file_obj)
    protocol = TCompactProtocolFactory().get_protocol(transport)
    header = parquet_thrift.PageHeader()
    header.read(protocol)
    return header
Read the page_header from the given fo.
def log(message, type):
    """Log notices to stdout and everything else to stderr."""
    if type == 'notice':
        stream = sys.stdout
    else:
        stream = sys.stderr
    stream.write(message + "\n")
Log notices to stdout and errors to stderr
def create_identity(user_id, curve_name):
    """Create a GPG identity for a hardware device.

    :param user_id: GPG user id, stored as the identity's host.
    :param curve_name: elliptic curve name used for the identity.
    :return: interface.Identity instance.
    """
    identity = interface.Identity(identity_str='gpg://', curve_name=curve_name)
    identity.identity_dict['host'] = user_id
    return identity
Create GPG identity for hardware device.
def _reset(self, command, *args, **kwargs): if self.indexable: self.deindex() result = self._traverse_command(command, *args, **kwargs) if self.indexable: self.index() return result
Shortcut for commands that reset values of the field. All will be deindexed and reindexed.
def time_coef(tc, nc, tb, nb):
    """Return the time coefficient relative to base numbers.

    @param tc: current test time
    @param nc: current test data size
    @param tb: base test time
    @param nb: base test data size
    @return: time coefficient as float
    """
    tc, nc, tb, nb = float(tc), float(nc), float(tb), float(nb)
    return (tc * nb) / (tb * nc)
Return time coefficient relative to base numbers. @param tc: current test time @param nc: current test data size @param tb: base test time @param nb: base test data size @return: time coef.
def get_exitstatus(self):
    """Get the exit status of the program execution.

    Returns:
        int: Exit status as reported by the operating system, or None if
        it is not available.
    """
    status = self._spawn.exitstatus
    logger.debug("Exit status is {0}".format(status))
    return status
Get the exit status of the program execution. Returns: int: Exit status as reported by the operating system, or None if it is not available.
def SensorDataDelete(self, sensor_id, data_id):
    """Delete a sensor datum from a specific sensor in CommonSense.

    @param sensor_id (int) - sensor id of the sensor to delete data from
    @param data_id (int) - id of the data point to delete
    @return (bool) - whether the deletion succeeded
    """
    url = '/sensors/{0}/data/{1}.json'.format(sensor_id, data_id)
    if self.__SenseApiCall__(url, 'DELETE'):
        return True
    self.__error_ = "api call unsuccessful"
    return False
Delete a sensor datum from a specific sensor in CommonSense. @param sensor_id (int) - Sensor id of the sensor to delete data from @param data_id (int) - Id of the data point to delete @return (bool) - Boolean indicating whether SensorDataDelete was successful.
def _node_info(conn):
    """Internal variant of node_info taking a libvirt connection.

    :param conn: open libvirt connection.
    :return: dict of host hardware facts derived from ``conn.getInfo()``.
    """
    raw = conn.getInfo()
    # getInfo() returns a fixed-order list; map it to named fields.
    return {
        'cpucores': raw[6],
        'cpumhz': raw[3],
        'cpumodel': six.text_type(raw[0]),
        'cpus': raw[2],
        'cputhreads': raw[7],
        'numanodes': raw[4],
        'phymemory': raw[1],
        'sockets': raw[5],
    }
Internal variant of node_info taking a libvirt connection as parameter
def convert_values(self, matchdict: Dict[str, str]) -> Dict[str, Any]:
    """Convert the values of ``matchdict`` using this object's converters."""
    return {
        varname: self.converters[varname](value)
        for varname, value in matchdict.items()
    }
convert values of ``matchdict`` with converter this object has.
def max_brightness(self):
    """Get the device's maximum brightness level.

    Returns the value as int, or the raw stripped text when it is not
    numeric.
    """
    with open(os.path.join(self.path, 'max_brightness')) as status_fp:
        text = status_fp.read().strip()
    try:
        return int(text)
    except ValueError:
        return text
Get the device's maximum brightness level.
def assign_value(self, comp_def, value, src_ref):
    """Assign the value, then drop the related properties it overrides."""
    super().assign_value(comp_def, value, src_ref)
    # rclr/rset are superseded by this property.
    for prop in ("rclr", "rset"):
        if prop in comp_def.properties:
            del comp_def.properties[prop]
Overrides other related properties
def _schema_from_verb(verb, partial=False):
    """Return an instance of the schema class for the given verb."""
    from .verbs import Verbs
    schema_cls = getattr(Verbs, verb)
    return schema_cls(partial=partial)
Return an instance of schema for given verb.
def is_valid_callsign(self, callsign, timestamp=timestamp_now):
    """Check if a callsign is valid.

    Args:
        callsign (str): Amateur Radio callsign
        timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)

    Returns:
        bool: True if the callsign can be looked up, False otherwise.

    Example:
        >>> cic.is_valid_callsign("DH1TW")
        True
    """
    try:
        # Bug fix: the previous version fell through and implicitly
        # returned None (not False) when get_all() returned a falsy value.
        return bool(self.get_all(callsign, timestamp))
    except KeyError:
        return False
Checks if a callsign is valid Args: callsign (str): Amateur Radio callsign timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC) Returns: bool: True / False Example: The following checks if "DH1TW" is a valid callsign >>> from pyhamtools import LookupLib, Callinfo >>> my_lookuplib = LookupLib(lookuptype="countryfile") >>> cic = Callinfo(my_lookuplib) >>> cic.is_valid_callsign("DH1TW") True
def convert_bytes_to_ints(in_bytes, num):
    """Convert a byte array into an integer array.

    The number of bytes per (big-endian) integer is defined by ``num``.

    :param in_bytes: the input bytes
    :param num: the number of bytes per int
    :return: the integer array
    """
    dtype = numpy.dtype('>i{}'.format(num))
    return numpy.frombuffer(in_bytes, dtype)
Convert a byte array into an integer array. The number of bytes forming an integer is defined by num :param in_bytes: the input bytes :param num: the number of bytes per int :return: the integer array
def update(self):
    """Refresh this object with the latest changes from Bugzilla.

    Raises:
        BugException: if the bug has no id (i.e. is not yet in Bugzilla).
    """
    if 'id' not in self._bug:
        raise BugException("Unable to update bug that isn't in Bugzilla")
    result = self._bugsy.request('bug/%s' % self._bug['id'])
    self._bug = dict(**result['bugs'][0])
Update this object with the latest changes from Bugzilla >>> bug.status 'NEW' #Changes happen on Bugzilla >>> bug.update() >>> bug.status 'FIXED'
def _set_align(self, orientation, value): orientation_letter = orientation[0] possible_alignments = getattr( self, '_possible_{}aligns'.format(orientation_letter)) all_alignments = getattr( self, '_all_{}aligns'.format(orientation_letter)) if value not in possible_alignments: if value in all_alignments: msg = 'non-permitted' else: msg = 'non-existant' raise ValueError( "Can't set {} {} alignment {!r} on element {!r}".format( msg, orientation, value, self)) setattr(self, '_{}align'.format(orientation_letter), value)
We define a setter because it's better to diagnose this kind of programmatic error here than have to work out why alignment is odd when we silently fail!
def get_resources_by_bin(self, bin_id):
    """Gets the list of ``Resources`` associated with a ``Bin``.

    arg: bin_id (osid.id.Id): ``Id`` of a ``Bin``
    return: (osid.resource.ResourceList) - list of related resources
    raise: NotFound - ``bin_id`` is not found
    raise: NullArgument - ``bin_id`` is ``null``
    raise: OperationFailed - unable to complete request
    raise: PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    provider = self._get_provider_manager('RESOURCE', local=True)
    session = provider.get_resource_lookup_session_for_bin(
        bin_id, proxy=self._proxy)
    session.use_isolated_bin_view()
    return session.get_resources()
Gets the list of ``Resources`` associated with a ``Bin``. arg: bin_id (osid.id.Id): ``Id`` of a ``Bin`` return: (osid.resource.ResourceList) - list of related resources raise: NotFound - ``bin_id`` is not found raise: NullArgument - ``bin_id`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure *compliance: mandatory -- This method must be implemented.*
def from_dict(self, d):
    """Set MobID fields from a dict, defaulting missing entries to zero."""
    self.length = d.get("length", 0)
    self.instanceHigh = d.get("instanceHigh", 0)
    self.instanceMid = d.get("instanceMid", 0)
    self.instanceLow = d.get("instanceLow", 0)
    material = d.get("material", {})
    self.Data1 = material.get('Data1', 0)
    self.Data2 = material.get('Data2', 0)
    self.Data3 = material.get('Data3', 0)
    self.Data4 = material.get("Data4", [0] * 8)
    self.SMPTELabel = d.get("SMPTELabel", [0] * 12)
Set MobID from a dict
def set_connection(self, service_name, to_cache):
    """Cache a connection class for a service.

    :param service_name: the service the ``Connection`` talks to
        (e.g. ``sqs``, ``sns``, ``dynamodb``).
    :type service_name: string
    :param to_cache: the class to be cached for the service.
    :type to_cache: class
    """
    entry = self.services.setdefault(service_name, {})
    entry['connection'] = to_cache
Sets a connection class within the cache. :param service_name: The service a given ``Connection`` talks to. Ex. ``sqs``, ``sns``, ``dynamodb``, etc. :type service_name: string :param to_cache: The class to be cached for the service. :type to_cache: class
def _update_items(self, items): self.items = items self.map = {item: idx for (idx, item) in enumerate(items)}
Replace the 'items' list of this OrderedSet with a new one, updating self.map accordingly.