code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_next_base26(prev=None):
    """Return the next letter-based ID in the sequence 'a', 'b', ..., 'z', 'aa', 'ab', ...

    Raises:
        ValueError: if `prev` contains anything but lowercase a-z.

    Returns:
        str: Next base-26 ID.
    """
    if not prev:
        return 'a'
    if re.match(r"^[a-z]*$", prev) is None:
        raise ValueError("Invalid base26")
    head, last = prev[:-1], prev[-1]
    if last != 'z':
        # simple case: bump the final letter
        return head + chr(ord(last) + 1)
    # final letter wraps: carry into the prefix, append fresh 'a'
    return get_next_base26(head) + 'a'
Increment letter-based IDs. Generates IDs like ['a', 'b', ..., 'z', 'aa', 'ab', ..., 'az', 'ba', ...] Returns: str: Next base-26 ID.
def _read_xml_db(self):
    """Read metadata from an XML string stored in a DB.

    :return: the root element of the XML, or None when no metadata is
        stored for ``self.layer_uri`` (HashNotFoundError raised by the IO
        layer).
    :rtype: ElementTree.Element
    """
    try:
        metadata_str = self.db_io.read_metadata_from_uri(
            self.layer_uri, 'xml')
        root = ElementTree.fromstring(metadata_str)
        return root
    except HashNotFoundError:
        return None
read metadata from an xml string stored in a DB. :return: the root element of the xml :rtype: ElementTree.Element
def display(self):
    """Update the widgets, especially based on length of recordings."""
    lg.debug('GraphicsScene is between {}s and {}s'.format(self.minimum, self.maximum))
    x_scale = 1 / self.parent.value('overview_scale')
    lg.debug('Set scene x-scaling to {}'.format(x_scale))
    # undo any previously applied x-scaling before applying the new one
    self.scale(1 / self.transform().m11(), 1)
    self.scale(x_scale, 1)
    self.scene = QGraphicsScene(self.minimum, 0, self.maximum, TOTAL_HEIGHT)
    self.setScene(self.scene)
    # reset marker/annotation bookkeeping for the fresh scene
    self.idx_markers = []
    self.idx_annot = []
    self.display_current()
    for name, pos in BARS.items():
        # NOTE(review): QGraphicsRectItem takes (x, y, width, height); here
        # self.maximum and pos['pos1'] look like width/height values spanning
        # the scene — confirm that is the intent.
        item = QGraphicsRectItem(self.minimum, pos['pos0'], self.maximum, pos['pos1'])
        item.setToolTip(pos['tip'])
        self.scene.addItem(item)
    self.add_timestamps()
Updates the widgets, especially based on length of recordings.
def altimeter(alt: Number, unit: str = 'inHg') -> str:
    """Format altimeter details into a spoken word string.

    Falsy `alt` yields "Altimeter unknown"; unrecognized units leave the
    prefix unchanged.
    """
    ret = 'Altimeter '
    if not alt:
        ret += 'unknown'
    elif unit == 'inHg':
        # e.g. "2992" is spoken as "two nine point nine two"
        ret += core.spoken_number(alt.repr[:2]) + ' point ' + core.spoken_number(alt.repr[2:])
    elif unit == 'hPa':
        ret += core.spoken_number(alt.repr)
    return ret
Format altimeter details into a spoken word string
def _populateFromVariantFile(self, varFile, dataUrl, indexFile):
    """Populate the instance variables of this VariantSet from the
    specified pysam VariantFile object.

    :raises NotIndexedException: when varFile has no index
    :raises OverlappingVcfException: when a chromosome appears in more
        than one VCF for this variant set
    """
    if varFile.index is None:
        raise exceptions.NotIndexedException(dataUrl)
    for chrom in varFile.index:
        chrom, _, _ = self.sanitizeVariantFileFetch(chrom)
        # only map chromosomes that actually contain records
        if not isEmptyIter(varFile.fetch(chrom)):
            if chrom in self._chromFileMap:
                raise exceptions.OverlappingVcfException(dataUrl, chrom)
            self._chromFileMap[chrom] = dataUrl, indexFile
    self._updateMetadata(varFile)
    self._updateCallSetIds(varFile)
    self._updateVariantAnnotationSets(varFile, dataUrl)
Populates the instance variables of this VariantSet from the specified pysam VariantFile object.
def get_issues():
    """Return journal entries whose level is WARNING or higher."""
    return [entry for entry in Logger.journal if entry.level >= WARNING]
Get actual issues in the journal.
def _attr_func_(self):
    "Special property containing functions to be lazily-evaluated."
    try:
        return self.__attr_func
    except AttributeError:
        # First access: build and cache an empty, slotted placeholder whose
        # class is generated on the fly and named <Owner>EmptyFuncs.
        self.__attr_func = type(
            ''.join([type(self).__name__, 'EmptyFuncs']),
            (),
            {
                '__module__': type(self).__module__,
                '__slots__': ()
            }
        )()
        return self.__attr_func
Special property containing functions to be lazily-evaluated.
def create(cls, name, ne_ref=None, operator='exclusion', sub_expression=None, comment=None):
    """Create the expression.

    :param str name: name of expression
    :param list ne_ref: network element references for expression
    :param str operator: 'exclusion' (negation), 'union', 'intersection'
        (default: exclusion)
    :param dict sub_expression: sub expression used; wrapped in a
        one-element list for the payload
    :param str comment: optional comment
    :raises CreateElementFailed: element creation failed with reason
    :return: instance with meta
    :rtype: Expression
    """
    sub_expression = [] if sub_expression is None else [sub_expression]
    json = {'name': name,
            'operator': operator,
            'ne_ref': ne_ref,
            'sub_expression': sub_expression,
            'comment': comment}
    return ElementCreator(cls, json)
Create the expression :param str name: name of expression :param list ne_ref: network element references for expression :param str operator: 'exclusion' (negation), 'union', 'intersection' (default: exclusion) :param dict sub_expression: sub expression used :param str comment: optional comment :raises CreateElementFailed: element creation failed with reason :return: instance with meta :rtype: Expression
def get_name(self):
    """Return the name of the instruction.

    :rtype: string
    """
    if self.OP > 0xff:
        # two-byte opcodes: the optimized table starts at 0xf2ff
        if self.OP >= 0xf2ff:
            return DALVIK_OPCODES_OPTIMIZED[self.OP][1][0]
        return DALVIK_OPCODES_EXTENDED_WIDTH[self.OP][1][0]
    return DALVIK_OPCODES_FORMAT[self.OP][1][0]
Return the name of the instruction :rtype: string
def _parse_uri_options(self, parsed_uri, use_ssl=False, ssl_options=None):
    """Parse the uri options.

    :param parsed_uri: result of urlparse on the connection URI
    :param bool use_ssl: enable TLS for the connection
    :param dict ssl_options: extra options merged with those parsed from
        the query string
    :raises AMQPConnectionError: when TLS is requested but unsupported
    :return: dict of connection options
    """
    ssl_options = ssl_options or {}
    kwargs = urlparse.parse_qs(parsed_uri.query)
    # the URI path carries the (url-quoted) virtual host, minus leading '/'
    vhost = urlparse.unquote(parsed_uri.path[1:]) or DEFAULT_VIRTUAL_HOST
    options = {
        'ssl': use_ssl,
        'virtual_host': vhost,
        'heartbeat': int(kwargs.pop('heartbeat',
                                    [DEFAULT_HEARTBEAT_INTERVAL])[0]),
        'timeout': int(kwargs.pop('timeout',
                                  [DEFAULT_SOCKET_TIMEOUT])[0])
    }
    if use_ssl:
        if not compatibility.SSL_SUPPORTED:
            raise AMQPConnectionError(
                'Python not compiled with support '
                'for TLSv1 or higher'
            )
        ssl_options.update(self._parse_ssl_options(kwargs))
        options['ssl_options'] = ssl_options
    return options
Parse the uri options. :param parsed_uri: :param bool use_ssl: :return:
def _EvaluateExpression(frame, expression):
    """Compiles and evaluates watched expression.

    Args:
      frame: evaluation context.
      expression: watched expression to compile and evaluate.

    Returns:
      (False, status) on error or (True, value) on success.
    """
    try:
        code = compile(expression, '<watched_expression>', 'eval')
    except (TypeError, ValueError) as e:
        # e.g. NUL bytes or bad characters in the source string
        return (False, {
            'isError': True,
            'refersTo': 'VARIABLE_NAME',
            'description': {
                'format': 'Invalid expression',
                'parameters': [str(e)]}})
    except SyntaxError as e:
        return (False, {
            'isError': True,
            'refersTo': 'VARIABLE_NAME',
            'description': {
                'format': 'Expression could not be compiled: $0',
                'parameters': [e.msg]}})
    try:
        # CallImmutable evaluates without allowing side effects on the frame
        return (True, native.CallImmutable(frame, code))
    except BaseException as e:
        return (False, {
            'isError': True,
            'refersTo': 'VARIABLE_VALUE',
            'description': {
                'format': 'Exception occurred: $0',
                'parameters': [str(e)]}})
Compiles and evaluates watched expression. Args: frame: evaluation context. expression: watched expression to compile and evaluate. Returns: (False, status) on error or (True, value) on success.
def make_logging_api(client):
    """Create an instance of the Logging API adapter.

    :type client: :class:`~google.cloud.logging.client.Client`
    :param client: The client that holds configuration details.

    :rtype: :class:`_LoggingAPI`
    :returns: A logging API instance with the proper credentials.
    """
    generated = LoggingServiceV2Client(
        credentials=client._credentials,
        client_info=_CLIENT_INFO
    )
    return _LoggingAPI(generated, client)
Create an instance of the Logging API adapter. :type client: :class:`~google.cloud.logging.client.Client` :param client: The client that holds configuration details. :rtype: :class:`_LoggingAPI` :returns: A metrics API instance with the proper credentials.
def equities(country='US'):
    """Return a DataFrame of current US equities.

    NOTE(review): `country` is currently ignored — US data is fetched
    regardless of the argument ("Currently only US markets are
    supported"). Confirm before relying on the parameter.

    Returns
    -------
    eqs : :class:`pandas.DataFrame`
        Indexed by ticker symbol, with 'Security Name' and 'Exchange'
        columns.
    """
    nasdaqblob, otherblob = _getrawdata()
    eq_triples = []
    eq_triples.extend(_get_nas_triples(nasdaqblob))
    eq_triples.extend(_get_other_triples(otherblob))
    eq_triples.sort()
    # triples are (symbol, security_name, exchange)
    index = [triple[0] for triple in eq_triples]
    data = [triple[1:] for triple in eq_triples]
    return pd.DataFrame(data, index, columns=['Security Name', 'Exchange'],
                        dtype=str)
Return a DataFrame of current US equities. .. versionadded:: 0.4.0 .. versionchanged:: 0.5.0 Return a DataFrame Parameters ---------- country : str, optional Country code for equities to return, defaults to 'US'. Returns ------- eqs : :class:`pandas.DataFrame` DataFrame whose index is a list of all current ticker symbols. Columns are 'Security Name' (e.g. 'Zynerba Pharmaceuticals, Inc. - Common Stock') and 'Exchange' ('NASDAQ', 'NYSE', 'NYSE MKT', etc.) Examples -------- >>> eqs = pn.data.equities('US') Notes ----- Currently only US markets are supported.
def wait_for_stateful_block_init(context, mri, timeout=DEFAULT_TIMEOUT):
    """Wait until a Block backed by a StatefulController has initialized.

    Args:
        context (Context): The context to use to make the child block
        mri (str): The mri of the child block
        timeout (float): The maximum time to wait

    FAULT and DISABLED are treated as bad values, aborting the wait.
    """
    context.when_matches(
        [mri, "state", "value"], StatefulStates.READY,
        bad_values=[StatefulStates.FAULT, StatefulStates.DISABLED],
        timeout=timeout)
Wait until a Block backed by a StatefulController has initialized Args: context (Context): The context to use to make the child block mri (str): The mri of the child block timeout (float): The maximum time to wait
def notify(self, force_notify=None, use_email=None, use_sms=None, **kwargs):
    """Overridden to only call `notify` if the instance's model matches
    ``self.model``.

    :return: the result of ``super().notify()`` when the model matches,
        otherwise False.
    """
    notified = False
    instance = kwargs.get("instance")
    if instance._meta.label_lower == self.model:
        notified = super().notify(
            force_notify=force_notify,
            use_email=use_email,
            use_sms=use_sms,
            **kwargs,
        )
    return notified
Overridden to only call `notify` if model matches.
def safe_datetime_cast(self, col):
    """Parse string values into datetimes, coercing failures to NaT.

    Args:
        col (pandas.DataFrame): Data to transform.

    Returns:
        pandas.Series
    """
    casted_dates = pd.to_datetime(col[self.col_name], format=self.date_format, errors='coerce')
    if len(casted_dates[casted_dates.isnull()]):
        # rows that failed to parse but were not null to begin with
        slice_ = casted_dates.isnull() & ~col[self.col_name].isnull()
        # NOTE(review): the result of this apply is discarded, and chained
        # indexing (col[slice_][...]) would not mutate `col` anyway — it may
        # only exercise strptime_format so that it raises on bad rows.
        # Confirm intent.
        col[slice_][self.col_name].apply(self.strptime_format)
    return casted_dates
Parses string values into datetime. Args: col(pandas.DataFrame): Data to transform. Returns: pandas.Series
def compliance_schedule(self, column=None, value=None, **kwargs):
    """A sequence of activities with associated milestones which pertains
    to a given permit.

    >>> PCS().compliance_schedule('cmpl_schd_evt', '62099')
    """
    return self._resolve_call('PCS_CMPL_SCHD', column, value, **kwargs)
A sequence of activities with associated milestones which pertains to a given permit. >>> PCS().compliance_schedule('cmpl_schd_evt', '62099')
def calcDeviationLimits(value, tolerance, mode):
    """Return (lowerLimit, upperLimit) for a value and a given tolerance.

    :param value: a single value or a list of values; with a list, min()
        feeds the lower limit and max() the upper limit
    :param tolerance: a number used to calculate the limits
    :param mode: either ``absolute`` or ``relative``, specifying how
        ``tolerance`` is applied to ``value``
    :raises ValueError: when `mode` is neither 'relative' nor 'absolute'
    """
    values = toList(value)
    if mode == 'relative':
        lowerLimit = min(values) * (1 - tolerance)
        upperLimit = max(values) * (1 + tolerance)
    elif mode == 'absolute':
        lowerLimit = min(values) - tolerance
        upperLimit = max(values) + tolerance
    else:
        # BUG FIX: the message previously interpolated the undefined name
        # `filepath`, raising NameError instead of the intended error.
        raise ValueError('mode %s not specified' % (mode, ))
    return lowerLimit, upperLimit
Returns the upper and lower deviation limits for a value and a given tolerance, either as a relative or an absolute difference. :param value: can be a single value or a list of values if a list of values is given, the minimal value will be used to calculate the lower limit and the maximum value to calculate the upper limit :param tolerance: a number used to calculate the limits :param mode: either ``absolute`` or ``relative``, specifies how the ``tolerance`` should be applied to the ``value``.
def _serialize(self):
    """Build a dict of all mutable Properties of this object.

    Values that are Base instances are collapsed to their ``id``.
    """
    props = type(self).properties
    serialized = {}
    for attr_name in props:
        if not props[attr_name].mutable:
            continue
        value = getattr(self, attr_name)
        serialized[attr_name] = value.id if isinstance(value, Base) else value
    return serialized
A helper method to build a dict of all mutable Properties of this object
def _get_gosrcs_upper(self, goids, max_upper, go2parentids):
    """Get GO IDs for the upper portion of the GO DAG.

    Walks the GO IDs in descending descendant-count (dcnt) order,
    accumulating each GO plus its parents until the accumulated upper
    set reaches max_upper.
    """
    gosrcs_upper = set()
    get_nt = self.gosubdag.go2nt.get
    go2nt = {g:get_nt(g) for g in goids}
    # most general terms (highest dcnt) first
    go_nt = sorted(go2nt.items(), key=lambda t: -1*t[1].dcnt)
    goids_upper = set()
    for goid, _ in go_nt:
        goids_upper.add(goid)
        if goid in go2parentids:
            goids_upper |= go2parentids[goid]
        if len(goids_upper) < max_upper:
            gosrcs_upper.add(goid)
        else:
            break
    return gosrcs_upper
Get GO IDs for the upper portion of the GO DAG.
def coro(f):
    """Run a coroutine and handle possible errors for the click cli.

    Source https://github.com/pallets/click/issues/85#issuecomment-43378930
    """
    f = asyncio.coroutine(f)

    def wrapper(*args, **kwargs):
        loop = asyncio.get_event_loop()
        try:
            return loop.run_until_complete(f(*args, **kwargs))
        except KeyboardInterrupt:
            click.echo("Got CTRL+C, quitting..")
            # first positional argument is expected to be the device
            dev = args[0]
            loop.run_until_complete(dev.stop_listen_notifications())
        except SongpalException as ex:
            err("Error: %s" % ex)
            # re-raise only when the device object carries a debug level > 0
            if len(args) > 0 and hasattr(args[0], "debug"):
                if args[0].debug > 0:
                    raise ex
    return update_wrapper(wrapper, f)
Run a coroutine and handle possible errors for the click cli. Source https://github.com/pallets/click/issues/85#issuecomment-43378930
def get_completion_context(args):
    """Walk the tree of commands to a terminal command or multicommand,
    using the Click Context system in resilient_parsing mode.

    Returns None when we walk "off the tree" onto an unrecognized
    command; otherwise the deepest reachable Context, from which
    completion work can be done.
    """
    root_command = click.get_current_context().find_root().command
    ctx = root_command.make_context("globus", list(args), resilient_parsing=True)
    while isinstance(ctx.command, click.MultiCommand) and args:
        args = ctx.protected_args + ctx.args
        if not args:
            break
        command = ctx.command.get_command(ctx, args[0])
        if not command:
            # hardstop: unknown subcommand
            return None
        else:
            ctx = command.make_context(
                args[0], args[1:], parent=ctx, resilient_parsing=True
            )
    return ctx
Walk the tree of commands to a terminal command or multicommand, using the Click Context system. Effectively, we'll be using the resilient_parsing mode of commands to stop evaluation, then having them capture their options and arguments, passing us on to the next subcommand. If we walk "off the tree" with a command that we don't recognize, we have a hardstop condition, but otherwise, we walk as far as we can go and that's the location from which we should do our completion work.
def _get_raw_data(self, name):
    """Find file holding data for `name` and return its content.

    Returns (list_fvar, parsed_data); parsed_data is None when no file
    could be found.
    """
    filestem = ''
    for filestem, list_fvar in self._files.items():
        if name in list_fvar:
            break
    # NOTE(review): if `name` matches no entry, the last (filestem,
    # list_fvar) pair from the loop is used — confirm that is intended.
    # Prefer the legacy file layout when present.
    fieldfile = self.step.sdat.filename(filestem, self.step.isnap,
                                        force_legacy=True)
    if not fieldfile.is_file():
        fieldfile = self.step.sdat.filename(filestem, self.step.isnap)
    parsed_data = None
    if fieldfile.is_file():
        parsed_data = stagyyparsers.fields(fieldfile)
    elif self.step.sdat.hdf5 and self._filesh5:
        # fall back to the HDF5 layout
        for filestem, list_fvar in self._filesh5.items():
            if name in list_fvar:
                break
        parsed_data = stagyyparsers.read_field_h5(
            self.step.sdat.hdf5 / 'Data.xmf', filestem, self.step.isnap)
    return list_fvar, parsed_data
Find file holding data and return its content.
def n_rows(self):
    """Number of rows of orifices in the LFOM, clamped to [4, 10].

    The estimate sets the open area of one row equal to two orifices of
    diameter equal to the row height; clamping keeps the orifice count
    drillable and accurate.
    """
    N_estimated = (self.hl * np.pi / (2 * self.stout_w_per_flow(self.hl) * self.q)).to(u.dimensionless)
    variablerow = min(10, max(4, math.trunc(N_estimated.magnitude)))
    return variablerow
This equation states that the open area corresponding to one row can be set equal to two orifices of diameter=row height. If there are more than two orifices per row at the top of the LFOM then there are more orifices than are convenient to drill and more than necessary for good accuracy. Thus this relationship can be used to increase the spacing between the rows and thus increase the diameter of the orifices. This spacing function also sets the lower depth on the high flow rate LFOM with no accurate flows below a depth equal to the first row height. But it might be better to always set the number of rows to 10. The challenge is to figure out a reasonable system of constraints that reliably returns a valid solution.
def _replace_and_publish(self, path, prettyname, value, device):
    """Publish `value` under `path` with its final component replaced
    by `prettyname`. No-op when value is None.
    """
    if value is None:
        return
    parent = ".".join(path.split(".")[:-1])
    newpath = ".".join([parent, prettyname])
    metric = Metric(newpath, value, precision=4, host=device)
    self.publish_metric(metric)
Inputs a complete path for a metric and a value. Replace the metric name and publish.
def example_number_for_non_geo_entity(country_calling_code):
    """Gets a valid number for the specified country calling code for a
    non-geographical entity.

    Arguments:
    country_calling_code -- The country calling code for a
              non-geographical entity.

    Returns a valid number for the non-geographical entity. Returns None
    when the metadata does not contain such information, or the country
    calling code passed in does not belong to a non-geographical entity.
    """
    metadata = PhoneMetadata.metadata_for_nongeo_region(country_calling_code, None)
    if metadata is not None:
        # try each number description in turn until one parses cleanly
        for desc in (metadata.mobile, metadata.toll_free, metadata.shared_cost,
                     metadata.voip, metadata.voicemail, metadata.uan,
                     metadata.premium_rate):
            try:
                if (desc is not None and desc.example_number is not None):
                    return parse(_PLUS_SIGN + unicod(country_calling_code) + desc.example_number, UNKNOWN_REGION)
            except NumberParseException:
                pass
    return None
Gets a valid number for the specified country calling code for a non-geographical entity. Arguments: country_calling_code -- The country calling code for a non-geographical entity. Returns a valid number for the non-geographical entity. Returns None when the metadata does not contain such information, or the country calling code passed in does not belong to a non-geographical entity.
def get_available_versions(self, project_name):
    """Query PyPI to see if package has any available versions.

    Args:
        project_name (str): The name of the project on PyPI.

    Returns:
        dict: keys are tuples of parsed versions and values are the
        versions returned by PyPI.
    """
    available_versions = self.pypi_client.package_releases(project_name)
    if not available_versions:
        # retry with a capitalized name, a common PyPI naming mismatch
        available_versions = self.pypi_client.package_releases(
            project_name.capitalize()
        )
    return dict(
        (self._parse_version(version), version)
        for version in available_versions
    )
Query PyPI to see if package has any available versions. Args: project_name (str): The name the project on PyPI. Returns: dict: Where keys are tuples of parsed versions and values are the versions returned by PyPI.
def apply_filter(self):
    """Naively apply the query's filters to the backing iterable."""
    self._ensure_modification_is_safe()
    if self.query.filters:
        self._iterable = Filter.filter(self.query.filters, self._iterable)
Naively apply query filters.
def has_submenu_items(self, current_page, allow_repeating_parents,
                      original_menu_tag, menu_instance=None, request=None):
    """Return whether this page has suitable child pages to render in a
    submenu (sets `has_children_in_menu` in menu templates).

    If you override `modify_submenu_items` to add non-page items, alter
    this method too so templates know there are sub items to render.
    """
    return menu_instance.page_has_children(self)
When rendering pages in a menu template a `has_children_in_menu` attribute is added to each page, letting template developers know whether or not the item has a submenu that must be rendered. By default, we return a boolean indicating whether the page has suitable child pages to include in such a menu. But, if you are overriding the `modify_submenu_items` method to programmatically add items that aren't child pages, you'll likely need to alter this method too, so the template knows there are sub items to be rendered.
def make_while_loop(test_and_body_instrs, else_body_instrs, context):
    """Make an ast.While node.

    Parameters
    ----------
    test_and_body_instrs : deque
        Queue of instructions forming the loop test expression and body.
    else_body_instrs : deque
        Queue of instructions forming the else block of the loop.
    context : DecompilationContext
    """
    top_of_loop = test_and_body_instrs[0]
    test, body_instrs = make_while_loop_test_expr(test_and_body_instrs)
    body, orelse_body = make_loop_body_and_orelse(
        top_of_loop, body_instrs, else_body_instrs, context,
    )
    return ast.While(test=test, body=body, orelse=orelse_body)
Make an ast.While node. Parameters ---------- test_and_body_instrs : deque Queue of instructions forming the loop test expression and body. else_body_instrs : deque Queue of instructions forming the else block of the loop. context : DecompilationContext
def addend_ids(self):
    """tuple of int ids of elements contributing to this subtotal.

    Any element id not present in the dimension, or present but
    representing missing data, is excluded via `self.valid_elements`.
    """
    return tuple(
        arg for arg in self._subtotal_dict.get("args", [])
        if arg in self.valid_elements.element_ids
    )
tuple of int ids of elements contributing to this subtotal. Any element id not present in the dimension or present but representing missing data is excluded.
def parse_readme(cls, readme_path: str = 'README.rst', encoding: str = 'utf-8') -> str:
    """Parse readme and resolve relative links in it if it is feasible.

    Links are resolved only when the readme is rst and the package is
    hosted on GitHub.
    """
    with HERE.joinpath(readme_path).open(encoding=encoding) as readme_file:
        long_description = readme_file.read()
    if readme_path.endswith('.rst') and cls.download_url.startswith('https://github.com/'):
        # pin links to the tagged release so they keep working after updates
        base_url = '{}/blob/v{}/'.format(cls.download_url, cls.version)
        long_description = resolve_relative_rst_links(long_description, base_url)
    return long_description
Parse readme and resolve relative links in it if it is feasible. Links are resolved if readme is in rst format and the package is hosted on GitHub.
def pexpect(self):
    """Run command and return pexpect process object.

    NOTE: Requires you to pip install 'pexpect' or will fail.
    """
    import pexpect
    # interactive spawning is incompatible with error-swallowing mode
    assert not self._ignore_errors
    _check_directory(self.directory)
    arguments = self.arguments
    return pexpect.spawn(
        arguments[0], args=arguments[1:],
        env=self.env, cwd=self.directory
    )
Run command and return pexpect process object. NOTE: Requires you to pip install 'pexpect' or will fail.
def list_renderers(*args):
    """List the renderers loaded on the minion.

    .. versionadded:: 2015.5.0

    Render names can be specified as globs; with no arguments, all
    loaded renderers are returned.

    CLI Example:

    .. code-block:: bash

        salt '*' sys.list_renderers
        salt '*' sys.list_renderers 'yaml*'
    """
    renderers_ = salt.loader.render(__opts__, [])
    if not args:
        return sorted(renderers_)
    matched = set()
    for pattern in args:
        matched.update(fnmatch.filter(renderers_, pattern))
    return sorted(matched)
List the renderers loaded on the minion .. versionadded:: 2015.5.0 CLI Example: .. code-block:: bash salt '*' sys.list_renderers Render names can be specified as globs. .. code-block:: bash salt '*' sys.list_renderers 'yaml*'
def run_command(self, command, message):
    """Feed the JSON-encoded message to `command` over stdin.

    FIX: the message used to be interpolated into a shell string
    (``echo '<msg>' | cmd``), so quote characters inside the message
    could break the shell or inject commands. The payload is now written
    directly to the subprocess's stdin.

    :return: the command's exit status
    """
    proc = subprocess.Popen(
        command, shell=True, executable='/bin/bash',
        stdin=subprocess.PIPE)
    # echo appended a trailing newline; preserve that behaviour
    payload = fedmsg.encoding.dumps(message) + '\n'
    if not isinstance(payload, bytes):
        payload = payload.encode('utf-8')
    proc.communicate(input=payload)
    return proc.returncode
Use subprocess; feed the message to our command over stdin
def main():
    """Save stdin termios settings, call real_main, and restore the
    settings when it returns (even on error)."""
    save_settings = None
    stdin_fd = -1
    try:
        import termios
        stdin_fd = sys.stdin.fileno()
        save_settings = termios.tcgetattr(stdin_fd)
    except Exception:
        # FIX: was a bare `except:` (also swallowed SystemExit /
        # KeyboardInterrupt). termios may be unavailable (non-POSIX) or
        # stdin may not be a tty; run without restoring in that case.
        pass
    try:
        real_main()
    finally:
        if save_settings is not None:
            termios.tcsetattr(stdin_fd, termios.TCSANOW, save_settings)
This main function saves the stdin termios settings, calls real_main, and restores stdin termios settings when it returns.
def load_user_options(self):
    """Load user options from self.user_options dict.

    This can be set via POST to the API or via options_from_form.
    Only supported argument by default is 'profile'. Override in
    subclasses to support other options.
    """
    if self._profile_list is None:
        # profile_list may be a static list or a (possibly async) callable
        if callable(self.profile_list):
            self._profile_list = yield gen.maybe_future(self.profile_list(self))
        else:
            self._profile_list = self.profile_list
    if self._profile_list:
        yield self._load_profile(self.user_options.get('profile', None))
Load user options from self.user_options dict This can be set via POST to the API or via options_from_form Only supported argument by default is 'profile'. Override in subclasses to support other options.
def fetch(self):
    """Fetch a ExecutionInstance.

    :returns: Fetched ExecutionInstance
    :rtype: twilio.rest.studio.v1.flow.execution.ExecutionInstance
    """
    params = values.of({})
    payload = self._version.fetch(
        'GET',
        self._uri,
        params=params,
    )
    return ExecutionInstance(
        self._version,
        payload,
        flow_sid=self._solution['flow_sid'],
        sid=self._solution['sid'],
    )
Fetch a ExecutionInstance :returns: Fetched ExecutionInstance :rtype: twilio.rest.studio.v1.flow.execution.ExecutionInstance
def debug(self, msg, *args, **kwargs):
    """Log a message with DEBUG level, including stack info by default.

    NOTE(review): `args` is forwarded unexpanded (``self.log(DEBUG, msg,
    args, **kwargs)``), which matches logging.Logger._log's signature
    but not the public Logger.log (which takes *args). Confirm the
    contract of this class's `log` method.
    """
    kwargs.setdefault('inc_stackinfo', True)
    self.log(DEBUG, msg, args, **kwargs)
Log a message with DEBUG level. Automatically includes stack info unless it is specifically not included.
def chunks(iterable, size=50):
    """Yield successive lists of at most `size` items from `iterable`.

    The final chunk may be shorter; nothing is yielded for an empty
    iterable.
    """
    current = []
    for item in iterable:
        current.append(item)
        if len(current) == size:
            yield current
            current = []
    if current:
        yield current
Break an iterable into lists of size
def visit_DictComp(self, node: AST, dfltChaining: bool = True) -> str:
    """Return `node`s representation as dict comprehension."""
    # "{<key>: <value> <generators...>}" — generators joined by spaces
    return f"{{{self.visit(node.key)}: {self.visit(node.value)} " \
           f"{' '.join(self.visit(gen) for gen in node.generators)}}}"
Return `node`s representation as dict comprehension.
def i2c_pullups(self):
    """Query the I2C pullup resistor state of the adapter.

    Raises an :exc:`IOError` (via `_raise_error_if_negative`) if the
    hardware adapter does not support pullup resistors.
    """
    ret = api.py_aa_i2c_pullup(self.handle, I2C_PULLUP_QUERY)
    _raise_error_if_negative(ret)
    return ret
Setting this to `True` will enable the I2C pullup resistors. If set to `False` the pullup resistors will be disabled. Raises an :exc:`IOError` if the hardware adapter does not support pullup resistors.
def command_check(string, vargs):
    """Check if the given string is IPA valid; if not, print the invalid
    characters.

    :param str string: the string to act upon
    :param dict vargs: the command line arguments
    """
    is_valid = is_valid_ipa(string)
    print(is_valid)
    if not is_valid:
        valid_chars, invalid_chars = remove_invalid_ipa_characters(
            unicode_string=string,
            return_invalid=True
        )
        print_invalid_chars(invalid_chars, vargs)
Check if the given string is IPA valid. If the given string is not IPA valid, print the invalid characters. :param str string: the string to act upon :param dict vargs: the command line arguments
def _update_pvalcorr(ntmt, corrected_pvals): if corrected_pvals is None: return for rec, val in zip(ntmt.results, corrected_pvals): rec.set_corrected_pval(ntmt.nt_method, val)
Add data members to store multiple test corrections.
def delete_queue(queues):
    """Delete the given queues (all registered queues when none given)."""
    current_queues.delete(queues=queues)
    click.secho(
        'Queues {} have been deleted.'.format(
            queues or current_queues.queues.keys()),
        fg='green'
    )
Delete the given queues.
def create_java_executor(self, dist=None):
    """Create java executor that uses this task's ng daemon, if allowed.

    Call only in execute() or later. TODO: Enforce this.
    """
    dist = dist or self.dist
    if self.execution_strategy == self.NAILGUN:
        classpath = os.pathsep.join(self.tool_classpath('nailgun-server'))
        return NailgunExecutor(self._identity,
                               self._executor_workdir,
                               classpath,
                               dist,
                               startup_timeout=self.get_options().nailgun_subprocess_startup_timeout,
                               connect_timeout=self.get_options().nailgun_timeout_seconds,
                               connect_attempts=self.get_options().nailgun_connect_attempts)
    else:
        return SubprocessExecutor(dist)
Create java executor that uses this task's ng daemon, if allowed. Call only in execute() or later. TODO: Enforce this.
def get_grp2codes(self):
    """Return a plain dict mapping group name to the set of codes in it."""
    grp2codes = {}
    for code, ntd in self.code2nt.items():
        grp2codes.setdefault(ntd.group, set()).add(code)
    return grp2codes
Get dict of group name to namedtuples.
def only(self, *keys):
    """Get the items with the specified keys.

    :param keys: The positional keys (indices) to keep
    :type keys: tuple

    :rtype: Collection
    """
    kept = [value for index, value in enumerate(self.items) if index in keys]
    return self.__class__(kept)
Get the items with the specified keys. :param keys: The keys to keep :type keys: tuple :rtype: Collection
def lstm_cell(x, h, c, state_size, w_init=None, b_init=None, fix_parameters=False):
    """Long Short-Term Memory cell.

    Computes the four gate pre-activations with one fused affine over
    [x, h], then applies the standard LSTM state update.

    Args:
        x (~nnabla.Variable): (batch_size, input_size) input.
        h (~nnabla.Variable): (batch_size, state_size) previous hidden state.
        c (~nnabla.Variable): (batch_size, state_size) previous cell state.
        state_size (int): Internal state size.
        w_init: optional initializer for the affine weights.
        b_init: optional initializer for the affine bias.
        fix_parameters (bool): freeze weights and biases when True.

    Returns:
        (h_t, c_t): next hidden and cell states.
    """
    xh = F.concatenate(*(x, h), axis=1)
    # one affine produces all four gate pre-activations (i, o, f, c~)
    iofc = affine(xh, (4, state_size), w_init=w_init, b_init=b_init, fix_parameters=fix_parameters)
    i_t, o_t, f_t, gate = F.split(iofc, axis=1)
    c_t = F.sigmoid(f_t) * c + F.sigmoid(i_t) * F.tanh(gate)
    h_t = F.sigmoid(o_t) * F.tanh(c_t)
    return h_t, c_t
Long Short-Term Memory. Long Short-Term Memory, or LSTM, is a building block for recurrent neural networks (RNN) layers. LSTM unit consists of a cell and input, output, forget gates whose functions are defined as following: .. math:: f_t&&=\\sigma(W_fx_t+U_fh_{t-1}+b_f) \\\\ i_t&&=\\sigma(W_ix_t+U_ih_{t-1}+b_i) \\\\ o_t&&=\\sigma(W_ox_t+U_oh_{t-1}+b_o) \\\\ c_t&&=f_t\\odot c_{t-1}+i_t\\odot\\tanh(W_cx_t+U_ch_{t-1}+b_c) \\\\ h_t&&=o_t\\odot\\tanh(c_t). References: S. Hochreiter, and J. Schmidhuber. "Long Short-Term Memory." Neural Computation. 1997. Args: x (~nnabla.Variable): Input N-D array with shape (batch_size, input_size). h (~nnabla.Variable): Input N-D array with shape (batch_size, state_size). c (~nnabla.Variable): Input N-D array with shape (batch_size, state_size). state_size (int): Internal state size is set to `state_size`. w_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`, optional): Initializer for weight. By default, it is initialized with :obj:`nnabla.initializer.UniformInitializer` within the range determined by :obj:`nnabla.initializer.calc_uniform_lim_glorot`. b_init (:obj:`nnabla.initializer.BaseInitializer` or :obj:`numpy.ndarray`, optional): Initializer for bias. By default, it is initialized with zeros if `with_bias` is `True`. fix_parameters (bool): When set to `True`, the weights and biases will not be updated. Returns: :class:`~nnabla.Variable`
def within_rupture_distance(self, surface, distance, **kwargs):
    """Select events within a rupture distance from a fault surface.

    :param surface:
        Fault surface as instance of nhlib.geo.surface.base.BaseSurface
    :param float distance:
        Rupture distance (km)
    :returns:
        Catalogue containing only the selected events
    """
    # Check for upper and lower depths (validated/defaulted by helper)
    upper_depth, lower_depth = _check_depth_limits(kwargs)
    rrupt = surface.get_min_distance(self.catalogue.hypocentres_as_mesh())
    is_valid = np.logical_and(
        rrupt <= distance,
        np.logical_and(self.catalogue.data['depth'] >= upper_depth,
                       self.catalogue.data['depth'] < lower_depth))
    return self.select_catalogue(is_valid)
Select events within a rupture distance from a fault surface :param surface: Fault surface as instance of nhlib.geo.surface.base.BaseSurface :param float distance: Rupture distance (km) :returns: Instance of :class:`openquake.hmtk.seismicity.catalogue.Catalogue` containing only selected events
def _includes_base_class(self, iter_classes, base_class): return any( issubclass(auth_class, base_class) for auth_class in iter_classes, )
Returns whether any class in iter_class is a subclass of the given base_class.
def _checkIfClusterExists(self):
    """Run the 'check-cluster' playbook for this resource group.

    NOTE(review): the paired docstring claimed this *deletes* the
    resource group, but the playbook invoked is 'check-cluster' —
    confirm the intended semantics.

    :raises RuntimeError: when the playbook fails (e.g. the cluster
        already exists).
    """
    ansibleArgs = {
        'resgrp': self.clusterName,
        'region': self._zone
    }
    try:
        self.callPlaybook(self.playbook['check-cluster'], ansibleArgs, wait=True)
    except RuntimeError:
        logger.info("The cluster could not be created. Try deleting the cluster if it already exits.")
        raise
Try deleting the resource group. This will fail if it exists and raise an exception.
def multi_series_single_value(self, keys=None, ts=None, direction=None,
                              attrs=None, tags=None):
    """Return a single value for multiple series.

    FIX: ``attrs={}`` and ``tags=[]`` were mutable default arguments
    shared across calls; the defaults are now None and normalized here
    (behavior for callers is unchanged).

    :param keys: (optional) list of keys for the series to use
    :param ts: (optional) the time to begin searching from
    :type ts: ISO8601 string or Datetime object
    :param string direction: "exact", "before", "after", or "nearest"
    :param tags: filter by one or more tags
    :type tags: list or string
    :param dict attrs: filter by one or more key-value attributes
    :return: the session's GET response
    """
    attrs = {} if attrs is None else attrs
    tags = [] if tags is None else tags
    url = 'single/'
    if ts is not None:
        vts = check_time_param(ts)
    else:
        vts = None
    params = {
        'key': keys,
        'tag': tags,
        'attr': attrs,
        'ts': vts,
        'direction': direction
    }
    url_args = endpoint.make_url_args(params)
    url = '?'.join([url, url_args])
    resp = self.session.get(url)
    return resp
Return a single value for multiple series. You can supply a timestamp as the ts argument, otherwise the search defaults to the current time. The direction argument can be one of "exact", "before", "after", or "nearest". The id, key, tag, and attr arguments allow you to filter for series. See the :meth:`list_series` method for an explanation of their use. :param string keys: (optional) a list of keys for the series to use :param ts: (optional) the time to begin searching from :type ts: ISO8601 string or Datetime object :param string direction: criterion for the search :param tags: filter by one or more tags :type tags: list or string :param dict attrs: filter by one or more key-value attributes :rtype: :class:`tempodb.protocol.cursor.SingleValueCursor` with an iterator over :class:`tempodb.protocol.objects.SingleValue` objects
def reference_preprocessing(job, samples, config):
    """Spawn the jobs that create index and dict file for reference.

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param list[list] samples: A nested list of samples containing sample information
    :param Namespace config: Argparse Namespace object containing argument inputs
    """
    job.fileStore.logToMaster('Processed reference files')
    config.fai = job.addChildJobFn(run_samtools_faidx, config.reference).rv()
    config.dict = job.addChildJobFn(run_picard_create_sequence_dictionary, config.reference).rv()
    # samples are downloaded only after the indexing children complete
    job.addFollowOnJobFn(map_job, download_sample, samples, config)
Spawn the jobs that create index and dict file for reference :param JobFunctionWrappingJob job: passed automatically by Toil :param Namespace config: Argparse Namespace object containing argument inputs :param list[list] samples: A nested list of samples containing sample information
async def modify(cls, db, key, data: dict):
    """Partially modify a document by providing a subset of its data
    fields to be modified.

    :param db: Handle to the MongoDB database
    :param key: The primary key of the database object being modified.
        Usually its ``_id``
    :param data: The data set to be modified
    :type data: ``dict``
    :return: the updated model instance, or None when no document matched
    :raises BadRequest: when data is None
    """
    if data is None:
        raise BadRequest('Failed to modify document. No data fields to modify')
    cls._validate(data)
    query = {cls.primary_key: key}
    for i in cls.connection_retries():
        try:
            result = await db[cls.get_collection_name()].find_one_and_update(
                filter=query, update={'$set': data},
                return_document=ReturnDocument.AFTER
            )
            if result:
                updated_obj = cls.create_model(result)
                updated_obj._db = db
                # fire-and-forget post_save signal
                asyncio.ensure_future(post_save.send(
                    sender=cls, db=db, instance=updated_obj, created=False)
                )
                return updated_obj
            return None
        except ConnectionFailure as ex:
            # back off and retry; re-raise once retries are exhausted
            exceed = await cls.check_reconnect_tries_and_wait(i, 'update')
            if exceed:
                raise ex
Partially modify a document by providing a subset of its data fields to be modified :param db: Handle to the MongoDB database :param key: The primary key of the database object being modified. Usually its ``_id`` :param data: The data set to be modified :type data: ``dict``
async def on_raw_314(self, message):
    """WHOWAS user info (RPL_WHOWASUSER): record user details for
    pending WHOWAS queries."""
    target, nickname, username, hostname, _, realname = message.params
    info = {
        'username': username,
        'hostname': hostname,
        'realname': realname
    }
    if nickname in self._pending['whowas']:
        self._whowas_info[nickname].update(info)
WHOWAS user info.
def listed(self):
    """Print packages from queue."""
    print("\nPackages in the queue:\n")
    for pkg in self.packages():
        if pkg:
            print("{0}{1}{2}".format(self.meta.color["GREEN"], pkg, self.meta.color["ENDC"]))
            self.quit = True
    # trailing blank line only when something was printed
    if self.quit:
        print("")
Print packages from queue
def _store(self, messages, response, *args, **kwargs):
    """Drop sticky messages and return the rest.

    This storage never saves objects.
    """
    kept = []
    for message in messages:
        if message.level in STICKY_MESSAGE_LEVELS:
            continue
        kept.append(message)
    return kept
Delete all messages that are sticky and return the other messages This storage never save objects
def cpjoin(*args):
    """Custom path join: strips a leading '/' from every fragment before
    joining, and re-roots the result (with os.path.sep) iff the first
    fragment started with '/'.
    """
    rooted = args[0].startswith('/')
    trimmed = [fragment[1:] if fragment.startswith('/') else fragment
               for fragment in args]
    joined = os.path.join(*trimmed)
    return os.path.sep + joined if rooted else joined
custom path join
def pop(self, index):
    r"""Remove and return the object at the given index.

    FIX: a stray ``r`` token (the detached prefix of the original raw
    docstring, ``r\"\"\"``) preceded the first statement, making the
    function a SyntaxError; it has been re-attached to the docstring.

    Notes
    -----
    This method uses ``purge_object`` to perform the actual removal of
    the object. It is recommended to just use that directly instead.

    See Also
    --------
    purge_object
    """
    obj = self[index]
    self.purge_object(obj, deep=False)
    return obj
r""" The object at the given index is removed from the list and returned. Notes ----- This method uses ``purge_object`` to perform the actual removal of the object. It is recommended to just use that directly instead. See Also -------- purge_object
def set_log_level(verbose, quiet):
    """Set the logging level of the script based on command line options.

    Arguments:
    - `verbose`: -v count; 0 -> WARNING, 1 -> INFO, >1 -> DEBUG
    - `quiet`: when truthy, forces CRITICAL regardless of `verbose`
    """
    if quiet:
        verbose = -1
    if verbose < 0:
        level = logging.CRITICAL
    elif verbose == 0:
        level = logging.WARNING
    elif verbose == 1:
        level = logging.INFO
    else:
        level = logging.DEBUG
    LOGGER.setLevel(level)
Ses the logging level of the script based on command line options. Arguments: - `verbose`: - `quiet`:
def set_duration(self, duration):
    """Map a requested duration onto Android's two toast lengths.

    Android only allows 1-long or 0-short; the widget is repeatedly
    re-shown until the real duration expires (see `set_show`), so this
    method only picks a base length.

    # NOTE(review): duration == 0 maps to setDuration(-2) and anything
    # else to setDuration(0) — confirm these magic values against the
    # widget API; the mapping looks inverted at first glance.
    """
    if duration == 0:
        self.widget.setDuration(-2)
    else:
        self.widget.setDuration(0)
Android for whatever stupid reason doesn't let you set the time it only allows 1-long or 0-short. So we have to repeatedly call show until the duration expires, hence this method does nothing see `set_show`.
def _check_std(self, paths, cmd_pieces): cmd_pieces.extend(paths) process = Popen(cmd_pieces, stdout=PIPE, stderr=PIPE) out, err = process.communicate() lines = out.strip().splitlines() + err.strip().splitlines() result = [] for line in lines: match = self.tool_err_re.match(line) if not match: if self.break_on_tool_re_mismatch: raise ValueError( 'Unexpected `%s` output: %r' % ( ' '.join(cmd_pieces), paths, line)) continue vals = match.groupdict() vals['lineno'] = int(vals['lineno']) vals['colno'] = \ int(vals['colno']) if vals['colno'] is not None else '' result.append(vals) return result
Run `cmd` as a check on `paths`.
def diff_config(base, target):
    """Find the differences between two configurations.

    This finds a delta configuration from `base` to `target`, such that
    calling :func:`overlay_config` with `base` and the result of this
    function yields `target`:

    * If both are identical (of any type), returns an empty dictionary.
    * If either isn't a dictionary, returns `target`.
    * Any key in `target` not present in `base` is included with its
      value from `target`; any key in `base` not in `target` is included
      with value :const:`None`; keys present in both are merged
      recursively.

    FIX: ``collections.Mapping`` (removed in Python 3.10) and the
    six-style ``iterkeys``/``iteritems`` helpers are replaced by
    ``collections.abc.Mapping`` and native dict iteration.

    :param dict base: original configuration
    :param dict target: new configuration
    :return: overlay configuration
    :returntype dict:
    """
    import collections.abc
    if not isinstance(base, collections.abc.Mapping):
        return {} if base == target else target
    if not isinstance(target, collections.abc.Mapping):
        return target
    result = dict()
    for key in base:
        if key not in target:
            # key removed: represented by an explicit None
            result[key] = None
    for key, value in target.items():
        if key in base:
            merged = diff_config(base[key], value)
            if merged != {}:
                result[key] = merged
        else:
            result[key] = value
    return result
Find the differences between two configurations. This finds a delta configuration from `base` to `target`, such that calling :func:`overlay_config` with `base` and the result of this function yields `target`. This works as follows: * If both are identical (of any type), returns an empty dictionary. * If either isn't a dictionary, returns `target`. * Any key in `target` not present in `base` is included in the output with its value from `target`. * Any key in `base` not present in `target` is included in the output with value :const:`None`. * Any keys present in both dictionaries are recursively merged. >>> diff_config({'a': 'b'}, {}) {'a': None} >>> diff_config({'a': 'b'}, {'a': 'b', 'c': 'd'}) {'c': 'd'} :param dict base: original configuration :param dict target: new configuration :return: overlay configuration :returntype dict:
def convex_hull(features):
    """
    Returns points on the convex hull of an array of points in CCW order.

    Uses the monotone-chain construction: a lower and an upper chain are
    built over the sorted points and concatenated (dropping the duplicated
    endpoints of the upper chain).
    """
    points = sorted(feature.point() for feature in features)
    lower = reduce(_keep_left, points, [])
    upper = reduce(_keep_left, reversed(points), [])
    # Skip the first and last of the upper chain: they duplicate the
    # endpoints already present in the lower chain.
    lower.extend(upper[i] for i in range(1, len(upper) - 1))
    return lower
Returns points on convex hull of an array of points in CCW order.
def get_temp_directory():
    """Return an absolute path to an existing temporary directory."""
    directory = os.path.join(gettempdir(), "ttkthemes")
    try:
        # Create unconditionally instead of exists()+makedirs(): this avoids
        # the race where another process creates the directory between the
        # check and the call (TOCTOU).
        os.makedirs(directory)
    except OSError:
        # Benign when the directory already exists; re-raise real failures.
        if not os.path.isdir(directory):
            raise
    return directory
Return an absolute path to an existing temporary directory
def keys(self, key_type=None):
    """
    Returns a copy of the dictionary's keys.

    @param key_type if specified, only keys for this type will be
        returned.  Otherwise all (multiple) keys are returned.
    """
    if key_type is None:
        # Collect every key of items_dict into a fresh dict so the caller
        # receives a copy rather than a live view of items_dict.
        collected = {}
        for existing_key in self.items_dict.keys():
            collected[existing_key] = None
        return collected.keys()
    lookup = str(key_type)
    if lookup in self.__dict__:
        return self.__dict__[lookup].keys()
    # Implicitly returns None when no bucket exists for key_type,
    # mirroring the historical behaviour.
Returns a copy of the dictionary's keys. @param key_type if specified, only keys for this type will be returned. Otherwise list of tuples containing all (multiple) keys will be returned.
def add_virtual_loss(self, up_to):
    """
    Propagate a virtual loss up to the root node.

    Args:
        up_to: The node to propagate until. (Keep track of this! You'll
            need it to reverse the virtual loss later.)
    """
    self.losses_applied += 1
    # A "loss" from the perspective of the side to move at this node.
    self.W += self.position.to_play
    if self.parent is not None and self is not up_to:
        self.parent.add_virtual_loss(up_to)
Propagate a virtual loss up to the root node. Args: up_to: The node to propagate until. (Keep track of this! You'll need it to reverse the virtual loss later.)
def hr_avg(self):
    """Average heart rate of the workout (truncated to an int)."""
    samples = self.hr_values()
    total = sum(samples)
    return int(total / len(samples))
Average heart rate of the workout
def add_constant(self, name, value, path=0):
    """
    Adds a new constant for this project.

    The constant will be available for use in the Jamfile module for this
    project.  If 'path' is true, the constant will be interpreted
    relatively to the location of the project.
    """
    assert isinstance(name, basestring)
    assert is_iterable_typed(value, basestring)
    assert isinstance(path, int)
    if path:
        location = self.location_ or self.get('source-location')
        relative = os.path.join(location, value[0])
        # Pin the constant to an absolute, normalised path.
        value = [os.path.normpath(os.path.join(os.getcwd(), relative))]
    self.constants_[name] = value
    bjam.call("set-variable", self.project_module(), name, value)
Adds a new constant for this project. The constant will be available for use in Jamfile module for this project. If 'path' is true, the constant will be interpreted relatively to the location of project.
def loadJSON(self, jdata):
    """
    Loads JSON data for this column type.

    :param jdata: <dict>
    """
    super(StringColumn, self).loadJSON(jdata)
    # Keep the current max length when the JSON omits the key or stores
    # a falsy value.
    self.__maxLength = jdata.get('maxLength') or self.__maxLength
Loads JSON data for this column type. :param jdata: <dict>
def classifer_metrics(label, pred):
    """
    Computes f1, precision and recall on the entity class.

    :param label: integer class labels, shape (N,) -- assumed; confirm
        against callers.
    :param pred: per-class scores, shape (N, num_classes) -- assumed.
    :return: (precision, recall, f1); a metric is NaN when undefined.
    """
    prediction = np.argmax(pred, axis=1)
    label = label.astype(int)
    pred_is_entity = prediction != not_entity_index
    label_is_entity = label != not_entity_index
    # A correct entity prediction: predicted an entity AND matched the
    # label (equivalent to the original masked-equality construction).
    # float() also fixes Python 2 integer floor division below.
    correct_entities = float(np.sum((prediction == label) & pred_is_entity))
    num_entities = np.sum(label_is_entity)
    entity_preds = np.sum(pred_is_entity)
    # Bug fix: the original divided *before* checking for zero, emitting
    # NumPy invalid-value warnings on empty denominators.
    precision = correct_entities / entity_preds if entity_preds > 0 else np.nan
    recall = correct_entities / num_entities if num_entities > 0 else np.nan
    denom = precision + recall
    # NaN or zero denominator both yield NaN, matching the original's
    # propagation of nan through 2*p*r/(p+r).
    f1 = 2 * precision * recall / denom if denom > 0 else np.nan
    return precision, recall, f1
computes f1, precision and recall on the entity class
def example_lchab_to_lchuv():
    """
    This function shows a very complex chain of conversions in action.

    LCHab to LCHuv involves four different calculations, making this the
    conversion requiring the most steps.
    """
    print("=== Complex Example: LCHab->LCHuv ===")
    # Start with a known LCHab colour...
    source_color = LCHabColor(0.903, 16.447, 352.252)
    print(source_color)
    # ...and convert it through to LCHuv.
    converted = convert_color(source_color, LCHuvColor)
    print(converted)
    print("=== End Example ===\n")
This function shows very complex chain of conversions in action. LCHab to LCHuv involves four different calculations, making this the conversion requiring the most steps.
def permute(self, qubits: Qubits) -> 'Channel':
    """Return a copy of this channel with qubits in new order."""
    permuted = self.vec.permute(qubits)
    return Channel(permuted.tensor, qubits=permuted.qubits)
Return a copy of this channel with qubits in new order
def GetMemTargetSizeMB(self):
    """
    Retrieves the size of the target memory allocation for this
    virtual machine.

    :raises VMGuestLibException: if the native call does not succeed.
    :return: target memory size in MB as an int.
    """
    value_holder = c_uint()
    status = vmGuestLib.VMGuestLib_GetMemTargetSizeMB(
        self.handle.value, byref(value_holder))
    if status != VMGUESTLIB_ERROR_SUCCESS:
        raise VMGuestLibException(status)
    return value_holder.value
Retrieves the size of the target memory allocation for this virtual machine.
def get_by_ip(cls, ip):
    'Returns Host instance for the given ip address.'
    cached = cls.hosts_by_ip.get(ip)
    if cached is not None:
        return cached
    # Cache miss: create, memoize and return a fresh single-element list.
    created = [Host(ip)]
    cls.hosts_by_ip[ip] = created
    return created
Returns Host instance for the given ip address.
def fetch_existing_token_of_user(self, client_id, grant_type, user_id):
    """
    Retrieve an access token issued to a client and user for a specific
    grant.

    :param client_id: The identifier of a client as a `str`.
    :param grant_type: The type of grant.
    :param user_id: The identifier of the user the access token has been
        issued to.
    :return: An instance of :class:`oauth2.datatype.AccessToken`.
    :raises: :class:`oauth2.error.AccessTokenNotFound` if no access token
        could be retrieved.
    """
    row = self.fetchone(self.fetch_existing_token_of_user_query,
                        client_id, grant_type, user_id)
    if row is None:
        raise AccessTokenNotFound
    # The first column of the row is the access token's primary key.
    token_id = row[0]
    scopes = self._fetch_scopes(access_token_id=token_id)
    data = self._fetch_data(access_token_id=token_id)
    return self._row_to_token(data=data, scopes=scopes, row=row)
Retrieve an access token issued to a client and user for a specific grant. :param client_id: The identifier of a client as a `str`. :param grant_type: The type of grant. :param user_id: The identifier of the user the access token has been issued to. :return: An instance of :class:`oauth2.datatype.AccessToken`. :raises: :class:`oauth2.error.AccessTokenNotFound` if not access token could be retrieved.
def optimizer(self) -> Union[mx.optimizer.Optimizer, SockeyeOptimizer]:
    """Returns the optimizer of the underlying module."""
    return self.current_module._optimizer
Returns the optimizer of the underlying module.
def block2(self, value):
    """
    Set the Block2 option.

    :param value: the Block2 value as a (num, m, size) tuple
    """
    option = Option()
    option.number = defines.OptionRegistry.BLOCK2.number
    num, m, size = value
    # Map the block size onto the CoAP SZX exponent (size == 2 ** (szx + 4));
    # sizes above 512 are clamped to the maximum SZX of 6, sizes of 16 or
    # less map to 0.
    szx = 0
    for exponent, lower_bound in ((6, 512), (5, 256), (4, 128),
                                  (3, 64), (2, 32), (1, 16)):
        if size > lower_bound:
            szx = exponent
            break
    # Pack NUM | M | SZX into a single integer option value.
    encoded = (num << 4) | (m << 3) | szx
    option.value = encoded
    self.add_option(option)
Set the Block2 option. :param value: the Block2 value
def set_blocked(self, name):
    """block registrations of the given name, unregister if already registered."""
    self.unregister(name=name)
    # Storing ``None`` marks the name as blocked for future registrations.
    self._name2plugin[name] = None
block registrations of the given name, unregister if already registered.
def predict(self, features, verbose=False):
    """
    Probability estimates for each feature.

    See sklearn's SGDClassifier predict and predict_proba methods.

    Args:
        features (:obj:`list` of :obj:`list` of :obj:`float`)
        verbose: Boolean, optional.  If True, returns an array where each
            element is a dictionary mapping labels to their respective
            probabilities.  Defaults to False.

    Returns:
        Array of arrays of numbers, or array of dictionaries if verbose
        is True.
    """
    probs = self.clf.predict_proba(features)
    if not verbose:
        return probs
    class_names = self.labels.classes_
    # One dict per sample: label -> probability.
    return [
        {class_names[i]: p for i, p in enumerate(row)}
        for row in probs
    ]
Probability estimates of each feature See sklearn's SGDClassifier predict and predict_proba methods. Args: features (:obj:`list` of :obj:`list` of :obj:`float`) verbose: Boolean, optional. If true returns an array where each element is a dictionary, where keys are labels and values are the respective probabilities. Defaults to False. Returns: Array of array of numbers, or array of dictionaries if verbose is True
def describe(cls) -> None:
    """
    Prints in the console a table showing all the attributes for all the
    definitions inside the class.

    :return: None
    """
    max_lengths = []
    for attr_name in cls.attr_names():
        # Each attribute has a plural accessor, e.g. 'name' -> cls.names().
        attr_func = "%ss" % attr_name
        attr_list = list(map(str, getattr(cls, attr_func)())) + [attr_name]
        max_lengths.append(max(list(map(len, attr_list))))
    # Bug fix: the format used to hard-code exactly three columns and
    # raised TypeError for any other attr_names() length; build it
    # dynamically instead.
    row_format = " | ".join("{:>%d}" % width for width in max_lengths)
    headers = [attr_name.capitalize() for attr_name in cls.attr_names()]
    header_line = row_format.format(*headers)
    output = "Class: %s\n" % cls.__name__
    output += header_line + "\n"
    output += "-" * (len(header_line)) + "\n"
    for item in cls:
        format_list = [str(getattr(item, attr_name))
                       for attr_name in cls.attr_names()]
        output += row_format.format(*format_list) + "\n"
    print(output)
Prints in the console a table showing all the attributes for all the definitions inside the class :return: None
def fulfill(self, value):
    """
    Fulfill the promise with a given value.

    Transitions the promise from PENDING to FULFILLED, stores *value*
    and invokes every queued callback with it.  Callback exceptions are
    deliberately swallowed so one bad handler cannot break the others.

    :raises AssertionError: if the promise is not in the PENDING state.
    """
    # Explicit raise instead of ``assert`` so the state check survives
    # running Python with -O (which strips assert statements); the
    # exception type is kept for backward compatibility.
    if self._state != self.PENDING:
        raise AssertionError("promise is no longer pending")
    self._state = self.FULFILLED
    self.value = value
    for callback in self._callbacks:
        try:
            callback(value)
        except Exception:
            # Best effort: a failing callback must not prevent the rest
            # from running or the promise from settling.
            pass
    self._callbacks = []
Fulfill the promise with a given value.
def from_list(a, order='F'):
    """
    Generate a TT-vector object from the given TT cores.

    :param a: List of TT cores.
    :type a: list
    :returns: vector -- TT-vector constructed from the given cores.
    """
    d = len(a)
    res = vector()
    mode_sizes = _np.zeros(d, dtype=_np.int32)
    ranks = _np.zeros(d + 1, dtype=_np.int32)
    flat_cores = _np.array([])
    for i, core in enumerate(a):
        # Append every core, flattened in the requested memory order.
        flat_cores = _np.concatenate((flat_cores, core.flatten(order)))
        # Core i has shape (r[i], n[i], r[i+1]).
        ranks[i] = core.shape[0]
        ranks[i + 1] = core.shape[2]
        mode_sizes[i] = core.shape[1]
    res.d = d
    res.n = mode_sizes
    res.r = ranks
    res.core = flat_cores
    res.get_ps()
    return res
Generate a TT-vector object from the given TT cores. :param a: List of TT cores. :type a: list :returns: vector -- TT-vector constructed from the given cores.
def handle_molecular_activity_default(_: str, __: int, tokens: ParseResults) -> ParseResults:
    """Handle a BEL 2.0 style molecular activity with BEL default names."""
    # Resolve the modern label for the legacy activity name first, so a
    # missing entry fails before the tokens are mutated.
    modern_name = language.activity_labels[tokens[0]]
    tokens[NAMESPACE] = BEL_DEFAULT_NAMESPACE
    tokens[NAME] = modern_name
    return tokens
Handle a BEL 2.0 style molecular activity with BEL default names.
def setPlainText(self, txt, mime_type, encoding):
    """
    Extends setPlainText to force the user to setup an encoding and a
    mime type.

    Emits the new_text_set signal.

    :param txt: The new text to set.
    :param mime_type: Associated mimetype. Setting the mime will update the
        pygments lexer.
    :param encoding: text encoding
    """
    self.file.mimetype = mime_type
    self.file._encoding = encoding
    self._original_text = txt
    self._modified_lines.clear()
    import time
    t = time.time()
    super(CodeEdit, self).setPlainText(txt)
    # Level 5 is below DEBUG: very verbose timing trace only.
    _logger().log(5, 'setPlainText duration: %fs' % (time.time() - t))
    self.new_text_set.emit()
    # A freshly set document has no undo/redo history.
    self.redoAvailable.emit(False)
    self.undoAvailable.emit(False)
Extends setPlainText to force the user to setup an encoding and a mime type. Emits the new_text_set signal. :param txt: The new text to set. :param mime_type: Associated mimetype. Setting the mime will update the pygments lexer. :param encoding: text encoding
def do_checkout(self, subcmd, opts, *args):
    """Check out a working copy from a repository.

    usage:
        checkout URL... [PATH]

    Note: If PATH is omitted, the basename of the URL will be used as
    the destination. If multiple URLs are given each will be checked
    out into a sub-directory of PATH, with the name of the sub-directory
    being the basename of the URL.

    ${cmd_option_list}
    """
    print "'svn %s' opts: %s" % (subcmd, opts)
    print "'svn %s' args: %s" % (subcmd, args)
Check out a working copy from a repository. usage: checkout URL... [PATH] Note: If PATH is omitted, the basename of the URL will be used as the destination. If multiple URLs are given each will be checked out into a sub-directory of PATH, with the name of the sub-directory being the basename of the URL. ${cmd_option_list}
def mount(self, path, mount):
    """
    Add a mountpoint to the filesystem.

    :param path: the path at which to mount.
    :param mount: the mount object to register.
    """
    # Normalize the path, then flatten it to the canonical key form.
    key = self._join_chunks(self._normalize_path(path))
    self._mountpoints[key] = mount
Add a mountpoint to the filesystem.
def record(self):
    """
    A method to generate a string representing this Boot Record.

    Parameters:
     None.
    Returns:
     A string representing this Boot Record.
    Raises:
     PyCdlibInternalError if the Boot Record has not been initialized.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Boot Record not yet initialized')
    # Field order matches self.FMT: descriptor type, standard identifier
    # ('CD001'), version (1), boot system identifier, boot identifier,
    # boot system use.
    return struct.pack(self.FMT, VOLUME_DESCRIPTOR_TYPE_BOOT_RECORD, b'CD001', 1, self.boot_system_identifier, self.boot_identifier, self.boot_system_use)
A method to generate a string representing this Boot Record. Parameters: None. Returns: A string representing this Boot Record.
def transform_flask_from_import(node):
    """
    Translates a flask.ext from-style import into a non-magical import.

    Translates:
        from flask.ext import wtf, bcrypt as fcrypt
    Into:
        import flask_wtf as wtf, flask_bcrypt as fcrypt
    """
    # Each (name, alias) becomes ('flask_<name>', alias-or-name).
    rewritten = [
        ('flask_{}'.format(name), alias or name)
        for name, alias in node.names
    ]
    new_node = nodes.Import()
    copy_node_info(node, new_node)
    new_node.names = rewritten
    mark_transformed(new_node)
    return new_node
Translates a flask.ext from-style import into a non-magical import. Translates: from flask.ext import wtf, bcrypt as fcrypt Into: import flask_wtf as wtf, flask_bcrypt as fcrypt
def asdensity(self) -> 'Density':
    """Convert a pure state to a density matrix."""
    # rho = |psi><psi|
    rho = bk.outer(self.tensor, bk.conj(self.tensor))
    return Density(rho, self.qubits, self._memory)
Convert a pure state to a density matrix
def write_conf(self):
    """
    Write the rendered template configuration to ``self.output_filename``.
    """
    # ``with`` guarantees the file handle is closed even if rendering
    # raises; the original leaked the handle on error.
    with open(self.output_filename, 'w') as f:
        print(self.t.render(prefixes=self.prefixes), file=f)
Write the config to file
def handle_button(self, event, event_type):
    """Handle mouse click."""
    button_number = self._get_mouse_button_number(event)
    # Click events 25 and 26 are disambiguated per button by adding
    # button_number / 10 to the event-type key.
    if event_type in (25, 26):
        event_type += button_number * 0.1
    event_type_name, event_code, value, scan = self.codes[event_type]
    if event_type_name == "Key":
        scan_event, key_event = self.emulate_press(
            event_code, scan, value, self.timeval)
        self.events.append(scan_event)
        self.events.append(key_event)
Handle mouse click.
def encode(string):
    """
    Encode the given string as an OID.

    >>> import snmp_passpersist as snmp
    >>> snmp.PassPersist.encode("hello")
    '5.104.101.108.108.111'
    >>>
    """
    # Dot-joined code points of every character, prefixed by the length.
    # NB: the length prefix keeps its trailing dot even for "" ('0.'),
    # matching the historical output.
    code_points = ".".join(str(ord(ch)) for ch in string)
    return "%s." % (len(string)) + code_points
Encode the given string as an OID. >>> import snmp_passpersist as snmp >>> snmp.PassPersist.encode("hello") '5.104.101.108.108.111' >>>
def get_field(self, path, name):
    """
    Retrieves the value of the field at the specified path.

    :param path: str or Path instance
    :param name: field name
    :type name: str
    :return: the field value as a string
    :raises KeyError: A component of path doesn't exist.
    :raises TypeError: The value at (path, name) is not a plain string
        field (e.g. the name refers to a section/sub-path).
    """
    # Bug fix: the original caught KeyError only to re-raise an *empty*
    # KeyError(), destroying the information about which component was
    # missing; let the original exception propagate instead.
    value = self.get(path, name)
    if not isinstance(value, str):
        raise TypeError('field %r at %r is not a string' % (name, path))
    return value
Retrieves the value of the field at the specified path. :param path: str or Path instance :param name: :type name: str :return: :raises ValueError: A component of path is a field name. :raises KeyError: A component of path doesn't exist. :raises TypeError: The field name is a component of a path.
def prune(self, minimum_word_frequency_percentage=1):
    """
    Filter out words that occur in fewer than the given percentage of
    documents.

    :param minimum_word_frequency_percentage: minimum document frequency,
        as a percentage of the total number of documents, for a word to
        be kept.
    """
    # Hoist the loop-invariant threshold out of the per-word test; the
    # original recomputed it for every word of every document.
    threshold = (minimum_word_frequency_percentage / 100.
                 * len(self.resulting_documents))
    self.resulting_documents = [
        [word for word in document
         if self.word_in_how_many_documents[word] >= threshold]
        for document in self.resulting_documents
    ]
Filter out words that occur in less than minimum_word_frequency_percentage percent of the documents. :param minimum_word_frequency_percentage: minimum document frequency, as a percentage of the total number of documents, for a word to be kept
def inspect_network(self, net_id, verbose=None, scope=None):
    """
    Get detailed information about a network.

    Args:
        net_id (str): ID of network
        verbose (bool): Show the service details across the cluster in
            swarm mode.
        scope (str): Filter the network by scope (``swarm``, ``global``
            or ``local``).
    """
    # Each optional query parameter is gated on the minimum API version
    # that introduced it.
    optional = (
        ('verbose', verbose, '1.28', 'verbose was introduced in API 1.28'),
        ('scope', scope, '1.31', 'scope was introduced in API 1.31'),
    )
    params = {}
    for key, arg, min_version, message in optional:
        if arg is None:
            continue
        if version_lt(self._version, min_version):
            raise InvalidVersion(message)
        params[key] = arg
    url = self._url("/networks/{0}", net_id)
    return self._result(self._get(url, params=params), json=True)
Get detailed information about a network. Args: net_id (str): ID of network verbose (bool): Show the service details across the cluster in swarm mode. scope (str): Filter the network by scope (``swarm``, ``global`` or ``local``).
def to_bytes(s, encoding="utf-8"):
    """
    Converts the string to a bytes type, if not already.

    :s: the string to convert to bytes
    :returns: `str` on Python2 and `bytes` on Python3.
    """
    # Anything that is not already binary is coerced to text, then encoded.
    if not isinstance(s, six.binary_type):
        return six.text_type(s).encode(encoding)
    return s
Converts the string to a bytes type, if not already. :s: the string to convert to bytes :returns: `str` on Python2 and `bytes` on Python3.
def decode_int(self, str):
    """
    Decodes a short Base64 string into an integer.

    Example: ``decode_int('B7')`` returns ``123``.
    """
    # Horner's scheme: fold each symbol's value into the accumulator.
    # NOTE(review): the parameter shadows the builtin ``str``; kept as-is
    # because renaming it would break keyword callers.
    value = 0
    for symbol in str:
        value = value * self.BASE + self.ALPHABET_REVERSE[symbol]
    return value
Decodes a short Base64 string into an integer. Example: ``decode_int('B7')`` returns ``123``.
def mangle_form(form):
    "Utility to monkeypatch forms into paperinputs, untested"
    # NOTE(review): `field` is the field *name* (dict key); the value is
    # what the original called `widget`.
    for field, widget in form.fields.iteritems():
        if type(widget) is forms.widgets.TextInput:
            form.fields[field].widget = PaperTextInput()
            form.fields[field].label = ''
        if type(widget) is forms.widgets.PasswordInput:
            # Bug fix: the original assigned to the key string
            # (`field.widget = ...`), which raises AttributeError; mutate
            # the field stored on the form, mirroring the branch above.
            form.fields[field].widget = PaperPasswordInput()
            form.fields[field].label = ''
    return form
Utility to monkeypatch forms into paperinputs, untested