code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def uninstall_all_visa_handlers(self, session):
    """Uninstall all previously installed handlers for a session.

    :param session: Unique logical identifier to a session.  If None,
        handlers are uninstalled for every known session.
    """
    if session is not None:
        self.__uninstall_all_handlers_helper(session)
        return
    # Snapshot the keys since the helper may mutate ``self.handlers``.
    for sess in list(self.handlers):
        self.__uninstall_all_handlers_helper(sess)
Uninstalls all previously installed handlers for a particular session. :param session: Unique logical identifier to a session. If None, operates on all sessions.
def tqdm_hook(t):
    """Wrap a tqdm instance as a urlretrieve-style reporthook.

    Don't forget to close() or __exit__() the tqdm instance once you're
    done with it (easiest using ``with`` syntax).

    Example
    -------
    >>> with tqdm(...) as t:
    ...     reporthook = tqdm_hook(t)
    ...     urllib.urlretrieve(..., reporthook=reporthook)
    """
    last_block = [0]

    def update_to(b=1, bsize=1, tsize=None):
        # b: blocks transferred so far; bsize: size of each block;
        # tsize: total size, possibly None until the server reports it.
        if tsize is not None:
            t.total = tsize
        t.update((b - last_block[0]) * bsize)
        last_block[0] = b

    return update_to
Wraps tqdm instance. Don't forget to close() or __exit__() the tqdm instance once you're done with it (easiest using `with` syntax). Example ------- >>> with tqdm(...) as t: ... reporthook = tqdm_hook(t) ... urllib.urlretrieve(..., reporthook=reporthook)
def calc_model(cortex, model_argument, model_hemi=Ellipsis, radius=np.pi/3):
    """Load the appropriate retinotopy model for the given argument.

    Required afferent parameters:
      @ model_argument Must be either a RegisteredRetinotopyModel object
        or the name of a model that can be loaded.

    Optional afferent parameters:
      @ model_hemi Hemisphere of the model; usually only set to None for
        the fsaverage_sym hemisphere.  The default (Ellipsis) uses the
        chirality of the cortex parameter.

    Provided efferent values:
      @ model The RegisteredRetinotopyModel to register the mesh to.
    """
    if pimms.is_str(model_argument):
        if model_hemi is Ellipsis:
            hemi = cortex.chirality
        elif model_hemi is None:
            hemi = None
        else:
            hemi = model_hemi
        model = retinotopy_model(model_argument, hemi=hemi, radius=radius)
    else:
        model = model_argument
    if not isinstance(model, RegisteredRetinotopyModel):
        raise ValueError('model must be a RegisteredRetinotopyModel')
    return model
calc_model loads the appropriate model object given the model argument, which may be given as the name of the model or as a model object itself. Required afferent parameters: @ model_argument Must be either a RegisteredRetinotopyModel object or the name of a model that can be loaded. Optional afferent parameters: @ model_hemi May be used to specify the hemisphere of the model; this is usually only used when the fsaverage_sym hemisphere is desired, in which case this should be set to None; if left at the default value (Ellipsis), then it will use the hemisphere of the cortex param. Provided efferent values: @ model Will be the RegisteredRetinotopyModel object to which the mesh should be registered.
def __prepare_args(self, args):
    """Format command arguments before sending them.

    Byte-string arguments are quoted, except size indications of the
    form ``{\\d+?}`` which pass through unchanged; every other value is
    stringified and UTF-8 encoded.

    :param args: list of arguments
    :return: a list of transformed arguments
    """
    prepared = []
    for arg in args:
        if isinstance(arg, six.binary_type):
            if self.__size_expr.match(arg):
                prepared.append(arg)
            else:
                prepared.append(b'"' + arg + b'"')
        else:
            prepared.append(bytes(str(arg).encode("utf-8")))
    return prepared
Format command arguments before sending them. Command arguments of type string must be quoted; the only exception concerns size indications (of the form {\d\+?}). :param args: list of arguments :return: a list of transformed arguments
def delete_dcnm_out_part(self, tenant_id, fw_dict, is_fw_virt=False):
    """Delete the DCNM OUT partition and record the result in the FW DB.

    Returns True on success, False if the partition delete raised.
    """
    result = fw_const.DCNM_OUT_PART_DEL_SUCCESS
    tenant_name = fw_dict.get('tenant_name')
    success = True
    try:
        self._delete_partition(tenant_id, tenant_name)
    except Exception as exc:
        LOG.error("deletion of Out Partition failed for tenant "
                  "%(tenant)s, Exception %(exc)s",
                  {'tenant': tenant_id, 'exc': str(exc)})
        result = fw_const.DCNM_OUT_PART_DEL_FAIL
        success = False
    self.update_fw_db_result(tenant_id, dcnm_status=result)
    LOG.info("Out partition deleted")
    return success
Delete the DCNM OUT partition and update the result.
def login(self, username=None, password=None):
    """Login to a vSphere server.

    >>> client.login(username='Administrator', password='strongpass')

    :param username: The username to authenticate as (defaults to
        ``self.username``).
    :type username: str
    :param password: The password to authenticate with (defaults to
        ``self.password``).
    :type password: str
    """
    username = self.username if username is None else username
    password = self.password if password is None else password
    logger.debug("Logging into server")
    self.sc.sessionManager.Login(userName=username, password=password)
    self._logged_in = True
Login to a vSphere server. >>> client.login(username='Administrator', password='strongpass') :param username: The username to authenticate as. :type username: str :param password: The password to authenticate with. :type password: str
def set_data_length(self, length):
    """Set the length of data for this El Torito Entry.

    Parameters:
     length - The new length for the El Torito Entry.
    Returns:
     Nothing.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError(
            'El Torito Entry not initialized')
    # Sector count is in 512-byte virtual sectors, rounded up.
    self.sector_count = utils.ceiling_div(length, 512)
A method to set the length of data for this El Torito Entry. Parameters: length - The new length for the El Torito Entry. Returns: Nothing.
def _record_values_for_fit_summary_and_statsmodels(self):
    """Record fit-summary values and statsmodels-style result attributes.

    Stores nobs, df_model, df_resid, llf, bse, aic and bic on the model
    instance.  Must only be called after all other estimation results
    attributes have been set.

    Returns
    -------
    None.
    """
    needed_attributes = ["fitted_probs", "params",
                         "log_likelihood", "standard_errors"]
    try:
        assert all(hasattr(self, attr) for attr in needed_attributes)
        assert all(getattr(self, attr) is not None
                   for attr in needed_attributes)
    except AssertionError:
        raise NotImplementedError(
            "Call this function only after setting/calculating all other"
            " estimation results attributes")

    self.nobs = self.fitted_probs.shape[0]
    self.df_model = self.params.shape[0]
    self.df_resid = self.nobs - self.df_model
    self.llf = self.log_likelihood
    self.bse = self.standard_errors
    self.aic = compute_aic(self)
    self.bic = compute_bic(self)
    return None
Store the various estimation results that are used to describe how well the estimated model fits the given dataset, and record the values that are needed for the statsmodels estimation results table. All values are stored on the model instance. Returns ------- None.
def codemirror_field_js_bundle(field):
    """Filter to get the CodeMirror Javascript bundle name for a field.

    Example:
        {% load djangocodemirror_tags %}
        {{ form.myfield|codemirror_field_js_bundle }}

    Arguments:
        field (django.forms.fields.Field): A form field that uses a
            :class:`djangocodemirror.widget.CodeMirrorWidget`.

    Raises:
        CodeMirrorFieldBundleError: If the field's CodeMirror
            configuration does not have a bundle name.

    Returns:
        string: Bundle name to load with webassets.
    """
    manifesto = CodemirrorAssetTagRender()
    manifesto.register_from_fields(field)
    try:
        return manifesto.js_bundle_names()[0]
    except IndexError:
        raise CodeMirrorFieldBundleError(
            ("Given field with configuration name '{}' does not have a "
             "Javascript bundle name").format(field.config_name))
Filter to get CodeMirror Javascript bundle name needed for a single field. Example: :: {% load djangocodemirror_tags %} {{ form.myfield|codemirror_field_js_bundle }} Arguments: field (django.forms.fields.Field): A form field that contains a widget :class:`djangocodemirror.widget.CodeMirrorWidget`. Raises: CodeMirrorFieldBundleError: If Codemirror configuration form field does not have a bundle name. Returns: string: Bundle name to load with webassets.
def set_default_headers(self):
    """Set default CORS headers on the response.

    When the ``cors_origin`` mod option is configured and the request's
    Origin is allowed, emit Access-Control-Allow-Origin.
    """
    mod_opts = self.application.mod_opts
    if not mod_opts.get('cors_origin'):
        return
    origin = self.request.headers.get('Origin')
    allowed_origin = _check_cors_origin(origin, mod_opts['cors_origin'])
    if allowed_origin:
        self.set_header("Access-Control-Allow-Origin", allowed_origin)
Set default CORS headers
def instruction_path(cls, project, instruction):
    """Return a fully-qualified instruction resource string."""
    template = "projects/{project}/instructions/{instruction}"
    return google.api_core.path_template.expand(
        template, project=project, instruction=instruction)
Return a fully-qualified instruction string.
def probe_check(name, status, device_type):
    """Map a probe status to a check result.

    Returns ``ok`` when the status resolves to "ok" in PROBE_STATE,
    ``unknown`` for unrecognized statuses, and ``critical`` otherwise,
    each paired with a descriptive string.
    """
    status_string = PROBE_STATE.get(int(status), "unknown")
    message = "{} '{}': {}".format(device_type, name, status_string)
    if status_string == "ok":
        return ok, message
    if status_string == "unknown":
        return unknown, message
    return critical, message
if the status is "ok" in the PROBE_STATE dict, return ok + string if the status is not "ok", return critical + string
def _include_term(self, term):
    """Add a single term to the current ontology.

    Dereferences every term in the term's relations — recursively
    including related terms not yet in the ontology — and replaces the
    related term objects with their ids, so relations always refer to
    the terms stored in this ontology (changes to one term are then
    seen by every term related to it).

    Returns True if any relation needed dereferencing.
    """
    ref_needed = False
    if term.relations:
        for k, v in six.iteritems(term.relations):
            for i, t in enumerate(v):
                try:
                    if t.id not in self:
                        # Pull in the related term first.
                        self._include_term(t)
                    # Store the id, not the object, so relations point
                    # at the ontology's copy of the term.
                    v[i] = t.id
                except AttributeError:
                    # t is already an id (no .id attribute); leave as-is.
                    pass
            # NOTE(review): original indentation is ambiguous; this flag
            # appears to be set once per relation key — confirm.
            ref_needed = True
    self.terms[term.id] = term
    return ref_needed
Add a single term to the current ontology. It is needed to dereference any term in the term's relationship and then to build the reference again to make sure the other terms referenced in the term's relations are the one contained in the ontology (to make sure changes to one term in the ontology will be applied to every other term related to that term).
def _dilated_conv_layer(self, output_channels, dilation_rate, apply_relu,
                        name):
    """Create a dilated convolution layer.

    Args:
      output_channels: int. Number of output channels for each pixel.
      dilation_rate: int. How many pixels each stride offset moves; a
        value of 1 indicates a standard convolution.
      apply_relu: bool. If True, a ReLU non-linearity is appended.
      name: string. Name for the layer.

    Returns:
      A sonnet Module for a dilated convolution.
    """
    components = [
        conv.Conv2D(
            output_channels,
            [3, 3],
            initializers=self._initializers,
            regularizers=self._regularizers,
            rate=dilation_rate,
            name="dilated_conv_" + name),
    ]
    if apply_relu:
        components.append(
            lambda net: tf.nn.relu(net, name="relu_" + name))
    return sequential.Sequential(components, name=name)
Create a dilated convolution layer. Args: output_channels: int. Number of output channels for each pixel. dilation_rate: int. Represents how many pixels each stride offset will move. A value of 1 indicates a standard convolution. apply_relu: bool. If True, a ReLU non-linearity is added. name: string. Name for layer. Returns: a sonnet Module for a dilated convolution.
def mod_watch(name, restart=True, update=False, user=None,
              conf_file=None, bin_env=None, **kwargs):
    """The supervisord watcher, called to invoke the watch command.

    Always restarts on watch.

    .. note::
        This state exists to support the ``watch`` requisite; it should
        not be called directly.  Parameters should be set by the state
        being triggered.
    """
    # Delegate to the regular ``running`` state; extra kwargs from the
    # watch trigger are deliberately not forwarded.
    return running(name, restart=restart, update=update, user=user,
                   conf_file=conf_file, bin_env=bin_env)
The supervisord watcher, called to invoke the watch command. Always restart on watch .. note:: This state exists to support special handling of the ``watch`` :ref:`requisite <requisites>`. It should not be called directly. Parameters for this function should be set by the state being triggered.
def raise_from(exc, cause):
    """Do the same as ``raise exc from cause`` does in Python 3, but
    work in Python 2 as well.

    Please check the README at https://github.com/9seconds/pep3134 for
    possible pitfalls.  Short story: be careful with tracebacks; when
    possible, use sys.exc_info instead.
    """
    # Traceback of whatever exception is currently being handled, if any.
    context_tb = sys.exc_info()[2]

    incorrect_cause = not (
        (isinstance(cause, type) and issubclass(cause, Exception))
        or isinstance(cause, BaseException)
        or cause is None
    )
    if incorrect_cause:
        raise TypeError("exception causes must derive from BaseException")

    if cause is not None:
        if not getattr(cause, "__pep3134__", False):
            # Raise and immediately catch the cause so it carries a
            # traceback; then pin that traceback to the captured context.
            try:
                raise_(cause)
            except:
                cause = sys.exc_info()[1]
                cause.__fixed_traceback__ = context_tb

    try:
        raise_(exc)
    except:
        exc = sys.exc_info()[1]
        # Emulate PEP 3134 attributes on the wrapped original exception.
        exc.__original_exception__.__suppress_context__ = True
        exc.__original_exception__.__cause__ = cause
        exc.__original_exception__.__context__ = None
        raise exc
Does the same as ``raise LALALA from BLABLABLA`` does in Python 3. But works in Python 2 also! Please check out the README on https://github.com/9seconds/pep3134 to get an idea about possible pitfalls. But the short story is: please be careful with tracebacks. If it is possible, use sys.exc_info instead. But in most cases it will work as you expect.
def standby(self):
    """Put the device into standby mode so that registers can be set.

    Reads CTRL_REG1, clears bit 0 (the active bit) and writes the
    register back.

    @return: No return value
    """
    register = self.MMA8452Q_Register['CTRL_REG1']
    self.board.i2c_read_request(
        self.address, register, 1,
        Constants.I2C_READ | Constants.I2C_END_TX_MASK,
        self.data_val, Constants.CB_TYPE_DIRECT)
    ctrl1 = self.wait_for_read_result()
    # Clear the ACTIVE bit to enter standby.
    ctrl1 = ctrl1[self.data_start] & ~0x01
    self.callback_data = []
    self.board.i2c_write_request(self.address, [register, ctrl1])
Put the device into standby mode so that the registers can be set. @return: No return value
def drop_columns(records, slices):
    """Drop all columns present in ``slices`` from records.

    ``slices`` is an iterable of slice objects indexing alignment
    columns; each record's sequence is rebuilt without those columns.
    """
    for record in records:
        length = len(record)
        drop = {i for sl in slices for i in range(*sl.indices(length))}
        keep = [i not in drop for i in range(length)]
        record.seq = Seq(''.join(itertools.compress(record.seq, keep)),
                         record.seq.alphabet)
        yield record
Drop all columns present in ``slices`` from records
def response(self, text, response_type='ephemeral', attachments=None):
    """Return a JSON-formatted response.

    :param text: the text returned to the client
    :param response_type: optional.  'in_channel' shares both the
        response and the user's original message in the channel;
        'ephemeral' makes the response visible only to the issuing user.
    :param attachments: optional list of additional messages for a rich
        response.
    """
    from flask import jsonify
    return jsonify(
        response_type=response_type,
        text=text,
        attachments=attachments if attachments is not None else [],
    )
Return a response with json format :param text: the text returned to the client :param response_type: optional. When `in_channel` is assigned, both the response message and the initial message typed by the user will be shared in the channel. When `ephemeral` is assigned, the response message will be visible only to the user that issued the command. :param attachments: optional. A list of additional messages for rich response.
def reverse_dependencies(self, ireqs):
    """Return a lookup table of reverse dependencies for the given ireqs.

    Since this data is static, the result is only complete if the
    dependency cache contains the complete data — typically no problem
    once the entire dependency tree is resolved.
    """
    cache_keys = [self.as_cache_key(ireq) for ireq in ireqs]
    return self._reverse_dependencies(cache_keys)
Returns a lookup table of reverse dependencies for all the given ireqs. Since this is all static, it only works if the dependency cache contains the complete data, otherwise you end up with a partial view. This is typically no problem if you use this function after the entire dependency tree is resolved.
async def delete(self):
    """Delete the model from the database."""
    query = r.table_name(self.table_name).get(self.id).delete()
    await query.run(await conn.get())
Deletes the model from the database.
def check(self):
    """Check that the data and third-party tools needed to run the
    classification are available.

    :raises RuntimeError: if the data directory or any tool is missing.
    """
    pathfinder = Pathfinder(True)
    if pathfinder.add_path(pathfinder['superfamily']) is None:
        raise RuntimeError("'superfamily' data directory is missing")
    required_tools = ('hmmscan', 'phmmer', 'mast', 'blastp',
                      'ass3.pl', 'hmmscan.pl')
    for tool in required_tools:
        if not pathfinder.exists(tool):
            raise RuntimeError('Dependency {} is missing'.format(tool))
Check if data and third party tools, necessary to run the classification, are available :raises: RuntimeError
def jenkins_last_build_sha():
    """Return the sha of the last completed Jenkins build for this
    project.  Expects JOB_URL in the environment."""
    job_url = os.getenv('JOB_URL')
    response = urllib.urlopen("{0}/api/json".format(job_url))
    job_data = json.loads(response.read())
    last_completed_build_url = job_data['lastCompletedBuild']['url']
    response = urllib.urlopen(
        "{0}/api/json".format(last_completed_build_url))
    last_completed_build = json.loads(response.read())
    # NOTE(review): indexing a parsed JSON object with [1] looks suspect;
    # presumably this should address the build's 'actions' list — confirm.
    return last_completed_build[1]['lastBuiltRevision']['SHA1']
Returns the sha of the last completed jenkins build for this project. Expects JOB_URL in environment
def fmt_cut(cut):
    """Format a |Cut|."""
    source = fmt_mechanism(cut.from_nodes, cut.node_labels)
    target = fmt_mechanism(cut.to_nodes, cut.node_labels)
    return 'Cut {} {} {}'.format(source, CUT_SYMBOL, target)
Format a |Cut|.
def _check_rel(attrs, rel_whitelist, rel_blacklist): rels = attrs.get('rel', [None]) if rel_blacklist: for rel in rels: if rel in rel_blacklist: return False if rel_whitelist: for rel in rels: if rel in rel_whitelist: return True return False return True
Check a link's relations against the whitelist or blacklist. First, this will reject based on blacklist. Next, if there is a whitelist, there must be at least one rel that matches. To explicitly allow links without a rel you can add None to the whitelist (e.g. ['in-reply-to',None])
def save(self, path, compress=True):
    """Write the ``.proteins`` and ``.peptides`` entries to disk as a
    ``proteindb`` file.

    .. note::
        If no ``proteindb`` file is present in the specified path a new
        file is generated, otherwise the old file is replaced.

    :param path: directory to which the ``proteindb`` file is written;
        the output file name is taken from ``self.info['name']``
    :param compress: bool, True to use zip file compression
    """
    with aux.PartiallySafeReplace() as msr:
        filepath = aux.joinpath(path, self.info['name'] + '.proteindb')
        with msr.open(filepath, mode='w+b') as openfile:
            self._writeContainer(openfile, compress=compress)
Writes the ``.proteins`` and ``.peptides`` entries to the hard disk as a ``proteindb`` file. .. note:: If ``.save()`` is called and no ``proteindb`` file is present in the specified path a new file is generated, otherwise the old file is replaced. :param path: file directory to which the ``proteindb`` file is written. The output file name is specified by ``self.info['name']`` :param compress: bool, True to use zip file compression
def _makeIndentFromWidth(self, width): if self._indenter.useTabs: tabCount, spaceCount = divmod(width, self._indenter.width) return ('\t' * tabCount) + (' ' * spaceCount) else: return ' ' * width
Make indent text with the specified width. Contains width count of spaces, or tabs and spaces
def merge_bed_by_name(bt):
    """Merge intervals in a bed file when the intervals share a name.

    Intervals with the same name must be adjacent in the bed file.
    """
    name_lines = dict()
    for rec in bt:
        entry = [rec.chrom, rec.start, rec.end, rec.name, rec.strand]
        name_lines.setdefault(rec.name, []).append(entry)
    merged = []
    for name in name_lines.keys():
        merged.extend(_merge_interval_list(name_lines[name]))
    text_lines = ['\t'.join(map(str, row)) for row in merged]
    return pbt.BedTool('\n'.join(text_lines) + '\n', from_string=True)
Merge intervals in a bed file when the intervals have the same name. Intervals with the same name must be adjacent in the bed file.
def load(target, source_module=None):
    """Get the actual implementation of the target.

    ``target`` is split into module, class and function parts; when no
    module is named, ``source_module`` is used instead.

    :raises MissingModule: if neither the target nor source_module
        names a module.
    """
    module, klass, function = _get_module(target)
    module = module or source_module
    if not module:
        raise MissingModule(
            "No module name supplied or source_module provided.")
    actual_module = sys.modules[module]
    if not klass:
        return getattr(actual_module, function)
    class_object = getattr(actual_module, klass)
    return getattr(class_object, function) if function else class_object
Get the actual implementation of the target.
def sorted_groupby(df, groupby):
    """Group a DataFrame on a column that is assumed to be sorted.

    Parameters
    ----------
    df : pandas.DataFrame
    groupby : object
        Column name on which to group.  This column must be sorted.

    Yields
    ------
    (group_name, DataFrame) pairs.
    """
    start = 0
    prev = df[groupby].iloc[start]
    for idx, value in enumerate(df[groupby]):
        if value != prev:
            yield prev, df.iloc[start:idx]
            prev = value
            start = idx
    # Emit the final (possibly only) group.
    yield prev, df.iloc[start:]
Perform a groupby on a DataFrame using a specific column and assuming that that column is sorted. Parameters ---------- df : pandas.DataFrame groupby : object Column name on which to groupby. This column must be sorted. Returns ------- generator Yields pairs of group_name, DataFrame.
def redirect(view=None, url=None, **kwargs):
    """Redirect to the specified view or url.

    When a view is given, extra kwargs (and ``url``, if provided) are
    passed to ``flask.url_for`` to build the target.
    """
    if view:
        if url:
            kwargs["url"] = url
        target = flask.url_for(view, **kwargs)
    else:
        target = url
    current_context.exit(flask.redirect(target))
Redirects to the specified view or url
def pixel_coord(self):
    """Return the coordinates of the source in the cutout reference
    frame."""
    reading = self.reading
    return self.get_pixel_coordinates(reading.pix_coord,
                                      reading.get_ccd_num())
Return the coordinates of the source in the cutout reference frame. @return:
def fullConn(self, preCellsTags, postCellsTags, connParam):
    """Generate all-to-all connections between pre- and post-syn cells.

    String-function connection parameters are pre-evaluated for every
    (preGid, postGid) pair, then a connection is added from every pre
    cell onto every post cell that is local to this node.
    """
    from .. import sim

    if sim.cfg.verbose:
        print('Generating set of all-to-all connections (rule: %s) ...'
              % (connParam['label']))

    # Names of connection parameters provided as string-based functions.
    paramsStrFunc = [param for param in
                     [p + 'Func' for p in self.connStringFuncParams]
                     if param in connParam]

    for paramStrFunc in paramsStrFunc:
        # Precompute the function value per (pre, post) pair; each
        # function variable is either a plain Number or a callable of
        # the pre/post cell tags.
        connParam[paramStrFunc[:-4] + 'List'] = {
            (preGid, postGid): connParam[paramStrFunc](
                **{k: v if isinstance(v, Number)
                   else v(preCellTags, postCellTags)
                   for k, v in connParam[paramStrFunc + 'Vars'].items()})
            for preGid, preCellTags in preCellsTags.items()
            for postGid, postCellTags in postCellsTags.items()}

    for postCellGid in postCellsTags:
        # Only create connections for post cells hosted on this node.
        if postCellGid in self.gid2lid:
            for preCellGid, preCellTags in preCellsTags.items():
                self._addCellConn(connParam, preCellGid, postCellGid)
Generates connections between all pre and post-syn cells
def has_neigh(tag_name, params=None, content=None, left=True):
    """Generate a matcher for tags with a given neighbour.

    Args:
        tag_name (str): Tag has to have a neighbour with this tagname.
        params (dict): Tag has to have a neighbour with these parameters.
        content (str): Tag has to have a neighbour with this content.
        left (bool, default True): Tag has to have a neighbour on the
            left, or right (set to ``False``).

    Returns:
        function: closure returning True for every matching tag.  Can
        be used as a parameter for the ``.find()`` method in
        HTMLElement.
    """
    def has_neigh_closure(element):
        if not element.parent \
           or not (element.isTag() and not element.isEndTag()):
            return False

        # Keep only real opening tags, non-blank text nodes and the
        # element itself.  Bug fix: the original used ``filter()``,
        # whose Python 3 iterator supports neither len() nor .index().
        childs = [
            x for x in element.parent.childs
            if (x.isTag() and not x.isEndTag())
            or x.getContent().strip()
            or x is element
        ]

        if len(childs) <= 1:
            return False

        ioe = childs.index(element)
        if left and ioe > 0:
            return is_equal_tag(childs[ioe - 1], tag_name, params, content)
        if not left and ioe + 1 < len(childs):
            return is_equal_tag(childs[ioe + 1], tag_name, params, content)
        return False

    return has_neigh_closure
This function generates functions, which match all tags with neighbours defined by parameters. Args: tag_name (str): Tag has to have a neighbour with this tagname. params (dict): Tag has to have a neighbour with these parameters. content (str): Tag has to have a neighbour with this content. left (bool, default True): Tag has to have a neighbour on the left, or right (set to ``False``). Returns: bool: True for every matching tag. Note: This function can be used as a parameter for the ``.find()`` method in HTMLElement.
def fix_style(style='basic', ax=None, **kwargs):
    """Add an extra formatting layer to an axe that couldn't be changed
    directly in matplotlib.rcParams or with styles.

    Apply this function to every axe you created.

    Parameters
    ----------
    style : string or list of string
        one of the previously defined styles; it should normally match
        the style chosen in set_style, but nothing forces you to.
    ax : a matplotlib axe; if None, the last axe generated is used.
    kwargs : dict
        overrides for any of the style_params keys, e.g.
        ``tight_layout=False``.

    See Also
    --------
    :func:`~publib.publib.set_style`
    :func:`~publib.tools.tools.reset_defaults`
    """
    style = _read_style(style)
    for s in style:
        if s not in style_params.keys():
            available = [f.replace('.mplstyle', '')
                         for f in os.listdir(_get_lib())
                         if f.endswith('.mplstyle')]
            raise ValueError(
                '{0} is not a valid style. '.format(s) +
                'Please pick a style from the list available in ' +
                '{0}: {1}'.format(_get_lib(), available))
    _fix_style(style, ax, **kwargs)
Add an extra formatting layer to an axe, that couldn't be changed directly in matplotlib.rcParams or with styles. Apply this function to every axe you created. Parameters ---------- ax: a matplotlib axe. If None, the last axe generated is used style: string or list of string ['basic', 'article', 'poster', 'B&W','talk','origin'] one of the styles previously defined. It should match the style you chose in set_style but nothing forces you to. kwargs: dict edit any of the style_params keys. ex: >>> tight_layout=False Examples -------- plb.set_style('poster') plt.plot(a,np.cos(a)) plb.fix_style('poster',**{'draggable_legend':False}) See Also -------- :func:`~publib.publib.set_style` :func:`~publib.tools.tools.reset_defaults`
def edit_view(self, request, object_id):
    """Instantiate a class-based view providing 'edit' functionality
    for the assigned model.

    The view class used can be overridden by changing the
    'edit_view_class' attribute.
    """
    view = self.edit_view_class.as_view(
        model_admin=self, object_id=object_id)
    return view(request)
Instantiates a class-based view to provide 'edit' functionality for the assigned model, or redirect to Wagtail's edit view if the assigned model extends 'Page'. The view class used can be overridden by changing the 'edit_view_class' attribute.
def trim(self):
    """Trim leaves from the tree that are not observed at the
    highest-resolution level.

    NOTE: currently disabled — the bare ``return`` below makes the rest
    of this method unreachable (it appears to have been switched off
    deliberately).
    """
    return
    # Unreachable: walk levels from next-to-last down to the root,
    # removing leaf nodes, then clear the cached leaf set.
    for l in self._levels[-2::-1]:
        for n in l:
            if n.is_leaf:
                n.parent.remove_child(n.label)
    self._clear_all_leaves()
Trims leaves from tree that are not observed at highest-resolution level This is a bit hacky-- what it does is
def update_confirmation(self, confirmation_id, confirmation_dict):
    """Update a confirmation.

    :param confirmation_id: the confirmation id
    :param confirmation_dict: dict of fields to update
    :return: dict
    """
    return self._create_put_request(
        resource=CONFIRMATIONS,
        billomat_id=confirmation_id,
        send_data=confirmation_dict,
    )
Updates a confirmation :param confirmation_id: the confirmation id :param confirmation_dict: dict :return: dict
def _cast_to_type(self, value): if not isinstance(value, list): self.fail('invalid', value=value) return value
Raise error if the value is not a list
def update_os_version(self):
    """Detect the OS version and update the ``os_version`` attribute.

    The attribute is only updated when detection succeeds.
    """
    self.chain.connection.log("Detecting os version")
    detected = self.driver.get_os_version(self.version_text)
    if detected:
        self.chain.connection.log("SW Version: {}".format(detected))
        self.os_version = detected
Update os_version attribute.
def fromHTML(html, *args, **kwargs):
    """Create a TreeOfContents abstraction from HTML.

    :param str html: HTML
    :return: TreeOfContents object
    """
    source = BeautifulSoup(html, 'html.parser', *args, **kwargs)
    return TOC('[document]', source=source,
               descendants=source.children)
Creates abstraction using HTML :param str html: HTML :return: TreeOfContents object
def plot(self, bins=10, facecolor='0.5', plot_cols=None,
         filename="ensemble.pdf", func_dict=None, **kwargs):
    """Plot ensemble histograms to a multipage pdf.

    Parameters
    ----------
    bins : int
        number of bins
    facecolor : str
        color
    plot_cols : list of str
        subset of ensemble columns to plot.  If None, all are plotted.
        Default is None
    filename : str
        pdf filename.  Default is "ensemble.pdf"
    func_dict : dict
        a dict of functions to apply to specific columns
        (e.g., np.log10)
    **kwargs : dict
        keyword args to pass to plot_utils.ensemble_helper()

    Returns
    -------
    None
    """
    # Bug fix: func_dict and **kwargs were previously accepted but
    # silently dropped; forward them as the docstring promises.
    ensemble_helper(self, bins=bins, facecolor=facecolor,
                    plot_cols=plot_cols, filename=filename,
                    func_dict=func_dict, **kwargs)
plot ensemble histograms to multipage pdf Parameters ---------- bins : int number of bins facecolor : str color plot_cols : list of str subset of ensemble columns to plot. If None, all are plotted. Default is None filename : str pdf filename. Default is "ensemble.pdf" func_dict : dict a dict of functions to apply to specific columns (e.g., np.log10) **kwargs : dict keyword args to pass to plot_utils.ensemble_helper() Returns ------- None
def datatable_df(self):
    """Return the DataFrame representation of the symbol's final data."""
    raw = self._all_datatable_data()
    frame = pd.DataFrame(raw)
    frame.columns = self.dt_all_cols
    return self._finish_df(frame, 'ALL')
returns the dataframe representation of the symbol's final data
def _cleanup_markers(context_id, task_ids):
    """Delete the FuriousAsyncMarker entities for the given task ids,
    together with the context's FuriousCompletionMarker."""
    logging.debug("Cleanup %d markers for Context %s",
                  len(task_ids), context_id)
    keys = [ndb.Key(FuriousAsyncMarker, task_id) for task_id in task_ids]
    keys.append(ndb.Key(FuriousCompletionMarker, context_id))
    ndb.delete_multi(keys)
    logging.debug("Markers cleaned.")
Delete the FuriousAsyncMarker entities corresponding to ids.
def cancel(self, job):
    """Pull a job from the scheduler queue.

    Accepts either a job id or a job instance.
    """
    job_id = job.id if isinstance(job, self.job_class) else job
    self.connection.zrem(self.scheduled_jobs_key, job_id)
Pulls a job from the scheduler queue. This function accepts either a job_id or a job instance.
def include_yaml(self, node):
    """Load another yaml file from the path given by the node's value.

    Relative paths are resolved against the stream's root directory; a
    relative path without a known root raises an error.
    """
    filename = self.construct_scalar(node)
    if not filename.startswith('/'):
        if self._root is None:
            raise Exception('!include_yaml %s is a relative path, '
                            'but stream lacks path' % filename)
        # Reuse the already-constructed scalar instead of re-parsing
        # the node a second time.
        filename = os.path.join(self._root, filename)
    with self.open(filename, 'r') as fin:
        return yaml.load(fin, Loader)
load another yaml file from the path specified by node's value
def day_interval(year, month, day, milliseconds=False, return_string=False):
    """Return the start and end datetimes of a day.

    :param milliseconds: if True, the end is one millisecond before the
        next midnight instead of one second.
    :param return_string: if True, return strings instead of datetimes.

    Usage Example::

        >>> start, end = rolex.day_interval(2014, 6, 17)
        >>> start
        datetime(2014, 6, 17, 0, 0, 0)
        >>> end
        datetime(2014, 6, 17, 23, 59, 59)
    """
    resolution = (timedelta(milliseconds=1) if milliseconds
                  else timedelta(seconds=1))
    start = datetime(year, month, day)
    end = start + timedelta(days=1) - resolution
    if return_string:
        return str(start), str(end)
    return start, end
Return a start datetime and end datetime of a day. :param milliseconds: Minimum time resolution. :param return_string: If you want string instead of datetime, set True Usage Example:: >>> start, end = rolex.day_interval(2014, 6, 17) >>> start datetime(2014, 6, 17, 0, 0, 0) >>> end datetime(2014, 6, 17, 23, 59, 59)
def token(self, token_address: Address) -> Token:
    """Return a proxy to interact with a token, creating it lazily.

    :raises ValueError: if ``token_address`` is not a valid binary
        address.
    """
    if not is_binary_address(token_address):
        raise ValueError('token_address must be a valid address')
    with self._token_creation_lock:
        # Create the proxy exactly once per address.
        if token_address not in self.address_to_token:
            self.address_to_token[token_address] = Token(
                jsonrpc_client=self.client,
                token_address=token_address,
                contract_manager=self.contract_manager,
            )
    return self.address_to_token[token_address]
Return a proxy to interact with a token.
def _run_tool(cmd, use_container=True, work_dir=None, log_file=None):
    """Run a command with injection of the bcbio path.

    The path export is placed at the start only for containerized runs,
    so non-container runs avoid overriding other bcbio installations.
    """
    if isinstance(cmd, (list, tuple)):
        cmd = " ".join(str(part) for part in cmd)
    cmd = utils.local_path_export(at_start=use_container) + cmd
    if log_file:
        # Mirror stdout and stderr into the log file as well.
        cmd += " 2>&1 | tee -a %s" % log_file
    try:
        print("Running: %s" % cmd)
        subprocess.check_call(cmd, shell=True)
    finally:
        if use_container and work_dir:
            _chown_workdir(work_dir)
Run with injection of bcbio path. Place at end for runs without containers to avoid overriding other bcbio installations.
def text(self, value):
    """Set the text value and mark the note as edited now.

    Args:
        value (str): Text value.
    """
    self._text = value
    # Record the edit time and flag the object as dirty.
    self.timestamps.edited = datetime.datetime.utcnow()
    self.touch(True)
Set the text value. Args: value (str): Text value.
def _get_nblock_regions(in_file, min_n_size, ref_regions):
    """Retrieve coordinates of reference-genome regions with no mapping.

    These are potential breakpoints for parallelizing analysis.  Whole
    contigs from ``ref_regions`` that never appear in the input are also
    emitted.
    """
    nblock_types = {"REF_N", "NO_COVERAGE", "EXCESSIVE_COVERAGE",
                    "LOW_COVERAGE"}
    out_lines = []
    called_contigs = set()
    with utils.open_gzipsafe(in_file) as in_handle:
        for line in in_handle:
            contig, start, end, ctype = line.rstrip().split()
            called_contigs.add(contig)
            if (ctype in nblock_types
                    and int(end) - int(start) > min_n_size):
                out_lines.append("%s\t%s\t%s\n" % (contig, start, end))
    for refr in ref_regions:
        if refr.chrom not in called_contigs:
            out_lines.append("%s\t%s\t%s\n" % (refr.chrom, 0, refr.stop))
    return pybedtools.BedTool("\n".join(out_lines), from_string=True)
Retrieve coordinates of regions in reference genome with no mapping. These are potential breakpoints for parallelizing analysis.
def estimate_pos_and_err_parabolic(tsvals):
    """Solve for the 1-D position and uncertainty of a source, assuming
    you are near the maximum and the errors are parabolic.

    Parameters
    ----------
    tsvals : `~numpy.ndarray`
        The TS values at the maximum TS, and for each pixel on either
        side.

    Returns
    -------
    The position and uncertainty of the source, in pixel units w.r.t.
    the center of the maximum pixel.
    """
    slope = tsvals[2] - tsvals[0]
    curvature = 2. * tsvals[1] - tsvals[0] - tsvals[2]
    position = slope / (2 * curvature)
    uncertainty = np.sqrt(2 / curvature)
    return position, uncertainty
Solve for the position and uncertainty of source in one dimension assuming that you are near the maximum and the errors are parabolic Parameters ---------- tsvals : `~numpy.ndarray` The TS values at the maximum TS, and for each pixel on either side Returns ------- The position and uncertainty of the source, in pixel units w.r.t. the center of the maximum pixel
def walk(self):
    """Return a view on the configured steps slice.

    Other Parameters:
        conf.core.snapshots: the slice of snapshots.
        conf.core.timesteps: the slice of timesteps.
    """
    if conf.core.snapshots is not None:
        return self.snaps[conf.core.snapshots]
    if conf.core.timesteps is not None:
        return self.steps[conf.core.timesteps]
    # Default: just the last snapshot.
    return self.snaps[-1:]
Return view on configured steps slice. Other Parameters: conf.core.snapshots: the slice of snapshots. conf.core.timesteps: the slice of timesteps.
def reformat_cmd(self, text):
    """Strip noise ('az' prefix, leading scope symbol) from the command
    text and prepend the default command when one is set."""
    text = text.replace('az', '')
    # Drop a leading scope marker (compared against the first 2 chars).
    if text and SELECT_SYMBOL['scope'] == text[0:2]:
        text = text.replace(SELECT_SYMBOL['scope'], "")
    if self.shell_ctx.default_command:
        text = self.shell_ctx.default_command + ' ' + text
    return text
reformat the text to be stripped of noise
def peek(self, fmt):
    """Interpret the next bits according to a format string and return
    the result, without changing the position in the bitstring.

    fmt -- Token string describing how to interpret the next bits.

    If not enough bits are available then all bits to the end of the
    bitstring will be used.

    Raises ReadError if not enough bits are available.
    Raises ValueError if the format is not understood.
    """
    saved_pos = self._pos
    value = self.read(fmt)
    # Rewind so peeking has no side effect on the position.
    self._pos = saved_pos
    return value
Interpret next bits according to format string and return result. fmt -- Token string describing how to interpret the next bits. The position in the bitstring is not changed. If not enough bits are available then all bits to the end of the bitstring will be used. Raises ReadError if not enough bits are available. Raises ValueError if the format is not understood. See the docstring for 'read' for token examples.
def read_chunks(self):
    """Read chunks from the HTTP client.

    The ``reading_chunks``/``got_chunk`` flags guard against recursion:
    when the chunk callback fires synchronously inside ``read_until``
    (the "fast path"), the outer invocation's loop iterates instead of
    this method re-entering itself.
    """
    if self.reading_chunks and self.got_chunk:
        # Already inside the loop below; let that frame iterate.
        logger.debug("Fast-Path detected, returning...")
        return
    while not self.got_request:
        self.reading_chunks = True
        self.got_chunk = False
        self.httpstream.read_until("\r\n", self._chunk_length)
        self.reading_chunks = False
        if self.got_chunk:
            # Callback ran synchronously; keep reading in this frame.
            logger.debug("Fast-Path detected, iterating...")
            continue
        else:
            # Callback will run asynchronously and re-enter this method.
            break
    return
Read chunks from the HTTP client
def info(self):
    """Retrieve the design document view information data as a dict.

    Performs ``GET databasename/_design/{ddoc}/_info``.
    """
    ddoc_info = self.r_session.get(
        '/'.join([self.document_url, '_info']))
    # Propagate HTTP errors before decoding the body.
    ddoc_info.raise_for_status()
    return response_to_json_dict(ddoc_info)
Retrieves the design document view information data, returns dictionary GET databasename/_design/{ddoc}/_info
def path_helper(self, path, view, **kwargs):
    """Path helper for Flask-RESTy views.

    Fills in the OpenAPI path and operations for the given view, adding
    a required string 'path' parameter for every URL rule argument.

    :param view: An `ApiView` object.
    """
    super(FlaskRestyPlugin, self).path_helper(
        path=path,
        view=view,
        **kwargs
    )
    resource = self.get_state().views[view]
    rule = self._rules[resource.rule]

    operations = defaultdict(Operation)
    # The view instance declares its own operations (get/post/...)
    # into the defaultdict.
    view_instance = view()
    view_instance.spec_declaration(view, operations, self)

    # Every URL rule argument becomes a required string path parameter.
    parameters = []
    for arg in rule.arguments:
        parameters.append({
            'name': arg,
            'in': 'path',
            'required': True,
            'type': 'string',
        })
    if parameters:
        operations['parameters'] = parameters

    path.path = FlaskPlugin.flaskpath2openapi(resource.rule)
    path.operations = dict(**operations)
Path helper for Flask-RESTy views. :param view: An `ApiView` object.
def with_blob(self, blob):
    """Init this Azure Blob Lease from an existing blob's JSON content.

    The ``event_processor_context`` field is optional and defaults to
    None when absent.
    """
    content = json.loads(blob.content)
    for field in ("partition_id", "owner", "token", "epoch",
                  "offset", "sequence_number"):
        setattr(self, field, content[field])
    self.event_processor_context = content.get("event_processor_context")
Init Azure Blob Lease with existing blob.
def create_exception_by_error_code(
    errorCode,
    detailCode='0',
    description='',
    traceInformation=None,
    identifier=None,
    nodeId=None,
):
    """Create a DataONE Exception object by errorCode.

    Unknown error codes map to ServiceFailure.

    See Also:
        For args, see: ``DataONEException()``
    """
    exception_class = ERROR_CODE_TO_EXCEPTION_DICT.get(
        errorCode, ServiceFailure)
    return exception_class(
        detailCode, description, traceInformation, identifier, nodeId)
Create a DataONE Exception object by errorCode. See Also: For args, see: ``DataONEException()``
def batch_message(cls, batch, request_ids):
    """Convert a request Batch to a message.

    Requests are paired with ids from ``request_ids`` in order;
    notifications take no id.
    """
    assert isinstance(batch, Batch)
    if not cls.allow_batches:
        raise ProtocolError.invalid_request(
            'protocol does not permit batches')
    id_iter = iter(request_ids)
    parts = (
        cls.request_message(item, next(id_iter))
        if isinstance(item, Request)
        else cls.notification_message(item)
        for item in batch
    )
    return cls.batch_message_from_parts(parts)
Convert a request Batch to a message.
def _try_assign_utc_time(self, raw_time, time_base):
    """Try to assign a UTC time to this reading.

    A valid raw time with the high bit set is an offset in seconds from
    the Y2K reference epoch.  Otherwise, when ``time_base`` is known,
    the raw time is an offset in seconds from it.  Returns None when no
    UTC time can be determined.
    """
    if raw_time != IOTileEvent.InvalidRawTime and (raw_time & (1 << 31)):
        # Bug fix: previously read self.raw_time instead of the
        # raw_time argument that was just validated above.
        y2k_offset = raw_time ^ (1 << 31)
        return self._Y2KReference + datetime.timedelta(seconds=y2k_offset)
    if time_base is not None:
        return time_base + datetime.timedelta(seconds=raw_time)
    return None
Try to assign a UTC time to this reading.
def copy(self):
    """Return a copy of this object.

    The scopes list is shallow-copied so the copy's scopes can diverge
    from the original's.
    """
    self_copy = self.dup()
    self_copy._scopes = copy.copy(self._scopes)
    return self_copy
Return a copy of this object.
def _param_types_to_shape(self, param_types: Optional[str]) -> Sequence[int]: param_types = [] if param_types is None else param_types shape = tuple(self.object_table[ptype]['size'] for ptype in param_types) return shape
Returns the fluent shape given its `param_types`.
def pip(usr_pswd=None):
    """Upgrade outdated pip packages, one package at a time.

    ``pip`` and ``setuptools`` themselves are upgraded first.  (Could
    also be done all at once:
    ``pip list --outdated | cut -d' ' -f1 | xargs pip install --upgrade``)
    """
    try:
        cmd('which pip')
    except Exception:  # narrowed from a bare except
        # pip is not available; nothing to do.
        return
    print('-[pip]----------')
    output = cmd('pip list --outdated')
    if not output:
        return
    pkgs = getPackages(output)
    # Bug fix: the original popped items from ``pkgs`` while enumerating
    # it, which skips the element that follows each removal.
    tooling = [p for p in pkgs if p in ('pip', 'setuptools')]
    others = [p for p in pkgs if p not in ('pip', 'setuptools')]
    for p in tooling:
        cmd('pip install -U ' + p, usr_pwd=usr_pswd, run=global_run)
    for p in others:
        cmd('pip install -U ' + p, usr_pwd=usr_pswd, run=global_run)
This updates one package at a time. Could do all at once: pip list --outdated | cut -d' ' -f1 | xargs pip install --upgrade
def interpolate_single(start, end, coefficient, how='linear'):
    """Interpolate a single value between start and end.

    ``how`` selects the interpolation function from INTERP_SINGLE_DICT
    (e.g. 'linear'); ``coefficient`` is the interpolation position.
    """
    return INTERP_SINGLE_DICT[how](start, end, coefficient)
Interpolate single value between start and end in given number of steps
def seek(self, pos=0):
    """Set the stream's file pointer to pos.

    Forward seeking is implemented by reading and discarding data in
    buffer-sized chunks; negative (backwards) seeking is forbidden and
    raises StreamError.
    """
    distance = pos - self.pos
    if distance < 0:
        raise StreamError("seeking backwards is not allowed")
    blocks, remainder = divmod(distance, self.bufsize)
    for _ in range(blocks):
        self.read(self.bufsize)
    self.read(remainder)
    return self.pos
Set the stream's file pointer to pos. Negative seeking is forbidden.
def _update_aes(self):
    """Check whether a fresh AES key is available and, if so, update
    the worker's crypticle.

    Returns True when the key was rotated, False otherwise.
    """
    current_secret = salt.master.SMaster.secrets['aes']['secret'].value
    if current_secret == self.crypticle.key_string:
        return False
    self.crypticle = salt.crypt.Crypticle(self.opts, current_secret)
    return True
Check to see if a fresh AES key is available and update the components of the worker
def uninstall(cert_name, keychain="/Library/Keychains/System.keychain", keychain_password=None):
    """Remove a certificate from a macOS keychain.

    cert_name
        The name of the certificate to remove.
    keychain
        The keychain to remove it from; defaults to the system keychain.
    keychain_password
        If the keychain may be locked, pass its password so it can be
        unlocked first.  Note: the password appears in plaintext in the
        returned job info.
    """
    if keychain_password is not None:
        unlock_keychain(keychain, keychain_password)
    command = 'security delete-certificate -c "{0}" {1}'.format(cert_name, keychain)
    return __salt__['cmd.run'](command)
Uninstall a certificate from a keychain cert_name The name of the certificate to remove keychain The keychain to remove the certificate from, this defaults to /Library/Keychains/System.keychain keychain_password If your keychain is likely to be locked pass the password and it will be unlocked before running the removal Note: The password given here will show up as plaintext in the returned job info. CLI Example: .. code-block:: bash salt '*' keychain.uninstall cert_name
def append(self, listname, xy_idx, var_name, element_name):
    """Append variable names to one of the name lists.

    :param listname: target list attribute; one of 'unamex', 'unamey'
        (plain format) or 'fnamex', 'fnamey' (LaTeX format).
    :param xy_idx: index, or list of indices, into the target list.
    :param var_name: variable name placed first in the formatted entry.
    :param element_name: element name (int) or list of names matching
        ``xy_idx``.
    """
    self.resize()
    string = '{0} {1}'
    if listname not in ['unamex', 'unamey', 'fnamex', 'fnamey']:
        logger.error('Wrong list name for varname.')
        return
    elif listname in ['fnamex', 'fnamey']:
        # LaTeX-formatted name lists use an escaped-space math template.
        string = '${0}\\ {1}$'
    if isinstance(element_name, list):
        for i, j in zip(xy_idx, element_name):
            if listname == 'fnamex' or listname == 'fnamey':
                # Escape spaces in the element name so LaTeX renders them.
                j = j.replace(' ', '\\ ')
            self.__dict__[listname][i] = string.format(var_name, j)
    elif isinstance(element_name, int):
        self.__dict__[listname][xy_idx] = string.format(
            var_name, element_name)
    else:
        logger.warning(
            'Unknown element_name type while building varname')
Append variable names to the name lists
def user_pass(self, func=None, location=None, **rkwargs):
    """Decorator that ensures the current user passes the given check
    function before the wrapped view runs."""
    def decorator(view):
        coro_view = to_coroutine(view)

        @functools.wraps(coro_view)
        async def handler(request, *args, **kwargs):
            # Validate the user first; only then run the actual view.
            await self.check_user(request, func, location, **rkwargs)
            return await coro_view(request, *args, **kwargs)

        return handler
    return decorator
Decorator ensures that user pass the given func.
def before_content(self):
    """Called before parsing content.

    Records the class name in the build environment's temp data so that
    members parsed inside this object can construct their full names.
    """
    ChapelObject.before_content(self)
    if self.names:
        self.env.temp_data['chpl:class'] = self.names[0][0]
        # Remember that we set the class name so it can be cleaned up later.
        self.clsname_set = True
Called before parsing content. Push the class name onto the class name stack. Used to construct the full name for members.
def execute(self, query_string, params=None):
    """Execute a query and return the resulting cursor.

    :query_string: the parameterized query string
    :params: a tuple or dict matching the query's parameterization style
    :return: a cursor object
    """
    cursor = self.connection.cursor()
    logger.info("SQL: %s (%s)", query_string, params)
    self.last_query = (query_string, params)
    started = time.time()
    # Fall back to the core's empty parameter container when none given.
    cursor.execute(query_string, params if params else self.core.empty_params)
    elapsed_ms = (time.time() - started) * 1000
    logger.info("RUNTIME: %.2f ms", elapsed_ms)
    self._update_cursor_stats(cursor)
    return cursor
Executes a query. Returns the resulting cursor. :query_string: the parameterized query string :params: can be either a tuple or a dictionary, and must match the parameterization style of the query :return: a cursor object
def prepare_for_negotiated_authenticate(
        self, entityid=None, relay_state="", binding=None, vorg="",
        nameid_format=None, scoping=None, consent=None, extensions=None,
        sign=None, response_binding=saml2.BINDING_HTTP_POST, **kwargs):
    """Prepare an authentication request, negotiating which binding to use.

    :param entityid: The entity ID of the IdP to send the request to.
    :param relay_state: Where the user should be returned after login.
    :param binding: Preferred binding for sending the request; when falsy,
        the first supported binding (redirect, then POST) is used.
    :param vorg: The entity_id of the virtual organization I'm a member of.
    :param nameid_format:
    :param scoping: For which IdPs this query is aimed.
    :param consent: Whether the principal has given her consent.
    :param extensions: Possible extensions.
    :param sign: Whether the request should be signed or not.
    :param response_binding: Binding to use for receiving the response.
    :param kwargs: Extra keyword arguments forwarded to request creation.
    :return: (request id, chosen binding, http arguments)
    :raises SignOnError: if no supported binding is available.
    """
    expected_binding = binding
    # Try redirect first, then POST; honour an explicitly requested binding
    # by skipping the others.
    for binding in [BINDING_HTTP_REDIRECT, BINDING_HTTP_POST]:
        if expected_binding and binding != expected_binding:
            continue
        destination = self._sso_location(entityid, binding)
        logger.info("destination to provider: %s", destination)
        reqid, request = self.create_authn_request(
            destination, vorg, scoping, response_binding, nameid_format,
            consent=consent, extensions=extensions, sign=sign,
            **kwargs)
        _req_str = str(request)
        logger.info("AuthNReq: %s", _req_str)
        # Forward the signature algorithm to the binding layer when given.
        try:
            args = {'sigalg': kwargs["sigalg"]}
        except KeyError:
            args = {}
        http_info = self.apply_binding(binding, _req_str, destination,
                                       relay_state, sign=sign, **args)
        return reqid, binding, http_info
    else:
        # The loop returns on the first usable binding; reaching here means
        # no binding matched at all.
        raise SignOnError(
            "No supported bindings available for authentication")
Makes all necessary preparations for an authentication request that negotiates which binding to use for authentication. :param entityid: The entity ID of the IdP to send the request to :param relay_state: To where the user should be returned after successful log in. :param binding: Which binding to use for sending the request :param vorg: The entity_id of the virtual organization I'm a member of :param nameid_format: :param scoping: For which IdPs this query is aimed. :param consent: Whether the principal has given her consent :param extensions: Possible extensions :param sign: Whether the request should be signed or not. :param response_binding: Which binding to use for receiving the response :param kwargs: Extra key word arguments :return: session id and AuthnRequest info
def replace_in_files(dirname, replace):
    """Replace the current version with the new version in the
    ``requirements.in`` file under *dirname*, if it exists and carries the
    expected header/footer markers."""
    filepath = os.path.abspath(dirname / "requirements.in")
    if not (os.path.isfile(filepath) and header_footer_exists(filepath)):
        return
    contents = get_file_string(filepath)
    updated = re.sub(Utils.exp, replace, contents)
    with open(filepath, "w") as f:
        f.write(updated)
    print(color(
        "Written to file: {}".format(filepath), fg='magenta', style='bold'))
Replace current version with new version in requirements files.
def get_path(self, dir=None):
    """Return path relative to the current working directory of the
    Node.FS.Base object that owns us.

    :param dir: directory node to compute the path relative to; defaults
        to the filesystem's current working directory.
    """
    if not dir:
        dir = self.fs.getcwd()
    if self == dir:
        return '.'
    path_elems = self.get_path_elements()
    pathname = ''
    try:
        # If dir is one of our ancestors, build the path starting just
        # below it; otherwise (ValueError) fall back to the full path.
        i = path_elems.index(dir)
    except ValueError:
        for p in path_elems[:-1]:
            pathname += p.dirname
    else:
        for p in path_elems[i+1:-1]:
            pathname += p.dirname
    # The last element contributes its bare name, not its dirname.
    return pathname + path_elems[-1].name
Return path relative to the current working directory of the Node.FS.Base object that owns us.
def get_stage_events(cls, crawler, stage_name, start, end, level=None):
    """Return the recorded events for a particular crawler stage."""
    event_key = make_key(crawler, "events", stage_name, level)
    return cls.event_list(event_key, start, end)
events from a particular stage
async def _reset_protocol(self, exc=None):
    """Shut the protocol down after an error and notify observers.

    Every registered observation error callback is invoked with *exc*,
    then the callback list is cleared.
    """
    protocol = await self._get_protocol()
    await protocol.shutdown()
    self._protocol = None
    for notify_error in self._observations_err_callbacks:
        notify_error(exc)
    self._observations_err_callbacks.clear()
Reset the protocol if an error occurs.
def fire(self, *args, **kargs):
    """Invoke every registered handler and collect their results.

    :return: a list of ``(success, result, handler)`` tuples.  In async
        mode the handlers are only queued, so success/result are None.
    """
    self._time_secs_old = time.time()
    # Snapshot the handler list under the lock so handlers can safely
    # (un)register during the dispatch below.
    with self._hlock:
        handler_list = copy.copy(self._handler_list)
    result_list = []
    for handler in handler_list:
        if self._sync_mode:
            result = self._execute(handler, *args, **kargs)
            # A 3-tuple with an Exception in the middle marks a failed
            # handler -- presumably an exc_info-style triple; TODO confirm
            # against _execute's contract.
            if isinstance(result, tuple) and len(result) == 3 and isinstance(result[1], Exception):
                one_res_tuple = (False, self._error(result), handler)
            else:
                one_res_tuple = (True, result, handler)
        else:
            # Async mode: enqueue for the worker thread; the outcome is
            # not known yet.
            EventSystem._async_queue.put((handler, args, kargs))
            one_res_tuple = (None, None, handler)
        result_list.append(one_res_tuple)
    # Track how long this dispatch took.
    time_secs_new = time.time()
    self.duration_secs = time_secs_new - self._time_secs_old
    self._time_secs_old = time_secs_new
    return result_list
collects results of all executed handlers
def thesauri(self, token: dict = None, prot: str = "https") -> dict:
    """Get the list of available thesauri.

    :param str token: API auth token (not used by the visible code; kept
        for interface compatibility).
    :param str prot: "https" [DEFAULT] or "http" (dev/tracking only).
    """
    url = "{}://v1.{}.isogeo.com/thesauri".format(prot, self.api_url)
    response = self.get(
        url, headers=self.header, proxies=self.proxies, verify=self.ssl
    )
    checker.check_api_response(response)
    return response.json()
Get list of available thesauri. :param str token: API auth token :param str prot: https [DEFAULT] or http (use it only for dev and tracking needs).
def list_parse(name_list):
    """Parse a comma-separated list of values, or a filename (starting
    with ``@``) containing one value per line.

    :param name_list: comma-separated string, or ``@<path>`` reference.
    :return: list of stripped values, or None when the referenced file is
        missing or unreadable.
    """
    if name_list and name_list[0] == '@':
        value = name_list[1:]
        if not os.path.exists(value):
            log.warning('The file %s does not exist' % value)
            return
        try:
            # Bug fix: close the file deterministically; the original
            # leaked the handle returned by open().
            with open(value, 'r') as handle:
                return [v.strip() for v in handle.readlines()]
        except IOError as e:
            log.warning('reading %s failed: %s; ignoring this file'
                        % (value, e))
    else:
        return [v.strip() for v in name_list.split(',')]
Parse a comma-separated list of values, or a filename (starting with @) containing a list value on each line.
def _make_fn_text(self): if not self._f: text = "(not loaded)" elif self._f.filename: text = os.path.relpath(self._f.filename, ".") else: text = "(filename not set)" return text
Makes filename text
def _convert_args(args): converted = [] for arg in args: if isinstance(arg, dict): for key in list(arg.keys()): if key == '__kwarg__': continue converted.append('{0}={1}'.format(key, arg[key])) else: converted.append(arg) return converted
Take a list of args, and convert any dicts inside the list to keyword args in the form of `key=value`, ready to be passed to salt-ssh
def layers(self):
    """Return the globe service layers.

    Each layer dict gains an ``'object'`` key holding a live
    GlobeServiceLayer bound to the layer's URL.
    """
    if self._layers is None:
        # Lazily populate self._layers on first access.
        self.__init()
    lyrs = []
    for lyr in self._layers:
        lyr['object'] = GlobeServiceLayer(url=self._url + "/%s" % lyr['id'],
                                          securityHandler=self._securityHandler,
                                          proxy_port=self._proxy_port,
                                          proxy_url=self._proxy_url)
        lyrs.append(lyr)
    return lyrs
gets the globe service layers
def _update_range(self, data, **kwargs):
    """Forward a range update to the wrapped client.

    Args:
        data (bytes): payload to write.
        **kwargs: passed through to the client's ``update_range`` call.
    """
    self._client.update_range(data=data, **kwargs)
Update range with data Args: data (bytes): data.
def get_next_tag(cls, el):
    """Return the next sibling of *el* that is a tag, or None when the
    sibling chain ends without one."""
    candidate = el.next_sibling
    while not cls.is_tag(candidate) and candidate is not None:
        candidate = candidate.next_sibling
    return candidate
Get next sibling tag.
def assemble_flash_code(self, asm):
    """Assemble the given source text and program the result into flash.

    :param asm: assembly source as a string.
    :return: ``(None, result)`` on success, or ``(exception, None)`` when
        assembly fails.
    """
    stream = StringIO(asm)
    worker = assembler.Assembler(self.processor, stream)
    try:
        result = worker.assemble()
    except BaseException as e:
        # NOTE(review): BaseException also traps KeyboardInterrupt and
        # SystemExit; Exception is probably intended -- confirm before
        # narrowing.
        return e, None
    self.flash.program(result)
    return None, result
assemble the given code and program the Flash
def colorize_format(self, fmt, style=DEFAULT_FORMAT_STYLE):
    """Rewrite a logging format string to inject ANSI escape sequences.

    :param fmt: The log format string.
    :param style: One of ``%``, ``{`` or ``$`` (defaults to
        :data:`DEFAULT_FORMAT_STYLE`).
    :returns: The logging format string with ANSI escape sequences.

    Formatting directives and surrounding text are grouped (by the
    parser, on whitespace).  If exactly one directive in a group has a
    defined field style, the whole group is styled together; otherwise
    each directive is styled individually and surrounding text is left
    untouched.
    """
    result = []
    parser = FormatStringParser(style=style)
    for group in parser.get_grouped_pairs(fmt):
        # Field styles for each named directive in this group (None when
        # a directive has no style defined).
        applicable_styles = [self.nn.get(self.field_styles, token.name)
                             for token in group if token.name]
        if sum(map(bool, applicable_styles)) == 1:
            # Exactly one styled directive: wrap the whole group in it.
            result.append(ansi_wrap(
                ''.join(token.text for token in group),
                **next(s for s in applicable_styles if s)
            ))
        else:
            # Zero or several styled directives: style each directive on
            # its own and pass surrounding text through unchanged.
            for token in group:
                text = token.text
                if token.name:
                    field_styles = self.nn.get(self.field_styles, token.name)
                    if field_styles:
                        text = ansi_wrap(text, **field_styles)
                result.append(text)
    return ''.join(result)
Rewrite a logging format string to inject ANSI escape sequences. :param fmt: The log format string. :param style: One of the characters ``%``, ``{`` or ``$`` (defaults to :data:`DEFAULT_FORMAT_STYLE`). :returns: The logging format string with ANSI escape sequences. This method takes a logging format string like the ones you give to :class:`logging.Formatter` and processes it as follows: 1. First the logging format string is separated into formatting directives versus surrounding text (according to the given `style`). 2. Then formatting directives and surrounding text are grouped based on whitespace delimiters (in the surrounding text). 3. For each group styling is selected as follows: 1. If the group contains a single formatting directive that has a style defined then the whole group is styled accordingly. 2. If the group contains multiple formatting directives that have styles defined then each formatting directive is styled individually and surrounding text isn't styled. As an example consider the default log format (:data:`DEFAULT_LOG_FORMAT`):: %(asctime)s %(hostname)s %(name)s[%(process)d] %(levelname)s %(message)s The default field styles (:data:`DEFAULT_FIELD_STYLES`) define a style for the `name` field but not for the `process` field, however because both fields are part of the same whitespace delimited token they'll be highlighted together in the style defined for the `name` field.
def backspace(self):
    """Move the cursor one place to the left, erasing the character at
    the current position.  Cannot move beyond column zero, nor onto the
    previous line.
    """
    # Bug fix: the guard previously tested ``self._cx + self._cw >= 0``,
    # which allowed the cursor to move to a negative column.  Moving left
    # by one cell width is only valid when the result stays at column >= 0.
    if self._cx - self._cw >= 0:
        self.erase()
        self._cx -= self._cw
        self.flush()
Moves the cursor one place to the left, erasing the character at the current position. Cannot move beyond column zero, nor onto the previous line.
def price_dec(raw_price, default=_not_defined):
    """Extract a price value from *raw_price* and return it as a Decimal.

    :param str raw_price: string containing a price value.
    :param default: value returned when the raw price is not valid.
    :return: Decimal price value.
    :raise ValueError: when the raw price is invalid and no default was
        supplied.
    """
    try:
        price = price_str(raw_price)
        return decimal.Decimal(price)
    except ValueError:
        # Identity is the correct test for a sentinel object, and a bare
        # ``raise`` re-raises with the original traceback intact.
        if default is _not_defined:
            raise
        return default
Price decimal value from raw string. Extract price value from input raw string and present as Decimal number. If raw price does not contain valid price value or contains more than one price value, then return default value. If default value not set, then raise ValueError. :param str raw_price: string that contains price value. :param default: value that will be returned if raw price not valid. :return: Decimal price value. :raise ValueError: error if raw price not valid and default value not set.
def mtf_range(mesh, dim, dtype, name=None):
    """Create a 1d mesh tensor with a range from [0, dim.size).

    Call externally as mtf.range().

    Args:
        mesh: a Mesh
        dim: a Dimension (or anything convert_to_dimension accepts)
        dtype: a tf.DType
        name: an optional string

    Returns:
        a Tensor
    """
    dim = convert_to_dimension(dim)
    with tf.variable_scope(name, default_name="range"):
        if dtype == tf.bfloat16:
            # Build in the default dtype and cast -- presumably because
            # tf.range does not support bfloat16 directly; TODO confirm.
            tf_range = tf.cast(tf.range(dim.size), tf.bfloat16)
        else:
            tf_range = tf.range(dim.size, dtype=dtype)
        return import_tf_tensor(mesh, tf_range, shape=Shape([dim]))
Create a 1d mesh tensor with a range from [0, dim.size). Call externally as mtf.range() Args: mesh: a Mesh dim: a Dimension dtype: a tf.DType name: an optional string Returns: a Tensor
def _dead_assignment_elimination(self, function, data_graph):
    """Collect register assignments that have no consumers and are
    immediately killed.

    BROKEN - DO NOT USE IT

    :param angr.knowledge.Function function: (unused by the visible code)
    :param networkx.MultiDiGraph data_graph:
    :return: None
    """
    register_pvs = set()
    for node in data_graph.nodes():
        # Offsets below 40 presumably select the general-purpose
        # registers of the target architecture -- TODO confirm.
        if isinstance(node.variable, SimRegisterVariable) and \
                node.variable.reg is not None and \
                node.variable.reg < 40:
            register_pvs.add(node)
    for reg in register_pvs:
        out_edges = data_graph.out_edges(reg, data=True)
        consumers = [ ]
        killers = [ ]
        for _, _, data in out_edges:
            if 'type' in data and data['type'] == 'kill':
                killers.append(data)
            else:
                consumers.append(data)
        # No edge reads the value but something overwrites it: dead store.
        if not consumers and killers:
            da = DeadAssignment(reg)
            self.dead_assignments.append(da)
Remove assignments to registers that has no consumers, but immediately killed. BROKEN - DO NOT USE IT :param angr.knowledge.Function function: :param networkx.MultiDiGraph data_graph: :return: None
def lock_resource_for_update(cls, resource_id, db_session):
    """Select a resource with a row lock (SELECT ... FOR UPDATE),
    blocking access from other transactions until this one completes.

    :param resource_id:
    :param db_session:
    :return: the locked resource row, or None
    """
    db_session = get_db_session(db_session)
    return (
        db_session.query(cls.model)
        .filter(cls.model.resource_id == resource_id)
        .with_for_update()
        .first()
    )
Selects resource for update - locking access for other transactions :param resource_id: :param db_session: :return:
def load_ui_type(uifile):
    """PySide equivalent of PyQt4's ``loadUiType``.

    Compiles a Qt Designer .ui file to Python code in memory, executes it
    in a private namespace, and returns the generated form class together
    with the Qt base class, so the UI can be instantiated repeatedly
    without re-parsing the .ui file.

    NOTE(review): Python 2 only (``cStringIO`` import and the
    ``exec ... in`` statement form).

    Args:
        uifile (str): Absolute path to the .ui file.

    Returns:
        tuple: (generated form class, Qt base class)
    """
    import pysideuic
    import xml.etree.ElementTree as ElementTree
    from cStringIO import StringIO
    parsed = ElementTree.parse(uifile)
    widget_class = parsed.find('widget').get('class')
    form_class = parsed.find('class').text
    with open(uifile, 'r') as f:
        o = StringIO()
        frame = {}
        # Compile the .ui file to Python source in memory, then execute it
        # inside ``frame`` to obtain the generated Ui_* class.
        pysideuic.compileUi(f, o, indent=0)
        pyc = compile(o.getvalue(), '<string>', 'exec')
        exec(pyc) in frame
        form_class = frame['Ui_%s' % form_class]
    base_class = eval('QtWidgets.%s' % widget_class)
    return form_class, base_class
Pyside equivalent for the loadUiType function in PyQt. From the PyQt4 documentation: Load a Qt Designer .ui file and return a tuple of the generated form class and the Qt base class. These can then be used to create any number of instances of the user interface without having to parse the .ui file more than once. Note: Pyside lacks the "loadUiType" command, so we have to convert the ui file to py code in-memory first and then execute it in a special frame to retrieve the form_class. Args: uifile (str): Absolute path to .ui file Returns: tuple: the generated form class, the Qt base class
def _handle_continuations(self, response, cache_key): rcontinue = response.get('continue') listen = ['blcontinue', 'cmcontinue', 'plcontinue'] cparams = {} if rcontinue: for flag in listen: if rcontinue.get(flag): cparams[flag] = rcontinue.get(flag) if cparams: self.data['continue'] = cparams del self.cache[cache_key] else: if 'continue' in self.data: del self.data['continue']
Select continue params and clear cache or last continue params
def image_resources(package=None, directory='resources'):
    """Recursively collect image files under a package's resources path.

    If no package is specified, the calling package is used.

    :param package: package name in dotted format.
    :param directory: path of the resources directory relative to the
        package.
    :return: a list of image file paths.
    """
    if not package:
        package = calling_package()
    package_dir = '.'.join([package, directory])
    images = []
    for entry in resource_listdir(package, directory):
        # Skip dunder entries (e.g. __init__.py) and egg metadata.
        if entry.startswith('__') or entry.endswith('.egg-info'):
            continue
        fname = resource_filename(package_dir, entry)
        if resource_isdir(package_dir, entry):
            images.extend(image_resources(package_dir, entry))
        elif what(fname):
            images.append(fname)
    return images
Returns all images under the directory relative to a package path. If no directory or package is specified then the resources module of the calling package will be used. Images are recursively discovered. :param package: package name in dotted format. :param directory: path relative to package path of the resources directory. :return: a list of images under the specified resources path.
def async_save_result(self):
    """Retrieve the result of this subject's asynchronous save.

    - Returns True when the save completed successfully.
    - Re-raises the save's exception (including CancelledError) on
      failure.
    - Returns False when no save was queued or it hasn't finished yet.
    """
    if not hasattr(self, "_async_future"):
        return False
    if not self._async_future.done():
        return False
    # result() re-raises any exception captured by the future.
    self._async_future.result()
    return True
Retrieves the result of this subject's asynchronous save. - Returns `True` if the subject was saved successfully. - Raises `concurrent.futures.CancelledError` if the save was cancelled. - If the save failed, raises the relevant exception. - Returns `False` if the subject hasn't finished saving or if the subject has not been queued for asynchronous save.
def before_sample(analysis_request):
    """Triggered before the "sample" transition is performed on the given
    Analysis Request.

    Fills in DateSampled (now) and Sampler (current user id) when they
    are missing.
    """
    if not analysis_request.getDateSampled():
        analysis_request.setDateSampled(DateTime())
    if not analysis_request.getSampler():
        analysis_request.setSampler(api.get_current_user().id)
Method triggered before "sample" transition for the Analysis Request passed in is performed
def _init_from_file(self, filename): if not filename.endswith("detx"): raise NotImplementedError('Only the detx format is supported.') self._open_file(filename) self._extract_comments() self._parse_header() self._parse_doms() self._det_file.close()
Create detector from detx file.
def create_pattern(cls, userdata):
    """Create a user data instance with every value set the same."""
    template = cls.create_empty(None)
    normalized = cls.normalize(template, userdata)
    return Userdata(normalized)
Create a user data instance with all values the same.