code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def initiate_upgrade_action_and_wait(self, components_mask, action, timeout=2, interval=0.1):
    """Initiate an HPM upgrade action and wait out a long-duration command.

    If the controller answers with CC_LONG_DURATION_CMD_IN_PROGRESS, poll
    until the long-running command completes; any other completion-code
    error is wrapped in HpmError.
    """
    try:
        self.initiate_upgrade_action(components_mask, action)
    except CompletionCodeError as err:
        if err.cc != CC_LONG_DURATION_CMD_IN_PROGRESS:
            raise HpmError('initiate_upgrade_action CC=0x%02x' % err.cc)
        self.wait_for_long_duration_command(
            constants.CMDID_HPM_INITIATE_UPGRADE_ACTION,
            timeout, interval)
Initiate Upgrade Action and wait for long running command.
def battery_reported(self, voltage, rawVoltage):
    """Record a reported battery level.

    ``voltage`` is stored as the remaining-percentage attribute;
    ``rawVoltage`` is scaled down by 100 before being stored as the
    voltage attribute.
    """
    self._update_attribute(BATTERY_PERCENTAGE_REMAINING, voltage)
    scaled_voltage = int(rawVoltage / 100)
    self._update_attribute(self.BATTERY_VOLTAGE_ATTR, scaled_voltage)
Battery reported.
def check_no_self_dependency(cls, dap):
    """Check that the package does not depend on itself.

    Returns a list of DapProblem instances (empty when no problem found).
    """
    problems = []
    meta = dap.meta
    if 'package_name' not in meta or 'dependencies' not in meta:
        return problems
    names = set()
    for dep in meta['dependencies']:
        # Skip dependencies already flagged as bad metadata.
        if 'dependencies' in dap._badmeta and dep in dap._badmeta['dependencies']:
            continue
        if not re.search(r'[<=>]', dep):
            # No version specifier: the whole string is the name.
            names.add(dep)
            continue
        for mark in ('==', '>=', '<=', '<', '>'):
            parts = dep.split(mark)
            if len(parts) == 2:
                names.add(parts[0].strip())
                break
    if meta['package_name'] in names:
        problems.append(DapProblem('Depends on dap with the same name as itself'))
    return problems
Check that the package does not depend on itself. Return a list of problems.
def _repr_pretty_(self, p, cycle):
    """IPython pretty-printer hook for the Spark configuration."""
    from IPython.lib.pretty import RepresentationPrinter
    assert isinstance(p, RepresentationPrinter)
    p.begin_group(1, "SparkConfiguration(")

    def emit(label, value, trailing_comma=True):
        # Print the label followed by the pretty-printed value.
        p.text(label)
        p.pretty(value)
        if trailing_comma:
            p.text(", ")
            p.breakable()

    emit("launcher_arguments: ", self._spark_launcher_args)
    emit("conf: ", self._spark_conf_helper)
    emit("spark_home: ", self.spark_home)
    emit("python_path: ", self._python_path, False)
    p.end_group(1, ')')
Pretty printer for the Spark configuration.
def configure(self, repositories):
    """Prepare the system for an undercloud installation.

    Runs the full setup sequence in order: repository enablement, stack
    user creation, base packages, system cleanup, yum update (rebooting
    if required), OSP install, permissive SELinux, and hostname fixup.
    """
    steps = (
        lambda: self.enable_repositories(repositories),
        self.create_stack_user,
        self.install_base_packages,
        self.clean_system,
        lambda: self.yum_update(allow_reboot=True),
        self.install_osp,
        lambda: self.set_selinux('permissive'),
        self.fix_hostname,
    )
    for step in steps:
        step()
Prepare the system to be ready for an undercloud installation.
def from_coordinates(cls, coordinates):
    """Create a `Primitive` from a list of coordinates.

    Each coordinate becomes a PseudoMonomer holding a single 'CA'
    PseudoAtom; all monomers are relabelled at the end.
    """
    primitive = cls()
    for coordinate in coordinates:
        monomer = PseudoMonomer(ampal_parent=primitive)
        atom = PseudoAtom(coordinate, ampal_parent=monomer)
        monomer.atoms = OrderedDict([('CA', atom)])
        primitive.append(monomer)
    primitive.relabel_all()
    return primitive
Creates a `Primitive` from a list of coordinates.
def regularize_hidden(p0, P, reversible=True, stationary=False, C=None, eps=None):
    """Regularize an HMM's hidden initial distribution and transition matrix.

    Floors all transition probabilities (and, when ``stationary`` is False,
    the initial distribution) at ``eps`` and renormalizes, so estimation
    code never sees exact zeros.

    Parameters
    ----------
    p0 : ndarray(n)
        Initial hidden distribution of the HMM.
    P : ndarray(n, n)
        Hidden transition matrix.
    reversible : bool
        Re-enforce reversibility after flooring.
    stationary : bool
        Treat p0 as the stationary distribution of P; p0 is then not
        regularized separately.
    C : ndarray(n, n)
        Hidden count matrix (only needed for stationary=True and P
        disconnected).
    eps : float or None
        Floor value; defaults to 0.01 / n.

    Returns
    -------
    (p0, P) : regularized initial distribution and transition matrix.
    """
    n_states = P.shape[0]
    if eps is None:
        eps = 0.01 / n_states
    # Floor the transition matrix and renormalize each row.
    P = np.maximum(P, eps)
    P /= P.sum(axis=1)[:, None]
    if reversible:
        P = _tmatrix_disconnected.enforce_reversible_on_closed(P)
    if stationary:
        _tmatrix_disconnected.stationary_distribution(P, C=C)
    else:
        p0 = np.maximum(p0, eps)
        p0 /= p0.sum()
    return p0, P
Regularizes the hidden initial distribution and transition matrix. Makes sure that the hidden initial distribution and transition matrix have nonzero probabilities by setting them to eps and then renormalizing. Avoids zeros that would cause estimation algorithms to crash or get stuck in suboptimal states. Parameters ---------- p0 : ndarray(n) Initial hidden distribution of the HMM P : ndarray(n, n) Hidden transition matrix reversible : bool HMM is reversible. Will make sure it is still reversible after modification. stationary : bool p0 is the stationary distribution of P. In this case, will not regularize p0 separately. If stationary=False, the regularization will be applied to p0. C : ndarray(n, n) Hidden count matrix. Only needed for stationary=True and P disconnected. eps : float or None minimum value of the resulting transition matrix. Default: evaluates to 0.01 / n. The coarse-graining equation can lead to negative elements and thus eps should be set to at least 0. Positive settings of eps are similar to a prior and enforce minimum positive values for all transition probabilities. Returns ------- p0 : ndarray(n) regularized initial distribution P : ndarray(n, n) regularized transition matrix
def update_graderoster(graderoster, requestor):
    """PUT the graderoster document to SWS and return the updated GradeRoster.

    Raises DataFailureException carrying the service's status description
    when the update does not return HTTP 200.
    """
    label = graderoster.graderoster_label()
    url = "{}/{}".format(graderoster_url, encode_section_label(label))
    headers = {
        "Content-Type": "application/xhtml+xml",
        "Connection": "keep-alive",
        "X-UW-Act-as": requestor.uwnetid,
    }
    response = SWS_GradeRoster_DAO().putURL(url, headers, graderoster.xhtml())
    if response.status != 200:
        root = etree.fromstring(response.data)
        description = root.find(".//*[@class='status_description']").text
        raise DataFailureException(url, response.status, description.strip())
    return GradeRoster(data=etree.fromstring(response.data.strip()),
                       section=graderoster.section,
                       instructor=graderoster.instructor)
Updates the graderoster resource for the passed restclients.GradeRoster model. A new restclients.GradeRoster is returned, representing the document returned from the update request.
def OnUpdate(self, event):
    """Refresh the enabled state of the Undo/Redo menu items."""
    stack = undo.stack()
    for item_id, can in ((wx.ID_UNDO, stack.canundo),
                         (wx.ID_REDO, stack.canredo)):
        if item_id in self.id2menuitem:
            self.id2menuitem[item_id].Enable(can())
    event.Skip()
Menu state update
def farray(self):
    """Array of frequencies for the lower edge of each frequency bin.

    Each bin's bandwidth is 2 * sqrt(pi) * f / q; the lower edge is the
    center frequency minus half that bandwidth.

    :type: `numpy.ndarray`
    """
    half_bandwidth = pi ** 0.5 * self.frequencies / self.q
    return self.frequencies - half_bandwidth
Array of frequencies for the lower-edge of each frequency bin :type: `numpy.ndarray`
def list_unit_states(self, machine_id=None, unit_name=None):
    """Yield the current UnitState objects for the fleet cluster.

    Args:
        machine_id (str): restrict to states originating from one machine.
        unit_name (str): restrict to states related to one unit.

    Yields:
        UnitState: the next unit state in the cluster.

    Raises:
        fleet.v1.errors.APIError: fleet returned a response code >= 400.
    """
    pages = self._request('UnitState.List',
                          machineID=machine_id,
                          unitName=unit_name)
    for page in pages:
        for raw_state in page.get('states', []):
            yield UnitState(data=raw_state)
Return the current UnitState for the fleet cluster Args: machine_id (str): filter all UnitState objects to those originating from a specific machine unit_name (str): filter all UnitState objects to those related to a specific unit Yields: UnitState: The next UnitState in the cluster Raises: fleet.v1.errors.APIError: Fleet returned a response code >= 400
def _get_current_deployment_id(self):
    """Return the deployment id currently associated with the stage name.

    Returns an empty string when the stage cannot be described.
    """
    stage = __salt__['boto_apigateway.describe_api_stage'](
        restApiId=self.restApiId,
        stageName=self._stage_name,
        **self._common_aws_args).get('stage')
    if not stage:
        return ''
    return stage.get('deploymentId')
Helper method to find the deployment id that the stage name is currently associated with.
def _get_client(self, client=True): client = client or None if client is True and get_client is None: log.debug("'dask.distributed' library was not found, will " "use simple serial processing.") client = None elif client is True: try: client = get_client() except ValueError: log.warning("No dask distributed client was provided or found, " "but distributed features were requested. Will use simple serial processing.") client = None return client
Determine what dask distributed client to use.
def build_parameters(self, stack, provider_stack=None):
    """Build the CloudFormation Parameters for a stack.

    Args:
        stack (:class:`stacker.stack.Stack`): a stacker stack.
        provider_stack (dict): an optional Stacker provider object.

    Returns:
        list: CloudFormation parameter dicts for the given stack.
    """
    resolved = _resolve_parameters(stack.parameter_values, stack.blueprint)
    required = list(stack.required_parameter_definitions)
    available = list(stack.all_parameter_definitions)
    parameters = _handle_missing_parameters(resolved, available, required,
                                            provider_stack)
    result = []
    for name, value in parameters:
        entry = {"ParameterKey": name}
        if value is UsePreviousParameterValue:
            entry["UsePreviousValue"] = True
        else:
            entry["ParameterValue"] = str(value)
        result.append(entry)
    return result
Builds the CloudFormation Parameters for our stack. Args: stack (:class:`stacker.stack.Stack`): A stacker stack provider_stack (dict): An optional Stacker provider object Returns: list: The list of CloudFormation parameter dicts for the given stack
def _component_of(name):
    """Return the root COMPONENTS entry containing the given module name.

    Walks the dotted path from most to least specific; when nothing
    matches and the name contains '.models', retries with that segment
    removed. Returns None when no component matches.
    """
    parts = name.split('.')
    components = settings.get('COMPONENTS', [])
    while parts:
        candidate = '.'.join(parts)
        if candidate in components:
            return candidate
        parts.pop()
    if '.models' in name:
        return _component_of(name.replace('.models', ''))
Get the root package or module of the passed module.
def dweet_for(thing_name, payload, key=None, session=None):
    """Send a dweet to dweet.io for a thing with a known name."""
    params = None if key is None else {'key': key}
    return _send_dweet(payload,
                       '/dweet/for/{0}'.format(thing_name),
                       params=params,
                       session=session)
Send a dweet to dweet.io for a thing with a known name
def decode(self):
    """Decode self.buffer into opcode/errorcode (and errmsg), return self.

    A bare 4-byte packet carries only opcode + errorcode; longer packets
    append a NUL-terminated error message.
    """
    buflen = len(self.buffer)
    tftpassert(buflen >= 4, "malformed ERR packet, too short")
    log.debug("Decoding ERR packet, length %s bytes", buflen)
    if buflen == 4:
        # Technically RFC 1350 ERR packets carry a message; tolerate its absence.
        log.debug("Allowing this affront to the RFC of a 4-byte packet")
        fmt = b"!HH"
        log.debug("Decoding ERR packet with fmt: %s", fmt)
        self.opcode, self.errorcode = struct.unpack(fmt, self.buffer)
    else:
        log.debug("Good ERR packet > 4 bytes")
        # Message occupies the remainder minus the trailing NUL ('x' pad).
        fmt = b"!HH%dsx" % (len(self.buffer) - 5)
        log.debug("Decoding ERR packet with fmt: %s", fmt)
        self.opcode, self.errorcode, self.errmsg = struct.unpack(fmt, self.buffer)
    # NOTE(review): in the 4-byte branch self.errmsg is never assigned here —
    # this log line presumably relies on a class-level default; confirm.
    log.error("ERR packet - errorcode: %d, message: %s" % (self.errorcode, self.errmsg))
    return self
Decode self.buffer, populating instance variables and return self.
def shrink_patch(patch_path, target_file):
    """Shrink a patch file in place to keep only changes for target_file.

    The patch is scanned for the ``diff --git`` section belonging to
    ``target_file``; only that section's lines are written back.

    :param patch_path: path to the patch file to shrink (rewritten in place)
    :param target_file: filename of the file whose changes should be kept
    :return: True if the patch contains a section for target_file,
        False otherwise (the file is left untouched)
    """
    logging.debug("Shrinking patch file %s to keep only %s changes.",
                  patch_path, target_file)
    search_line = "diff --git a/%s b/%s" % (target_file, target_file)
    kept_lines = []
    keeping = False
    # 'with' replaces the original try/finally open/close bookkeeping.
    with open(patch_path) as patch_file:
        for line in patch_file.read().split("\n"):
            if line == search_line:
                keeping = True
            elif keeping and line.startswith("diff --git a/"):
                # A new file's section starts: stop collecting.
                keeping = False
            if keeping:
                kept_lines.append(line)
    if not kept_lines:
        return False
    content = "\n".join(kept_lines)
    if not content.endswith("\n"):
        content += "\n"
    with open(patch_path, "w") as patch_file:
        patch_file.write(content)
    return True
Shrinks a patch at patch_path to contain only changes for target_file. :param patch_path: path to the patch file to shrink :param target_file: filename of a file whose changes should be kept :return: True if there is a section containing changes for target_file, False otherwise
def _get_app_config(self, app_name):
    """Return the app config matching the given name (not label), or None."""
    for app_config in apps.get_app_configs():
        if app_config.name == app_name:
            return app_config
    return None
Returns an app config for the given name, not by label.
def download_directory(self, remote_path, local_path, progress=None):
    """Recursively download a remote WebDAV directory to a local path.

    Any existing content at ``local_path`` is removed before downloading.

    :param remote_path: remote directory to download from the WebDAV server.
    :param local_path: local directory to save downloaded files into.
    :param progress: progress callback (not supported yet).
    """
    urn = Urn(remote_path, directory=True)
    if not self.is_dir(urn.path()):
        raise OptionNotValid(name='remote_path', value=remote_path)
    if os.path.exists(local_path):
        shutil.rmtree(local_path)
    os.makedirs(local_path)
    for resource_name in self.list(urn.path()):
        self.download(local_path=os.path.join(local_path, resource_name),
                      remote_path=f'{urn.path()}{resource_name}',
                      progress=progress)
Downloads directory and downloads all nested files and directories from remote WebDAV to local. If there is something on local path it deletes directories and files then creates new. :param remote_path: the path to directory for downloading form WebDAV server. :param local_path: the path to local directory for saving downloaded files and directories. :param progress: Progress function. Not supported now.
def fulfill(self, agreement_id, message, account_address, signature, from_account):
    """Fulfill the sign condition.

    :param agreement_id: id of the agreement, hex str
    :param message: message to sign over
    :param account_address: ethereum account address, hex str
    :param signature: signed agreement hash, hex str
    :param from_account: Account doing the transaction
    :return: result of the underlying _fulfill call
    """
    transact = {'from': from_account.address,
                'passphrase': from_account.password}
    return self._fulfill(agreement_id, message, account_address, signature,
                         transact=transact)
Fulfill the sign conditon. :param agreement_id: id of the agreement, hex str :param message: :param account_address: ethereum account address, hex str :param signature: signed agreement hash, hex str :param from_account: Account doing the transaction :return:
def getmacbyip6(ip6, chainCC=0):
    """Return the MAC address corresponding to an IPv6 address.

    Tries, in order: algorithmic multicast mapping, the routing table
    (loopback short-circuit, next-hop substitution), the neighbor cache,
    and finally Neighbor Solicitation. ``chainCC`` is passed through to
    the sending function used to perform the resolution, if needed.
    Returns None when resolution fails.
    """
    if isinstance(ip6, Net6):
        ip6 = str(ip6)
    # Multicast addresses map algorithmically to a MAC address.
    if in6_ismaddr(ip6):
        mac = in6_getnsmac(inet_pton(socket.AF_INET6, ip6))
        return mac
    iff, a, nh = conf.route6.route(ip6)
    # Loopback traffic needs no real link-layer resolution.
    if iff == scapy.consts.LOOPBACK_INTERFACE:
        return "ff:ff:ff:ff:ff:ff"
    # When a next hop exists, resolve its address instead.
    if nh != '::':
        ip6 = nh
    # Consult the neighbor cache before soliciting on the wire.
    mac = conf.netcache.in6_neighbor.get(ip6)
    if mac:
        return mac
    res = neighsol(ip6, a, iff, chainCC=chainCC)
    if res is not None:
        # Prefer the destination link-layer address option, falling back
        # to the source MAC of the reply; cache whichever we found.
        if ICMPv6NDOptDstLLAddr in res:
            mac = res[ICMPv6NDOptDstLLAddr].lladdr
        else:
            mac = res.src
        conf.netcache.in6_neighbor[ip6] = mac
        return mac
    return None
Returns the MAC address corresponding to an IPv6 address neighborCache.get() method is used on instantiated neighbor cache. Resolution mechanism is described in associated doc string. (chainCC parameter value ends up being passed to sending function used to perform the resolution, if needed)
def create_incident(**kwargs):
    """Create an incident via the Cachet API.

    When ``component_id`` is supplied, the incident is attached to that
    component along with the given ``component_status``.
    """
    incidents = cachet.Incidents(endpoint=ENDPOINT, api_token=API_TOKEN)
    payload = dict(name=kwargs['name'],
                   message=kwargs['message'],
                   status=kwargs['status'])
    if 'component_id' in kwargs:
        payload['component_id'] = kwargs['component_id']
        payload['component_status'] = kwargs['component_status']
    return incidents.post(**payload)
Creates an incident
def format(self, text, width=78, indent=4):
    """Wrap ``text`` to ``width`` columns, indenting every line by ``indent`` spaces."""
    pad = ' ' * indent
    return textwrap.fill(text, width=width,
                         initial_indent=pad,
                         subsequent_indent=pad)
Apply textwrap to a given text string
def index(request):
    """Render the AutoML board home page with recent jobs and trial stats."""
    recent_jobs = JobRecord.objects.order_by("-start_time")[0:100]
    recent_trials = TrialRecord.objects.order_by("-start_time")[0:500]
    statuses = [t.trial_status for t in recent_trials]
    job_records = [get_job_info(job) for job in recent_jobs]
    context = {
        "log_dir": AUTOMLBOARD_LOG_DIR,
        "reload_interval": AUTOMLBOARD_RELOAD_INTERVAL,
        "recent_jobs": job_records,
        "job_num": len(job_records),
        "trial_num": len(statuses),
        "running_num": statuses.count(Trial.RUNNING),
        "success_num": statuses.count(Trial.TERMINATED),
        "failed_num": statuses.count(Trial.ERROR),
    }
    return render(request, "index.html", context)
View for the home page.
def close(self):
    """Close the file. A closed file cannot be read or written any more.

    Saves pending changes, removes the manager's temporary data, and
    closes both the compound-file backend and the underlying file object.
    """
    self.save()
    self.manager.remove_temp()
    self.cfb.close()
    self.is_open = False
    self.f.close()
Close the file. A closed file cannot be read or written any more.
def on(cls, hook):
    """Return a decorator that registers a function under the given hook."""
    def register(function_):
        cls._hooks[hook].append(function_)
        return function_
    return register
Hook decorator.
def import_config(config_path):
    """Import a Config from a path, relative to the current directory.

    The module specified by the config file must define a module-level
    variable named ``config`` bound to a Config object.

    Raises ConfigBuilderError when the file is missing or malformed.
    """
    if not os.path.isfile(config_path):
        raise ConfigBuilderError(
            'Could not find config file: ' + config_path)
    # NOTE(review): SourceFileLoader.load_module() is deprecated in modern
    # Python; consider exec_module via importlib.util — confirm before changing.
    loader = importlib.machinery.SourceFileLoader(config_path, config_path)
    module = loader.load_module()
    if not hasattr(module, 'config') or not isinstance(module.config, Config):
        raise ConfigBuilderError(
            'Could not load config file "{}": config files must contain '
            'a variable called "config" that is '
            'assigned to a Config object.'.format(config_path))
    return module.config
Import a Config from a given path, relative to the current directory. The module specified by the config file must contain a variable called `config` that is assigned to a Config object.
def cmpname(name1, name2):
    """Compare two CIM names for equality and ordering, case-insensitively.

    ``None`` is considered the lowest possible value. Returns 0 when the
    names are equal, -1 when name1 < name2, and +1 otherwise.
    """
    if name1 is None and name2 is None:
        return 0
    if name1 is None:
        return -1
    if name2 is None:
        return 1
    folded1, folded2 = name1.lower(), name2.lower()
    if folded1 == folded2:
        return 0
    return -1 if folded1 < folded2 else 1
Compare two CIM names for equality and ordering. The comparison is performed case-insensitively. One or both of the items may be `None`, and `None` is considered the lowest possible value. The implementation delegates to the '==' and '<' operators of the name datatypes. If name1 == name2, 0 is returned. If name1 < name2, -1 is returned. Otherwise, +1 is returned.
def get_schema(frame, name, keys=None, con=None, dtype=None):
    """Get the SQL db table schema for the given frame.

    Parameters
    ----------
    frame : DataFrame
    name : string — name of the SQL table.
    keys : string or sequence, default None — columns to use as primary key.
    con : open DBAPI2 connection (sqlite3 only) or SQLAlchemy connectable,
        default None.
    dtype : dict of column name to SQL type, default None.
    """
    builder = pandasSQL_builder(con=con)
    return builder._create_sql_schema(frame, name, keys=keys, dtype=dtype)
Get the SQL db table schema for the given frame. Parameters ---------- frame : DataFrame name : string name of SQL table keys : string or sequence, default: None columns to use a primary key con: an open SQL database connection object or a SQLAlchemy connectable Using SQLAlchemy makes it possible to use any DB supported by that library, default: None If a DBAPI2 object, only sqlite3 is supported. dtype : dict of column name to SQL type, default None Optional specifying the datatype for columns. The SQL type should be a SQLAlchemy type, or a string for sqlite3 fallback connection.
def ncpos(string, chars, start):
    """Find the first character in ``string`` NOT in ``chars``, searching
    forward from ``start``.

    Thin ctypes wrapper over CSPICE ncpos_c:
    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ncpos_c.html

    :param string: any character string.
    :param chars: a collection of characters.
    :param start: position to begin looking from.
    :return: index of the first non-member character.
    :rtype: int
    """
    arg_string = stypes.stringToCharP(string)
    arg_chars = stypes.stringToCharP(chars)
    arg_start = ctypes.c_int(start)
    return libspice.ncpos_c(arg_string, arg_chars, arg_start)
Find the first occurrence in a string of a character NOT belonging to a collection of characters, starting at a specified location searching forward. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ncpos_c.html :param string: Any character string. :type string: str :param chars: A collection of characters. :type chars: str :param start: Position to begin looking for one not in chars. :type start: int :return: index :rtype: int
def _run_queries(self, queries, *args, **kwargs):
    """Write the queries to a temp file and run them through psql.

    queries -- list -- the SQL statements to run
    return -- string -- the output of the psql invocation
    """
    script = self._get_file()
    script.write("".join("{};\n".format(query) for query in queries))
    script.close()
    psql_args = self._get_args('psql', '-X', '-f {}'.format(script.name))
    return self._run_cmd(' '.join(psql_args), *args, **kwargs)
run the queries queries -- list -- the queries to run return -- string -- the output of running the queries through psql
def tag_list(self):
    """Return this Entry's tags as a sorted list, dropping empty entries."""
    stripped = (tag.strip() for tag in self.tag_string.split(","))
    return sorted(tag for tag in stripped if tag)
Return a plain python list containing all of this Entry's tags.
def stage_import_from_file(self, fd, filename='upload.gz'):
    """Stage an import from a file upload.

    :param fd: file-like object to upload.
    :param filename: (optional) filename to record for the import.
    :return: the decoded :class:`imports.Import <imports.Import>` object.
    """
    schema = ImportSchema()
    response = self.service.post(self.base, files={'file': (filename, fd)})
    return self.service.decode(schema, response)
Stage an import from a file upload. :param fd: File-like object to upload. :param filename: (optional) Filename to use for import as string. :return: :class:`imports.Import <imports.Import>` object
def show_terms_if_not_agreed(context, field=TERMS_HTTP_PATH_FIELD):
    """Template helper: context for a terms-agreement modal on the current page.

    Returns the outstanding terms and a return path when the requested
    path is protected and the user still has active terms to accept;
    otherwise an empty dict (no modal).
    """
    request = context['request']
    url = urlparse(request.META[field])
    pending = TermsAndConditions.get_active_terms_not_agreed_to(request.user)
    if pending and is_path_protected(url.path):
        return {'not_agreed_terms': pending, 'returnTo': url.path}
    return {}
Displays a modal on a current page if a user has not yet agreed to the given terms. If terms are not specified, the default slug is used. A small snippet is included into your template if a user who requested the view has not yet agreed the terms. The snippet takes care of displaying a respective modal.
def picard_index_ref(picard, ref_file):
    """Provide a Picard-style .dict index file for a reference genome.

    Creates the dictionary inside a file transaction when it does not
    already exist; returns the path either way.
    """
    dict_file = "%s.dict" % os.path.splitext(ref_file)[0]
    if not file_exists(dict_file):
        with file_transaction(picard._config, dict_file) as tx_dict_file:
            picard.run("CreateSequenceDictionary",
                       [("REFERENCE", ref_file), ("OUTPUT", tx_dict_file)])
    return dict_file
Provide a Picard style dict index file for a reference genome.
def exhaustive_ontology_ilx_diff_row_only(
    self,
    ontology_row: dict
) -> list:
    """Diff one external ontology row against every existing-ids row.

    WARNING: runtime is awful — each row of ``self.existing_ids`` is
    compared individually.

    Returns the diff results whose comparison came back marked 'same'.
    """
    results = []
    # 'Index' covers the extra leading field that itertuples() yields.
    header = ['Index'] + list(self.existing_ids.columns)
    for row in self.existing_ids.itertuples():
        # Re-shape the row namedtuple into a dict keyed by column name.
        row = {header[i]: val for i, val in enumerate(row)}
        check_list = [
            {
                'external_ontology_row': ontology_row,
                'ilx_rows': [row],
            },
        ]
        result = self.__exhaustive_diff(check_list)[0][0]
        if result['same']:
            results.append(result)
    return results
WARNING: runtime is awful — every row of the existing ids is compared individually.
def image_needs_building(image):
    """Return whether an image needs building.

    Checks if the image exists, either locally or on the registry.

    Args:
        image (str): the `repository:tag` image to be built.

    Returns:
        True if the image needs to be built, False if it already exists.
    """
    client = docker_client()
    try:
        client.images.get(image)
    except docker.errors.ImageNotFound:
        # Not available locally; the registry may still have it.
        return image_needs_pushing(image)
    return False
Return whether an image needs building Checks if the image exists (ignores commit range), either locally or on the registry. Args: image (str): the `repository:tag` image to be build. Returns: True: if image needs to be built False: if not (image already exists)
def check_int(**params):
    """Check that all keyword parameters are integers as expected.

    Raises
    ------
    ValueError : if any parameter is not an integral number.
    """
    # Iterate items() directly instead of re-indexing by key (PERF102).
    for name, value in params.items():
        if not isinstance(value, numbers.Integral):
            raise ValueError(
                "Expected {} integer, got {}".format(name, value))
Check that parameters are integers as expected Raises ------ ValueError : unacceptable choice of parameters
def content(self, value):
    """Set the main component of the log message.

    The content field is a freeform field that often begins with the
    process ID (pid) of the program that created the message; the
    separator is prepended before storing.
    """
    self._content = self._prepend_seperator(value)
The main component of the log message. The content field is a freeform field that often begins with the process ID (pid) of the program that created the message.
async def upload_file(self, Filename, Bucket, Key, ExtraArgs=None, Callback=None, Config=None):
    """Upload a local file to an S3 object.

    Async analogue of boto3's upload_file, with capitalized parameters;
    delegates to upload_fileobj on an opened binary file handle.
    """
    with open(Filename, 'rb') as file_obj:
        await upload_fileobj(self, file_obj, Bucket, Key,
                             ExtraArgs=ExtraArgs,
                             Callback=Callback,
                             Config=Config)
Upload a file to an S3 object. Usage:: import boto3 s3 = boto3.resource('s3') s3.meta.client.upload_file('/tmp/hello.txt', 'mybucket', 'hello.txt') Similar behavior as S3Transfer's upload_file() method, except that parameters are capitalized.
def append_variables(self, samples_like, sort_labels=True):
    """Create a new sample set with the given variables and values appended.

    The appended samples must be a single sample (broadcast over the set)
    or match the sample set's length, and their variables must be disjoint
    from the existing ones. Not defined for empty sample sets. When
    `samples_like` is a SampleSet, its data vectors and info are ignored.
    """
    samples, labels = as_samples(samples_like)
    num_samples = len(self)
    if samples.shape[0] == num_samples:
        pass
    elif samples.shape[0] == 1 and num_samples:
        # Broadcast a single appended sample across every row of the set.
        samples = np.repeat(samples, num_samples, axis=0)
    else:
        msg = ("mismatched shape. The samples to append should either be "
               "a single sample or should match the length of the sample "
               "set. Empty sample sets cannot be appended to.")
        raise ValueError(msg)
    variables = self.variables
    if any(v in variables for v in labels):
        msg = "Appended samples cannot contain variables in sample set"
        raise ValueError(msg)
    new_variables = list(variables) + labels
    new_samples = np.hstack((self.record.sample, samples))
    # Rebuild through from_samples so energies/info/data vectors carry over.
    return type(self).from_samples((new_samples, new_variables),
                                   self.vartype,
                                   info=copy.deepcopy(self.info),
                                   sort_labels=sort_labels,
                                   **self.data_vectors)
Create a new sampleset with the given variables with values added. Not defined for empty sample sets. Note that when `sample_like` is a :obj:`.SampleSet`, the data vectors and info are ignored. Args: samples_like: Samples to add to the sample set. Should either be a single sample or should match the length of the sample set. See :func:`.as_samples` for what is allowed to be `samples_like`. sort_labels (bool, optional, default=True): If true, returned :attr:`.SampleSet.variables` will be in sorted-order. Note that mixed types are not sortable in which case the given order will be maintained. Returns: :obj:`.SampleSet`: A new sample set with the variables/values added. Examples: >>> sampleset = dimod.SampleSet.from_samples([{'a': -1, 'b': +1}, ... {'a': +1, 'b': +1}], ... dimod.SPIN, ... energy=[-1.0, 1.0]) >>> new = sampleset.append_variables({'c': -1}) >>> print(new) a b c energy num_oc. 0 -1 +1 -1 -1.0 1 1 +1 +1 -1 1.0 1 ['SPIN', 2 rows, 2 samples, 3 variables] Add variables from another sampleset to the original above. Note that the energies do not change. >>> another = dimod.SampleSet.from_samples([{'c': -1, 'd': +1}, ... {'c': +1, 'd': +1}], ... dimod.SPIN, ... energy=[-2.0, 1.0]) >>> new = sampleset.append_variables(another) >>> print(new) a b c d energy num_oc. 0 -1 +1 -1 +1 -1.0 1 1 +1 +1 +1 +1 1.0 1 ['SPIN', 2 rows, 2 samples, 4 variables]
def list_scripts(zap_helper):
    """Print a grid table of the scripts currently loaded into ZAP."""
    rows = []
    for script in zap_helper.zap.script.list_scripts:
        if 'enabled' not in script:
            script['enabled'] = 'N/A'
        rows.append([script[k] for k in ('name', 'type', 'engine', 'enabled')])
    click.echo(tabulate(rows,
                        headers=['Name', 'Type', 'Engine', 'Enabled'],
                        tablefmt='grid'))
List scripts currently loaded into ZAP.
def pattern_from_collections_and_statement(data_collections, statement):
    """Generate a list of booleans by evaluating a conditional statement
    against aligned data collections.

    Args:
        data_collections: list of aligned Data Collections to evaluate.
        statement: conditional statement string (e.g. 'a>25 and a%5==0');
            variables are matched to the collections in order.

    Return:
        List of True/False values, one per index of the collections.
    """
    BaseCollection.are_collections_aligned(data_collections)
    correct_var = BaseCollection._check_conditional_statement(
        statement, len(data_collections))
    # Operators are masked while variable values are substituted in,
    # then restored before evaluation.
    num_statement_clean = BaseCollection._replace_operators(statement)
    pattern = []
    for i in xrange(len(data_collections[0])):
        num_statement = num_statement_clean
        # Substitute each collection's value at index i for its variable.
        for j, coll in enumerate(data_collections):
            var = correct_var[j]
            num_statement = num_statement.replace(var, str(coll[i]))
        num_statement = BaseCollection._restore_operators(num_statement)
        # NOTE(review): eval() on a constructed string — the statement must
        # come from trusted input; presumably _check_conditional_statement
        # validates it, but confirm before exposing to untrusted callers.
        pattern.append(eval(num_statement, {}))
    return pattern
Generate a list of booleans from data collections and a conditional statement. Args: data_collections: A list of aligned Data Collections to be evaluated against the statement. statement: A conditional statement as a string (e.g. a>25 and a%5==0). The variable should always be named as 'a' (without quotations). Return: pattern: A list of True/False booleans with the length of the Data Collections where True meets the conditional statement and False does not.
def get_all_attributes(self):
    """Return a flat mapping of every attribute across all sections.

    An OrderedDict is used when attribute order is preserved, a plain
    dict otherwise. Later sections overwrite earlier duplicates of the
    same attribute key.

    :return: All sections / files attributes.
    :rtype: OrderedDict or dict
    """
    all_attributes = OrderedDict() if self.__preserve_order else dict()
    for attributes in self.__sections.itervalues():
        for attribute, value in attributes.iteritems():
            all_attributes[attribute] = value
    return all_attributes
Returns all sections attributes. Usage:: >>> content = ["[Section A]\\n", "; Comment.\\n", "Attribute 1 = \\"Value A\\"\\n", "\\n", \ "[Section B]\\n", "Attribute 2 = \\"Value B\\"\\n"] >>> sections_file_parser = SectionsFileParser() >>> sections_file_parser.content = content >>> sections_file_parser.parse() <foundations.parsers.SectionsFileParser object at 0x845683844> >>> sections_file_parser.get_all_attributes() OrderedDict([(u'Section A|Attribute 1', u'Value A'), (u'Section B|Attribute 2', u'Value B')]) >>> sections_file_parser.preserve_order=False >>> sections_file_parser.get_all_attributes() {u'Section B|Attribute 2': u'Value B', u'Section A|Attribute 1': u'Value A'} :return: All sections / files attributes. :rtype: OrderedDict or dict
def get_file(self, cache_id_obj, section=None): section = "default" if section is None else section if "/" in section: raise ValueError("invalid section '{0}'".format(section)) cache_id = "{:08x}".format( zlib.crc32(b"&".join(sorted([ str(k).encode('utf8') + b"=" + str(v).encode('utf8') for k, v in cache_id_obj.items() ]))) & 0xffffffff) return os.path.join(self._full_base, os.path.join(section, os.path.join( "{0}".format(cache_id[:2]), "{0}.tmp".format(cache_id[2:]))))
Returns the file path for the given cache object.
def v1_subfolder_list(request, response, kvlclient, fid):
    """List subfolder names in folder ``fid`` for the current user.

    Route: ``GET /dossier/v1/folder/<fid>/subfolder``. (Temporarily, the
    "current user" can be set via the ``annotator_id`` query parameter.)
    Responds 404 with an empty payload when the folder does not exist.
    """
    fid = urllib.unquote(fid)
    try:
        # Keep only child items that are folders, return their sorted names.
        return sorted(imap(attrgetter('name'),
                           ifilter(lambda it: it.is_folder(),
                                   new_folders(kvlclient, request).list(fid))))
    except KeyError:
        response.status = 404
        return []
Retrieves a list of subfolders in a folder for the current user. The route for this endpoint is: ``GET /dossier/v1/folder/<fid>/subfolder``. (Temporarily, the "current user" can be set via the ``annotator_id`` query parameter.) The payload returned is a list of subfolder identifiers.
def increment_lineno(node, n=1):
    """Increment the line numbers of all nodes in the tree by ``n``.

    Only nodes whose ``_attributes`` include ``lineno`` are touched; a
    missing lineno is treated as 0. This is useful to "move code" to a
    different location in a file.
    """
    # The original iterated ``zip((node,), walk(node))``, which yields a
    # single (node, first) tuple and then reads ``_attributes`` off that
    # tuple — walk the tree directly instead. Assumes walk() yields the
    # root node as well as its descendants (as ast.walk does) — confirm.
    for child in walk(node):
        if 'lineno' in child._attributes:
            child.lineno = getattr(child, 'lineno', 0) + n
Increment the line numbers of all nodes by `n` if they have line number attributes. This is useful to "move code" to a different location in a file.
def reload(self):
    """Reload the limits configuration from the database.

    If an error occurs loading the configuration, an error-level log
    message is emitted, the error message is added to the Redis set
    named by the 'errors_key' configuration ('errors' by default), and
    published to the channel named by 'errors_channel' ('errors' by
    default).
    """
    # Non-blocking acquire: bail out if a reload is already in flight.
    if not self.pending.acquire(False):
        return
    control_args = self.config['control']
    try:
        key = control_args.get('limits_key', 'limits')
        self.limits.set_limits(self.db.zrange(key, 0, -1))
    except Exception:
        LOG.exception("Could not load limits")
        error_key = control_args.get('errors_key', 'errors')
        error_channel = control_args.get('errors_channel', 'errors')
        # Record and broadcast the failure; secondary Redis errors are
        # deliberately ignored so reload never raises.
        msg = "Failed to load limits: " + traceback.format_exc()
        with utils.ignore_except():
            self.db.sadd(error_key, msg)
        with utils.ignore_except():
            self.db.publish(error_channel, msg)
    finally:
        self.pending.release()
Reloads the limits configuration from the database. If an error occurs loading the configuration, an error-level log message will be emitted. Additionally, the error message will be added to the set specified by the 'redis.errors_key' configuration ('errors' by default) and sent to the publishing channel specified by the 'redis.errors_channel' configuration ('errors' by default).
def shutdown(self):
    """Cleanly terminate worker threads starting and killing services.

    Blocks until the service starter thread has exited, then kills all
    outstanding issued services.
    """
    logger.debug('Waiting for service manager thread to finish ...')
    started = time.time()
    self._terminate.set()
    self._serviceStarter.join()
    for services in list(self.toilState.servicesIssued.values()):
        self.killServices(services, error=True)
    logger.debug('... finished shutting down the service manager. Took %s seconds',
                 time.time() - started)
Cleanly terminate worker threads starting and killing services. Will block until all services are started and blocked.
def intersects(self, other):
    """Return True if some segmentlist in self intersects the
    corresponding segmentlist in other; False otherwise.

    See also: .intersects_all(), .all_intersects(), .all_intersects_all()
    """
    # A key absent from self can never contribute an intersection.
    return any(key in self and self[key].intersects(value)
               for key, value in other.iteritems())
Returns True if there exists a segmentlist in self that intersects the corresponding segmentlist in other; returns False otherwise. See also: .intersects_all(), .all_intersects(), .all_intersects_all()
def clean_pubmed_identifiers(pmids: Iterable[str]) -> List[str]:
    """Strip, deduplicate, and sort a collection of PubMed identifiers."""
    unique = {str(pmid).strip() for pmid in pmids}
    return sorted(unique)
Clean a list of PubMed identifiers with string strips, deduplicates, and sorting.
def log(duration, message=None, use_last_commit_message=False):
    """Log time against the JIRA issue named by the current git branch.

    The worklog comment comes from `message`, else the last commit
    message (when requested), else a default mentioning the branch.
    """
    branch = git.branch
    issue = jira.get_issue(branch)
    comment = "Working on issue %s" % branch
    if message:
        comment = message
    elif use_last_commit_message:
        comment = git.get_last_commit_message()
    if issue:
        # '.' means "use the elapsed time JIRA has tracked for this issue".
        duration = jira.get_elapsed_time(issue) if duration == '.' else duration
        if duration:
            jira.add_worklog(issue, timeSpent=duration, adjustEstimate=None,
                             newEstimate=None, reduceBy=None, comment=comment)
            print "Logged %s against issue %s (%s)" % (duration, branch, comment)
        else:
            print "No time logged, less than 0m elapsed."
Log time against the current active issue
def phonetic(s, method, concat=True, encoding='utf-8', decode_error='strict'):
    """Convert a pandas.Series of names/strings into phonetic codes.

    method is one of the registered algorithms ("soundex", "nysiis",
    "metaphone", "match_rating"); concat removes hyphens, underscores and
    whitespace before encoding; encoding/decode_error control byte
    decoding on Python 2. Returns a Series of encoded values with nulls
    preserved as NaN. Raises ValueError for an unknown method.
    """
    # Python 2 only: decode byte strings before processing.
    if sys.version_info[0] == 2:
        s = s.apply(
            lambda x: x.decode(encoding, decode_error) if type(x) == bytes else x)
    if concat:
        # Strip separators so e.g. "de-Bruin" and "deBruin" encode alike.
        s = s.str.replace(r"[\-\_\s]", "")
    # Look up the callback registered for this method name.
    for alg in _phonetic_algorithms:
        if method in alg['argument_names']:
            phonetic_callback = alg['callback']
            break
    else:
        raise ValueError("The algorithm '{}' is not known.".format(method))
    return s.str.upper().apply(
        lambda x: phonetic_callback(x) if pandas.notnull(x) else np.nan
    )
Convert names or strings into phonetic codes. The implemented algorithms are `soundex <https://en.wikipedia.org/wiki/Soundex>`_, `nysiis <https://en.wikipedia.org/wiki/New_York_State_Identification_and_ Intelligence_System>`_, `metaphone <https://en.wikipedia.org/wiki/Metaphone>`_ or `match_rating <https://en.wikipedia.org/wiki/Match_rating_approach>`_. Parameters ---------- s : pandas.Series A pandas.Series with string values (often names) to encode. method: str The algorithm that is used to phonetically encode the values. The possible options are "soundex", "nysiis", "metaphone" or "match_rating". concat: bool, optional Remove whitespace before phonetic encoding. encoding: str, optional If bytes are given, this encoding is used to decode. Default is 'utf-8'. decode_error: {'strict', 'ignore', 'replace'}, optional Instruction on what to do if a byte Series is given that contains characters not of the given `encoding`. By default, it is 'strict', meaning that a UnicodeDecodeError will be raised. Other values are 'ignore' and 'replace'. Returns ------- pandas.Series A Series with phonetic encoded values.
def value(self):
    """The final value, once it has arrived.

    :raises: AttributeError, if not yet complete
    :raises: the stored exception, if the Future was aborted
    """
    if not self._done.is_set():
        raise AttributeError("value")
    if self._failure:
        # Python 2 three-argument raise: re-raise the stored exception
        # type/value with its original traceback.
        raise self._failure[0], self._failure[1], self._failure[2]
    return self._value
The final value, if it has arrived :raises: AttributeError, if not yet complete :raises: an exception if the Future was :meth:`abort`\ed
def _write_import_root_map_file(path, import_root_map):
    """Atomically write a tab-separated file mapping import paths to roots."""
    with safe_concurrent_creation(path) as safe_path:
        with open(safe_path, 'w') as fp:
            lines = ('{}\t{}\n'.format(import_path, root)
                     for import_path, root in sorted(import_root_map.items()))
            fp.writelines(lines)
Writes a file mapping import paths to roots.
def idents_from_label(lab, subtopic=False):
    """Return the pair of "idents" for a label.

    Each ident is a ``(content_id, subtopic_id)`` pair.  When ``subtopic``
    is falsy, the subtopic component of each pair is ``None``.  Useful for
    dealing with generic label identifiers.
    """
    if subtopic:
        return (
            (lab.content_id1, lab.subtopic_id1),
            (lab.content_id2, lab.subtopic_id2),
        )
    return (lab.content_id1, None), (lab.content_id2, None)
Returns the "ident" of a label. If ``subtopic`` is ``True``, then a pair of pairs is returned, where each pair corresponds to the content id and subtopic id in the given label. Otherwise, a pair of pairs is returned, but the second element of each pair is always ``None``. This is a helper function that is useful for dealing with generic label identifiers.
def log_train_metric(period, auto_reset=False):
    """Build a callback that logs the training metric every `period` batches.

    :param period: log on every batch whose index is a multiple of this
    :param auto_reset: reset the metric's local state after each log
    :returns: a callback suitable as ``iter_epoch_callback`` for ``fit``
    """
    def _callback(param):
        # Skip batches that are off-period or have no metric attached.
        if param.nbatch % period != 0 or param.eval_metric is None:
            return
        for name, value in param.eval_metric.get_name_value():
            logging.info('Iter[%d] Batch[%d] Train-%s=%f',
                         param.epoch, param.nbatch, name, value)
        if auto_reset:
            param.eval_metric.reset_local()
    return _callback
Callback to log the training evaluation result every period. Parameters ---------- period : int The number of batch to log the training evaluation metric. auto_reset : bool Reset the metric after each log. Returns ------- callback : function The callback function that can be passed as iter_epoch_callback to fit.
def log(level, message):
    """Publish `message` with the given `level` on the module's redis channel.

    :param level: severity level; must be one of the module's
        ``__error_levels``
    :param message: the message to publish
    :raises InvalidErrorLevel: if `level` is not an accepted level
    :raises NoChannelError: if no channel has been configured
    """
    # Lazily open the redis connection on first use.
    if redis_instance is None:
        __connect()
    if level not in __error_levels:
        raise InvalidErrorLevel('You have used an invalid error level. \
Please choose in: ' + ', '.join(__error_levels))
    if channel is None:
        raise NoChannelError('Please set a channel.')
    # Messages are published on the "<channel>.<level>" redis channel.
    c = '{channel}.{level}'.format(channel=channel, level=level)
    redis_instance.publish(c, message)
Publish `message` with the `level` the redis `channel`. :param level: the level of the message :param message: the message you want to log
def draw(self, img, pixmapper, bounds):
    """Blit the (lazily rendered) legend into the top-left corner of `img`.

    `pixmapper` and `bounds` are accepted for interface compatibility but
    are not used: the legend is placed at a fixed 5-pixel offset.
    """
    if self._img is None:
        # Render once and cache for subsequent draws.
        self._img = self.draw_legend()
    height = self._img.shape[0]
    width = self._img.shape[1]
    x_off = y_off = 5
    img[y_off:y_off + height, x_off:x_off + width] = self._img
draw legend on the image
def clip_upper(self, threshold, axis=None, inplace=False):
    """Trim values above `threshold`.

    .. deprecated:: 0.24.0
        Use ``clip(upper=threshold)`` instead.
    """
    warnings.warn('clip_upper(threshold) is deprecated, '
                  'use clip(upper=threshold) instead',
                  FutureWarning, stacklevel=2)
    # Delegate to the shared one-sided clip helper; bounding with
    # ``self.le`` (element-wise <=) replaces values above `threshold`.
    return self._clip_with_one_bound(threshold, method=self.le,
                                     axis=axis, inplace=inplace)
Trim values above a given threshold. .. deprecated:: 0.24.0 Use clip(upper=threshold) instead. Elements above the `threshold` will be changed to match the `threshold` value(s). Threshold can be a single value or an array, in the latter case it performs the truncation element-wise. Parameters ---------- threshold : numeric or array-like Maximum value allowed. All values above threshold will be set to this value. * float : every value is compared to `threshold`. * array-like : The shape of `threshold` should match the object it's compared to. When `self` is a Series, `threshold` should be the length. When `self` is a DataFrame, `threshold` should 2-D and the same shape as `self` for ``axis=None``, or 1-D and the same length as the axis being compared. axis : {0 or 'index', 1 or 'columns'}, default 0 Align object with `threshold` along the given axis. inplace : bool, default False Whether to perform the operation in place on the data. .. versionadded:: 0.21.0 Returns ------- Series or DataFrame Original data with values trimmed. See Also -------- Series.clip : General purpose method to trim Series values to given threshold(s). DataFrame.clip : General purpose method to trim DataFrame values to given threshold(s). Examples -------- >>> s = pd.Series([1, 2, 3, 4, 5]) >>> s 0 1 1 2 2 3 3 4 4 5 dtype: int64 >>> s.clip(upper=3) 0 1 1 2 2 3 3 3 4 3 dtype: int64 >>> elemwise_thresholds = [5, 4, 3, 2, 1] >>> elemwise_thresholds [5, 4, 3, 2, 1] >>> s.clip(upper=elemwise_thresholds) 0 1 1 2 2 3 3 2 4 1 dtype: int64
def bitwise_xor(bs0: str, bs1: str) -> str:
    """Return the bitwise XOR of two equal-length binary bit strings.

    :param bs0: string of '0'/'1' characters
    :param bs1: string of '0'/'1' characters, same length as ``bs0``
    :return: bit string of the same length representing ``bs0 ^ bs1``
    :raises ValueError: if the two strings differ in length
    """
    n_bits = len(bs0)
    if n_bits != len(bs1):
        raise ValueError("Bit strings are not of equal length")
    result = xor(int(bs0, 2), int(bs1, 2))
    return PADDED_BINARY_BIT_STRING.format(result, n_bits)
A helper to calculate the bitwise XOR of two bit string :param bs0: String of 0's and 1's representing a number in binary representations :param bs1: String of 0's and 1's representing a number in binary representations :return: String of 0's and 1's representing the XOR between bs0 and bs1
def break_edge(self, from_index, to_index, to_jimage=None, allow_reverse=False):
    """Remove an edge from the StructureGraph.

    :param from_index: int, index of the edge's source site
    :param to_index: int, index of the edge's destination site
    :param to_jimage: tuple, periodic image of the destination site;
        required, since multiple parallel edges may connect the same pair
        of sites through different images
    :param allow_reverse: if True, also try (to_index, from_index) when
        no forward edge exists
    :raises ValueError: if no image is supplied, or no edge exists in
        either direction
    """
    # All parallel edges between the two sites (keyed by edge index).
    existing_edges = self.graph.get_edge_data(from_index, to_index)
    existing_reverse = None
    if to_jimage is None:
        raise ValueError("Image must be supplied, to avoid ambiguity.")
    if existing_edges:
        # Find the parallel edge whose stored image matches to_jimage.
        for i, properties in existing_edges.items():
            if properties["to_jimage"] == to_jimage:
                edge_index = i
        # NOTE(review): if no stored image matches to_jimage, `edge_index`
        # is unbound here and this raises UnboundLocalError rather than a
        # descriptive error — confirm whether callers guarantee a match.
        self.graph.remove_edge(from_index, to_index, edge_index)
    else:
        if allow_reverse:
            existing_reverse = self.graph.get_edge_data(to_index, from_index)
        if existing_reverse:
            for i, properties in existing_reverse.items():
                if properties["to_jimage"] == to_jimage:
                    edge_index = i
            self.graph.remove_edge(to_index, from_index, edge_index)
        else:
            raise ValueError("Edge cannot be broken between {} and {};\
no edge exists between those sites.".format(
                from_index, to_index
            ))
Remove an edge from the StructureGraph. If no image is given, this method will fail. :param from_index: int :param to_index: int :param to_jimage: tuple :param allow_reverse: If allow_reverse is True, then break_edge will attempt to break both (from_index, to_index) and, failing that, will attempt to break (to_index, from_index). :return:
def get_cpu_info(self):
    """Retrieve CPU info from the client.

    :returns: a filled-in ``S7CpuInfo`` structure
    """
    info = snap7.snap7types.S7CpuInfo()
    # Cli_GetCpuInfo fills `info` in place through the byref pointer;
    # check_error raises on a non-zero snap7 result code.
    result = self.library.Cli_GetCpuInfo(self.pointer, byref(info))
    check_error(result, context="client")
    return info
Retrieves CPU info from client
def traverse(self, predicate=lambda i, d: True,
             prune=lambda i, d: False, depth=-1, branch_first=True,
             visit_once=False, ignore_self=1):
    """Traverse the tree; for full documentation see util.Traversable.traverse.

    Trees default to ``visit_once=False`` to gain more performance in the
    traversal.
    """
    return super(Tree, self).traverse(predicate, prune, depth, branch_first,
                                      visit_once, ignore_self)
For documentation, see util.Traversable.traverse Trees are set to visit_once = False to gain more performance in the traversal
def generate_drone_plugin(self):
    """Print a sample drone plugin pipeline configuration (as YAML).

    Builds a ``pipeline.appname`` section with blank ``image``/``secrets``
    entries plus one key per field in ``self.spec``, using each field's
    'example' value.
    """
    example = {}
    example['pipeline'] = {}
    example['pipeline']['appname'] = {}
    example['pipeline']['appname']['image'] = ""
    example['pipeline']['appname']['secrets'] = ""
    for key, value in self.spec.items():
        # dict/list examples are embedded as single-quoted JSON strings
        # so they remain one YAML scalar value.
        if value['type'] in (dict, list):
            kvalue = f"\'{json.dumps(value.get('example', ''))}\'"
        else:
            kvalue = f"{value.get('example', '')}"
        example['pipeline']['appname'][key.lower()] = kvalue
    print(yaml.dump(example, default_flow_style=False))
Generate a sample drone plugin configuration
def _add_sync_queues_and_barrier(self, name, dependencies):
    """Add ops implementing a cross-worker barrier via shared FIFO queues.

    Each worker enqueues a token on every other worker's queue, then
    dequeues one token per other worker from its own queue — so the
    returned op completes only once all workers have reached the barrier.

    Args:
        name: prefix for the shared_name of the queue ops.
        dependencies: control dependencies that must run before enqueueing.

    Returns:
        An op that should be used as a control dependency before starting
        the next step.
    """
    self._sync_queue_counter += 1
    # Round-robin queue placement over the configured sync-queue devices.
    with tf.device(self.sync_queue_devices[self._sync_queue_counter % len(self.sync_queue_devices)]):
        # One shared queue per worker, identified by its shared_name.
        sync_queues = [
            tf.FIFOQueue(self.num_worker, [tf.bool], shapes=[[]],
                         shared_name='%s%s' % (name, i))
            for i in range(self.num_worker)]
        queue_ops = []
        token = tf.constant(False)
        with tf.control_dependencies(dependencies):
            # Signal every other worker by enqueueing on its queue.
            for i, q in enumerate(sync_queues):
                if i != self.task_index:
                    queue_ops.append(q.enqueue(token))
        # Wait for one token from each of the other workers.
        queue_ops.append(
            sync_queues[self.task_index].dequeue_many(len(sync_queues) - 1))
        return tf.group(*queue_ops, name=name)
Adds ops to enqueue on all worker queues. Args: name: prefixed for the shared_name of ops. dependencies: control dependency from ops. Returns: an op that should be used as control dependency before starting next step.
def build():
    """Build and print the cloud_init script for the 'cluster' configuration.

    CloudCompose errors are caught and printed instead of propagating.
    """
    try:
        cloud_config = CloudConfig()
        config_data = cloud_config.config_data('cluster')
        cloud_init = CloudInit()
        print(cloud_init.build(config_data))
    except CloudComposeException as ex:
        print(ex)
builds the cloud_init script
def GetCloudPath(self, resource_id, cache, database):
    """Return the full cloud path for a resource id.

    Args:
        resource_id (str): resource identifier for the file.
        cache (SQLiteCache): cache of resource_id -> (filename, parent).
        database (SQLiteDatabase): database queried on a cache miss.

    Returns:
        str: full path to the resource value.
    """
    cloud_path = cache.GetResults('cloud_path')
    if not cloud_path:
        # Populate the cache from the database on first use.
        results = database.Query(self.CLOUD_PATH_CACHE_QUERY)
        cache.CacheQueryResults(
            results, 'cloud_path', 'resource_id', ('filename', 'parent'))
        cloud_path = cache.GetResults('cloud_path')
    if resource_id == 'folder:root':
        return '/'
    paths = []
    # Walk the parent links up to the root, collecting path segments.
    # NOTE(review): per the cache schema each entry is (filename, parent),
    # so `parent_path` holds each segment's filename — confirm naming.
    parent_path, parent_id = cloud_path.get(resource_id, ['', ''])
    while parent_path:
        if parent_path == 'folder:root':
            break
        paths.append(parent_path)
        parent_path, parent_id = cloud_path.get(parent_id, ['', ''])
    if not paths:
        return '/'
    # Segments were collected leaf-first; reverse into root-first order.
    paths.reverse()
    return '/{0:s}/'.format('/'.join(paths))
Return cloud path given a resource id. Args: resource_id (str): resource identifier for the file. cache (SQLiteCache): cache. database (SQLiteDatabase): database. Returns: str: full path to the resource value.
def render_targets_weighted_spans(
        targets,
        preserve_density,
    ):
    """Return a list of rendered weighted spans for targets.

    Accepts the whole targets list at once so that consistent weight
    ranges are selected across all targets; entries without prepared
    spans map to None.
    """
    prepared_weighted_spans = prepare_weighted_spans(
        targets, preserve_density)

    def _fmt_pws(pws):
        # Prefix the vectorizer name in bold when one is set.
        name = ('<b>{}:</b> '.format(pws.doc_weighted_spans.vec_name)
                if pws.doc_weighted_spans.vec_name else '')
        return '{}{}'.format(name, render_weighted_spans(pws))

    def _fmt_pws_list(pws_lst):
        return '<br/>'.join(_fmt_pws(pws) for pws in pws_lst)

    return [_fmt_pws_list(pws_lst) if pws_lst else None
            for pws_lst in prepared_weighted_spans]
Return a list of rendered weighted spans for targets. Function must accept a list in order to select consistent weight ranges across all targets.
def getRelationships(self, pid, subject=None, predicate=None, format=None):
    """Get information about relationships on an object.

    Wrapper for the Fedora REST API ``getRelationships`` call.

    :param pid: object pid
    :param subject: subject (optional)
    :param predicate: predicate (optional)
    :param format: format (optional)
    :rtype: :class:`requests.models.Response`
    """
    # Only include the parameters that were actually supplied.
    http_args = {}
    for arg_name, arg_value in (('subject', subject),
                                ('predicate', predicate),
                                ('format', format)):
        if arg_value is not None:
            http_args[arg_name] = arg_value
    url = 'objects/%(pid)s/relationships' % {'pid': pid}
    return self.get(url, params=http_args)
Get information about relationships on an object. Wrapper function for `Fedora REST API getRelationships <https://wiki.duraspace.org/display/FEDORA34/REST+API#RESTAPI-getRelationships>`_ :param pid: object pid :param subject: subject (optional) :param predicate: predicate (optional) :param format: format :rtype: :class:`requests.models.Response`
def overlap(self, x, ctrs, kdtree=None):
    """Count how many balls `x` falls within.

    Uses a K-D Tree to perform the search if one is provided.
    """
    return len(self.within(x, ctrs, kdtree=kdtree))
Check how many balls `x` falls within. Uses a K-D Tree to perform the search if provided.
def handle_exception(error):
    """Build the response for a raised ``InvalidAPIUsage`` exception.

    Returns JSON with the error's own status code when the client accepts
    JSON, otherwise the error's abort response.  If content-type
    negotiation itself raises InvalidAPIUsage, responds 415 with the
    error serialized as JSON.
    """
    try:
        if _get_acceptable_response_type() == JSON:
            response = jsonify(error.to_dict())
            response.status_code = error.code
            return response
        else:
            return error.abort()
    except InvalidAPIUsage:
        # Negotiation failed: unsupported media type.
        response = jsonify(error.to_dict())
        response.status_code = 415
        return response
Return a response with the appropriate status code, message, and content type when an ``InvalidAPIUsage`` exception is raised.
def is_snp(reference_bases, alternate_bases):
    """Return whether or not the variant is a SNP.

    True when the reference is at most one base and every alternate is a
    single valid base symbol; an empty alternate list counts as a SNP.
    """
    valid_bases = {'A', 'C', 'G', 'T', 'N', '*'}
    if len(reference_bases) > 1:
        return False
    # `None` alternates are rejected implicitly: None is never a member
    # of the valid-base set.
    return all(alt in valid_bases for alt in alternate_bases)
Return whether or not the variant is a SNP
def _add_cable_to_equipment_changes(network, line): network.results.equipment_changes = \ network.results.equipment_changes.append( pd.DataFrame( {'iteration_step': [0], 'change': ['added'], 'equipment': [line.type.name], 'quantity': [1] }, index=[line] ) )
Add cable to the equipment changes All changes of equipment are stored in network.results.equipment_changes which is used later to determine grid expansion costs. Parameters ---------- network : :class:`~.grid.network.Network` The eDisGo container object line : class:`~.grid.components.Line` Line instance which is to be added
def gdal2np_dtype(b):
    """Return the NumPy datatype corresponding to a GDAL RasterBand datatype.

    Input can be a filename, GDAL Dataset, GDAL RasterBand, or GDAL
    integer dtype code; any other input prints a message and returns None.
    """
    dt_dict = gdal_array.codes
    # Progressively narrow: filename -> Dataset -> Band -> int type code.
    if isinstance(b, str):
        b = gdal.Open(b)
    if isinstance(b, gdal.Dataset):
        b = b.GetRasterBand(1)
    if isinstance(b, gdal.Band):
        b = b.DataType
    if isinstance(b, int):
        np_dtype = dt_dict[b]
    else:
        np_dtype = None
        print("Input must be GDAL Dataset or RasterBand object")
    return np_dtype
Get NumPy datatype that corresponds with GDAL RasterBand datatype Input can be filename, GDAL Dataset, GDAL RasterBand, or GDAL integer dtype
def add_resources(res_a, res_b):
    """Return the resources after adding res_b's resources to res_a.

    Parameters
    ----------
    res_a : dict
        Dictionary ``{resource: value, ...}``.
    res_b : dict
        Dictionary ``{resource: value, ...}``.  Must be a (non-strict)
        subset of res_a; resources absent from res_b contribute 0.
    """
    # dict.items() works identically on Python 2 and 3 here, so the
    # `iteritems` compatibility helper is unnecessary.
    return {resource: value + res_b.get(resource, 0)
            for resource, value in res_a.items()}
Return the resources after adding res_b's resources to res_a. Parameters ---------- res_a : dict Dictionary `{resource: value, ...}`. res_b : dict Dictionary `{resource: value, ...}`. Must be a (non-strict) subset of res_a. If A resource is not present in res_b, the value is presumed to be 0.
def find_by_ids(ids, connection=None, page_size=100, page_number=0,
                sort_by=DEFAULT_SORT_BY, sort_order=DEFAULT_SORT_ORDER):
    """List playlists by specific IDs.

    :param ids: iterable of playlist ids (joined into a comma-separated
        string for the Brightcove API call)
    :returns: an ItemResultSet yielding Playlist objects
    """
    ids = ','.join([str(i) for i in ids])
    return pybrightcove.connection.ItemResultSet('find_playlists_by_ids',
        Playlist, connection, page_size, page_number, sort_by, sort_order,
        playlist_ids=ids)
List playlists by specific IDs.
def unregister_a_problem(self, prob):
    """Remove the problem from our problems list and check if we are still 'impacted'.

    :param prob: problem to remove
    :type prob: alignak.objects.schedulingitem.SchedulingItem
    :return: None
    """
    self.source_problems.remove(prob.uuid)
    # No remaining source problems: leave the impact state.
    if not self.source_problems:
        self.is_impact = False
        self.unset_impact_state()
    # Always broadcast the (possibly changed) status.
    self.broks.append(self.get_update_status_brok())
Remove the problem from our problems list and check if we are still 'impacted' :param prob: problem to remove :type prob: alignak.objects.schedulingitem.SchedulingItem :return: None
def task_start(self, **kw):
    """Mark a task (looked up by the given filters) as started.

    :returns: the re-fetched task, reflecting its started state
    """
    id, task = self.get_task(**kw)
    self._execute(id, 'start')
    # Re-fetch by uuid so the returned task includes the state change.
    return self.get_task(uuid=task['uuid'])[1]
Marks a task as started.
def form_node(cls):
    """Class decorator to finalize fully derived FormNode subclasses.

    Applies ``attrs`` (no __init__, with slots) and precomputes the
    argument layout from the fields' ``arg_mode`` metadata: positional
    args, the count of required ones, and an optional trailing "rest"
    argument.  Enforces ordering required -> optional -> rest.

    :raises RuntimeError: if a field follows the rest argument, or a
        required field follows an optional one
    """
    assert issubclass(cls, FormNode)
    res = attrs(init=False, slots=True)(cls)
    res._args = []
    res._required_args = 0
    res._rest_arg = None
    # State machine over declared fields: REQUIRED -> OPTIONAL -> REST.
    state = _FormArgMode.REQUIRED
    for field in fields(res):
        if 'arg_mode' in field.metadata:
            if state is _FormArgMode.REST:
                # Nothing may follow the rest argument.
                raise RuntimeError('rest argument must be last')
            if field.metadata['arg_mode'] is _FormArgMode.REQUIRED:
                if state is _FormArgMode.OPTIONAL:
                    raise RuntimeError('required arg after optional arg')
                res._args.append(field)
                res._required_args += 1
            elif field.metadata['arg_mode'] is _FormArgMode.OPTIONAL:
                state = _FormArgMode.OPTIONAL
                res._args.append(field)
            elif field.metadata['arg_mode'] is _FormArgMode.REST:
                state = _FormArgMode.REST
                res._rest_arg = field
            else:
                assert 0
    return res
A class decorator to finalize fully derived FormNode subclasses.
def format_arg(arg):
    """Format an argument so it can be shown.

    Strips any dotted prefix, removes ';' characters, renders '[]' as
    'Array', and lower-cases the first character (presumably these are
    JVM-style type names — e.g. 'java.lang.String' -> 'string').
    """
    text = str(arg)
    # Keep only the part after the last '.' (drop any package prefix).
    text = text.rsplit('.', 1)[-1]
    text = text.replace(';', '').replace('[]', 'Array')
    # Lower-case the first character; safe on the empty string.
    return text[:1].lower() + text[1:]
formats an argument to be shown
def clone(self):
    "Mimic the behavior of torch.clone for `ImagePoints` objects."
    # Deep-copy the underlying flow tensor; scale/y_first are disabled
    # because the stored flow is presumably already in internal
    # (scaled, y-first-applied) form — TODO confirm.
    return self.__class__(FlowField(self.size, self.flow.flow.clone()), scale=False, y_first=False)
Mimic the behavior of torch.clone for `ImagePoints` objects.
async def delete(self):
    """Delete the dialog from your dialog list.

    If you own the channel this won't destroy it, only delete it from
    the list.  Group chats are first left (DeleteChatUser with self),
    then — like private chats — have their history deleted.
    """
    if self.is_channel:
        await self._client(functions.channels.LeaveChannelRequest(
            self.input_entity))
    else:
        if self.is_group:
            await self._client(functions.messages.DeleteChatUserRequest(
                self.entity.id, types.InputPeerSelf()))
        await self._client(functions.messages.DeleteHistoryRequest(
            self.input_entity, 0))
Deletes the dialog from your dialog list. If you own the channel this won't destroy it, only delete it from the list.
def validate_signature(request, secret_key):
    """Validate the signature associated with the given request.

    Requires 'sig' (the signature) and 't' (a UTC unix timestamp) among
    the request parameters; requests whose timestamp deviates from local
    UTC time by more than 5 minutes are rejected.

    :returns: True if the signature is valid, False otherwise
    """
    import hmac

    data = request.GET.copy()
    # Non-GET requests also contribute their body parameters.
    if request.method != 'GET':
        message_body = getattr(request, request.method, {})
        data.update(message_body)

    # Both the signature and the timestamp are required.
    if data.get('sig', False):
        sig = data['sig']
        del data['sig']
    else:
        return False

    if data.get('t', False):
        timestamp = int(data.get('t', False))
        del data['t']
    else:
        return False

    local_time = datetime.utcnow()
    remote_time = datetime.utcfromtimestamp(timestamp)
    delta = abs(local_time - remote_time)
    # Use total_seconds(): timedelta.seconds wraps at one day, which
    # would accept a request exactly N days (± 5 min) old.
    if delta.total_seconds() > 5 * 60:
        return False

    # Constant-time comparison to avoid leaking signature bytes through
    # timing differences.
    expected = calculate_signature(secret_key, data, timestamp)
    return hmac.compare_digest(str(sig), str(expected))
Validates the signature associated with the given request.
def do_status(self, line):
    """Show python packaging configuration status.

    Prints the Pyrene version, whether ~/.pip/pip.conf exists and which
    repository it points at (or that it is a custom configuration), and
    whether the pypirc file exists.
    """
    print('{} {}'.format(bold('Pyrene version'), green(get_version())))
    pip_conf = os.path.expanduser('~/.pip/pip.conf')
    if os.path.exists(pip_conf):
        conf = read_file(pip_conf)
        # Try to recognize one of our known repositories in the config.
        repo = self._get_repo_for_pip_conf(conf)
        if repo:
            print(
                '{} is configured for repository "{}"'
                .format(bold(pip_conf), green(repo.name))
            )
        else:
            print(
                '{} exists, but is a {}'
                .format(bold(pip_conf), red('custom configuration'))
            )
    else:
        print('{} {}'.format(bold(pip_conf), red('does not exists')))
    if os.path.exists(self.pypirc):
        template = green('exists')
    else:
        template = red('does not exists')
    template = '{} ' + template
    print(template.format(bold(self.pypirc)))
Show python packaging configuration status
def RegisterBuiltin(cls, arg):
    """Register a builtin: create a new wrapper subclass and store it.

    :param arg: the builtin type to wrap
    :raises RuntimeError: if ``arg`` was already registered
    """
    if arg in cls.types_dict:
        # Python 3 call-style raise (the original used the Python 2-only
        # ``raise RuntimeError, msg`` statement, a syntax error on py3).
        raise RuntimeError('%s already registered' % arg)

    class _Wrapper(arg):
        pass

    # A %-formatted string in a class body is an expression, not a string
    # literal, so it never became the docstring; set __doc__ explicitly.
    _Wrapper.__doc__ = 'Wrapper for builtin %s\n%s' % (arg, cls.__doc__)
    _Wrapper.__name__ = '_%sWrapper' % arg.__name__
    cls.types_dict[arg] = _Wrapper
register a builtin, create a new wrapper.
def get_frame_list():
    """Create the list of call-stack frames.

    Returns (frames, frame infos, formatted frame-info strings), each
    ordered from the outermost frame down to the current one.
    """
    frames = []
    infos = []
    frame = inspect.currentframe()
    # Walk from the current frame up to the outermost caller.
    while frame is not None:
        frames.append(frame)
        infos.append(inspect.getframeinfo(frame))
        frame = frame.f_back
    # Present outermost-first.
    frames.reverse()
    infos.reverse()
    formatted = [format_frameinfo(info) for info in infos]
    return frames, infos, formatted
Create the list of frames
def _reset_model(self, table, model):
    """Set the model in the given table.

    The previous selection model is captured before the swap and its
    reference dropped afterwards.
    """
    old_sel_model = table.selectionModel()
    table.setModel(model)
    if old_sel_model:
        # NOTE(review): `del` only drops the local Python reference —
        # confirm the binding in use actually releases the underlying
        # object at this point.
        del old_sel_model
Set the model in the given table.
def fit_heptad_register(crangles):
    """Attempt to fit a heptad repeat to a set of Crick angles.

    Parameters
    ----------
    crangles: [float]
        A list of average Crick angles for the coiled coil.

    Returns
    -------
    fit_data: [(int, float, float)]
        (register offset, mean deviation, std deviation) tuples, sorted
        by mean deviation so the best-fitting register comes first.
    """
    # Map all angles onto [0, 360).
    angles = [ang if ang > 0 else ang + 360 for ang in crangles]
    # Centre of each of the 7 equal angular bins.
    bin_width = 360.0 / 7.0
    positions = [k * bin_width + bin_width / 2.0 for k in range(7)]
    # Ideal Crick angles in heptad order, via position indices.
    ideal = [positions[k] for k in (0, 2, 4, 6, 1, 3, 5)]
    repeats = len(angles) // 7 + 2
    ideal_track = ideal * repeats
    fits = []
    for offset in range(7):
        deviations = [abs(target - ang)
                      for ang, target in zip(angles, ideal_track[offset:])]
        fits.append((offset, numpy.mean(deviations), numpy.std(deviations)))
    return sorted(fits, key=lambda fit: fit[1])
Attempts to fit a heptad repeat to a set of Crick angles. Parameters ---------- crangles: [float] A list of average Crick angles for the coiled coil. Returns ------- fit_data: [(float, float, float)] Sorted list of fits for each heptad position.
def get_valid_class_name(s: str) -> str:
    """Return the given string converted so it can be used as a class name.

    Strips leading/trailing whitespace, title-cases each word (split on
    underscores and non-word characters), joins them, and removes any
    remaining invalid characters.
    """
    words = re.split(r'\W+|_', str(s).strip())
    candidate = ''.join(word.title() for word in words)
    # NB: this character class also admits '|' literally.
    return re.sub(r'[^\w|_]', '', candidate)
Return the given string converted so that it can be used for a class name Remove leading and trailing spaces; removes spaces and capitalizes each word; and remove anything that is not alphanumeric. Returns a pep8 compatible class name. :param s: The string to convert. :returns: The updated string.
def _check_compatibility(self): stored_descr = self._file_trace_description() try: for k, v in self._model_trace_description(): assert(stored_descr[k][0] == v[0]) except: raise ValueError( "The objects to tally are incompatible with the objects stored in the file.")
Make sure the next objects to be tallied are compatible with the stored trace.
def convert_bytes(n):
    """Convert a size number to 'K', 'M', etc.

    Values below 1024 come back as '<n>B'; larger values are scaled to
    the largest fitting binary unit with one decimal place.
    """
    units = ('K', 'M', 'G', 'T', 'P', 'E', 'Z', 'Y')
    # Walk from the largest unit down; 'K' == 2**10, 'M' == 2**20, ...
    for exponent, unit in reversed(list(enumerate(units, start=1))):
        threshold = 1 << (exponent * 10)
        if n >= threshold:
            return '%.1f%s' % (float(n) / threshold, unit)
    return "%sB" % n
Convert a size number to 'K', 'M', .etc
def make_child_of(self, chunk):
    """Link one YAML chunk to another.

    Used when inserting a chunk of YAML into another chunk: every key,
    value and item is recursively re-parented onto `chunk`.
    """
    if self.is_mapping():
        for key, value in self.contents.items():
            # Re-parent both the key's pointer and the value chunk.
            self.key(key, key).pointer.make_child_of(chunk.pointer)
            self.val(key).make_child_of(chunk)
    elif self.is_sequence():
        for index, item in enumerate(self.contents):
            self.index(index).make_child_of(chunk)
    else:
        # Scalar: just re-parent this chunk's own pointer.
        self.pointer.make_child_of(chunk.pointer)
Link one YAML chunk to another. Used when inserting a chunk of YAML into another chunk.
def _describe_me(self): return ( self.display_name, ' (cont: %s)' % self.run_func if self.is_continuation else '', ' (syscall)' if self.is_syscall else '', ' (inline)' if not self.use_state_arguments else '', ' (stub)' if self.is_stub else '', )
return a 5-tuple of strings sufficient for formatting with ``%s%s%s%s%s`` to verbosely describe the procedure
def _name_search(cls, method, filters): filters = cls._get_name_filters(filters) return [ cls.deserialize(cls._zeep_to_dict(row)) for row in method(filters) ]
Helper for search methods that use name filters. Args: method (callable): The Five9 API method to call with the name filters. filters (dict): A dictionary of search parameters, keyed by the name of the field to search. This should conform to the schema defined in :func:`five9.Five9.create_criteria`. Returns: list[BaseModel]: A list of records representing the result.
def _initiate_replset(self, port, name, maxwait=30):
    """Initiate the replica set on the member listening on `port`.

    Skipped when replica sets are disabled, except for the config server
    replica set 'configRepl'.  Retries replSetInitiate up to `maxwait`
    times, one second apart.

    :returns: the replSetGetStatus document if the set was already
        initialized, otherwise None.
    """
    # Don't initiate if not running with --replicaset (the config server
    # replica set must be initiated regardless).
    if not self.args['replicaset'] and name != 'configRepl':
        if self.args['verbose']:
            print('Skipping replica set initialization for %s' % name)
        return

    con = self.client('localhost:%i' % port)
    try:
        # Already initialized: return its status as-is.
        rs_status = con['admin'].command({'replSetGetStatus': 1})
        return rs_status
    except OperationFailure as e:
        # Not yet initiated: retry until replSetInitiate succeeds or the
        # attempts are exhausted.
        # NOTE(review): the loop's `e` shadows the outer exception, and
        # exhausting all retries falls through silently to the success
        # message — confirm this best-effort behavior is intended.
        for i in range(maxwait):
            try:
                con['admin'].command({'replSetInitiate': self.config_docs[name]})
                break
            except OperationFailure as e:
                print(e.message + " - will retry")
                time.sleep(1)

        if self.args['verbose']:
            print("initializing replica set '%s' with configuration: %s" % (name, self.config_docs[name]))
        print("replica set '%s' initialized." % name)
Initiate replica set.
def _validate_xor_args(self, p): if len(p[1]) != 2: raise ValueError('Invalid syntax: XOR only accepts 2 arguments, got {0}: {1}'.format(len(p[1]), p))
Raises ValueError if 2 arguments are not passed to an XOR
def write_parfile(df, parfile):
    """Write a pest parameter file from a dataframe.

    Parameters
    ----------
    df : pandas.DataFrame
        dataframe whose columns correspond to the entries in the
        parameter data section of a pest control file
    parfile : str
        name of the parameter file to write
    """
    columns = ["parnme", "parval1", "scale", "offset"]
    # Fixed-width formatters: 20-char name, 20.7 scientific for numbers.
    formatters = {
        "parnme": "{0:20s}".format,
        "parval1": "{0:20.7E}".format,
        "scale": "{0:20.7E}".format,
        "offset": "{0:20.7E}".format,
    }
    for col in columns:
        assert col in df.columns, ("write_parfile() error: "
                                   "{0} not found in df".format(col))
    body = df.to_string(col_space=0,
                        columns=columns,
                        formatters=formatters,
                        justify="right",
                        header=False,
                        index=False,
                        index_names=False)
    with open(parfile, 'w') as out:
        out.write("single point\n")
        out.write(body + '\n')
write a pest parameter file from a dataframe Parameters ---------- df : (pandas.DataFrame) dataframe with column names that correspond to the entries in the parameter data section of a pest control file parfile : str name of the parameter file to write
def overlap(a, b):
    """Check if two ranges overlap.

    Parameters
    ----------
    a : range
        The first range.
    b : range
        The second range.

    Returns
    -------
    overlaps : bool
        Do these ranges overlap.

    Notes
    -----
    Ranges with step != 1 are not supported (rejected by _check_steps).
    """
    _check_steps(a, b)
    # Disjoint iff one range ends strictly before the other starts.
    return not (a.stop < b.start or b.stop < a.start)
Check if two ranges overlap. Parameters ---------- a : range The first range. b : range The second range. Returns ------- overlaps : bool Do these ranges overlap. Notes ----- This function does not support ranges with step != 1.