code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_preservation_info(self, obj):
    """Return the info dict for a Preservation object.

    Currently identical to the base info; the empty update is kept as
    the extension point for Preservation-specific fields.
    """
    info = self.get_base_info(obj)
    info.update({})
    return info
Returns the info for a Preservation
def _get_vispy_caller(): records = inspect.stack() for record in records[5:]: module = record[0].f_globals['__name__'] if module.startswith('vispy'): line = str(record[0].f_lineno) func = record[3] cls = record[0].f_locals.get('self', None) clsname = "" if cls is None else cls.__class__.__name__ + '.' caller = "{0}:{1}{2}({3}): ".format(module, clsname, func, line) return caller return 'unknown'
Helper to get vispy calling function from the stack
def clear(self):
    """Empty every internal service-registry structure, under the lock."""
    with self.__svc_lock:
        # Same clearing order as before; all are plain .clear()-able maps.
        for registry_map in (self.__svc_registry, self.__svc_factories,
                             self.__svc_specs, self.__bundle_svc,
                             self.__bundle_imports, self.__factory_usage,
                             self.__pending_services):
            registry_map.clear()
Clears the registry
def convertSequenceMachineSequence(generatedSequences):
    """Split a SequenceMachine output stream into a list of sequences.

    ``None`` entries act as sequence terminators: each run of SDRs up to
    a ``None`` becomes one sequence (a list of SDR sets). A trailing run
    with no terminating ``None`` is not emitted, matching the
    SequenceMachine convention that sequences end with ``None``.
    """
    sequenceList = []
    current = []
    for item in generatedSequences:
        if item is not None:
            current.append(item)
        else:
            sequenceList.append(current)
            current = []
    return sequenceList
Convert a sequence from the SequenceMachine into a list of sequences, such that each sequence is a list of set of SDRs.
def is_text_visible(driver, text, selector, by=By.CSS_SELECTOR):
    """Return True when *text* appears in a displayed element.

    @Params
    driver - the webdriver object (required)
    text - the text string to search for
    selector - the locator that is used (required)
    by - the method to search for the locator (Default: By.CSS_SELECTOR)
    @Returns
    Boolean (is text visible); False on any lookup error.
    """
    try:
        found = driver.find_element(by=by, value=selector)
        return found.is_displayed() and text in found.text
    except Exception:
        # Missing/stale element (or any driver error) counts as not visible.
        return False
Returns whether the specified text is visible in the specified selector. @Params driver - the webdriver object (required) text - the text string to search for selector - the locator that is used (required) by - the method to search for the locator (Default: By.CSS_SELECTOR) @Returns Boolean (is text visible)
def has_all_nonzero_neurite_radii(neuron, threshold=0.0):
    """Check for neurite points whose radius is not above *threshold*.

    Returns a CheckResult whose status is True when no such points
    exist, with the (section ID, point index) pairs of offenders.
    """
    bad_ids = []
    seen = set()
    for section in _nf.iter_sections(neuron):
        for point_index, point in enumerate(section.points):
            key = (section.id, point_index)
            if point[COLS.R] <= threshold and key not in seen:
                seen.add(key)
                bad_ids.append(key)
    return CheckResult(len(bad_ids) == 0, bad_ids)
Check presence of neurite points with radius not above threshold Arguments: neuron(Neuron): The neuron object to test threshold: value above which a radius is considered to be non-zero Returns: CheckResult with result including list of (section ID, point ID) pairs of zero-radius points
def read(filename, data_wrapper=DataWrapper):
    """Read an SWC file and return it wrapped (format tag 'SWC')."""
    data = np.loadtxt(filename)
    # A single-row file loads as 1-D; promote it to a 1xN matrix.
    if data.ndim == 1:
        data = np.reshape(data, (1, -1))
    data = data[:, [X, Y, Z, R, TYPE, ID, P]]
    return data_wrapper(data, 'SWC', None)
Read an SWC file and return a tuple of data, format.
async def set_config(self, on=None, tholddark=None, tholdoffset=None):
    """Change the config of a CLIP LightLevel sensor.

    Only parameters that were explicitly provided are sent.
    """
    candidates = (('on', on),
                  ('tholddark', tholddark),
                  ('tholdoffset', tholdoffset))
    payload = {name: value for name, value in candidates if value is not None}
    await self._request('put', 'sensors/{}/config'.format(self.id),
                        json=payload)
Change config of a CLIP LightLevel sensor.
def board_msg(self):
    """Render the info map as a printable board string (as print_board).

    Special cell codes: 9 -> '@', 10 -> '?', 11 -> '*', 12 -> '!';
    any other value is printed as its number.
    """
    board_str = "s\t\t"
    # range() replaces the Python 2-only xrange().
    for col in range(self.board_width):
        board_str += str(col) + "\t"
    board_str = board_str.expandtabs(4) + "\n\n"
    symbols = {9: "@", 10: "?", 11: "*", 12: "!"}
    for row in range(self.board_height):
        temp_line = str(row) + "\t\t"
        for col in range(self.board_width):
            value = self.info_map[row, col]
            temp_line += symbols.get(value, str(value)) + "\t"
        board_str += temp_line.expandtabs(4) + "\n"
    return board_str
Structure a board as in print_board.
def daily_bounds(network, snapshots):
    """Bound each storage unit's state of charge to be equal at hour 0
    and hour 23 of every day (every 24th snapshot)."""
    sus = network.storage_units
    network.model.period_starts = network.snapshot_weightings.index[0::24]
    network.model.storages = sus.index

    def day_rule(m, storage, period_start):
        # SOC at a period start must equal the SOC 23 hours later.
        return (m.state_of_charge[storage, period_start] ==
                m.state_of_charge[storage,
                                  period_start + pd.Timedelta(hours=23)])

    network.model.period_bound = po.Constraint(
        network.model.storages, network.model.period_starts, rule=day_rule)
This will bound the storage level to 0.5 max_level every 24th hour.
def get_items(self, collection_uri):
    """Return the URIs of all items in the given collection.

    :param collection_uri: The URI that references the collection
    :returns: list of item URIs
    """
    collection_name = os.path.basename(collection_uri)
    return self.search_metadata("collection_name:%s" % collection_name)
Return all items in this collection. :param collection_uri: The URI that references the collection :type collection_uri: String :rtype: List :returns: a list of the URIs of the items in this collection
def create_single_poll_submission(self, poll_id, poll_session_id,
                                  poll_submissions_poll_choice_id):
    """Create a new poll submission for this poll session."""
    path = {"poll_id": poll_id, "poll_session_id": poll_session_id}
    data = {"poll_submissions[poll_choice_id]":
            poll_submissions_poll_choice_id}
    params = {}
    self.logger.debug(
        "POST /api/v1/polls/{poll_id}/poll_sessions/{poll_session_id}"
        "/poll_submissions with query params: {params} and form data: "
        "{data}".format(params=params, data=data, **path))
    return self.generic_request(
        "POST",
        "/api/v1/polls/{poll_id}/poll_sessions/{poll_session_id}"
        "/poll_submissions".format(**path),
        data=data, params=params, no_data=True)
Create a single poll submission. Create a new poll submission for this poll session
def start_session_if_none(self):
    """Start a session if it is not yet fully initialized."""
    if self._screen_id and self._session:
        return
    # Missing screen id or session: (re)initialize both.
    self.update_screen_id()
    self._session = YouTubeSession(screen_id=self._screen_id)
Starts a session if it is not yet initialized.
def addOutParameter(self, name, type, namespace=None, element_type=0):
    """Append an output-parameter description to this call info and
    return the created ParameterInfo."""
    param = ParameterInfo(name, type, namespace, element_type)
    self.outparams.append(param)
    return param
Add an output parameter description to the call info.
def max(self, values, axis=0):
    """Return (unique keys, per-group maximum of *values*).

    values : array_like, [keys, ...] values to reduce per group
    axis : int, optional reduction axis for values
    """
    as_array = np.asarray(values)
    return self.unique, self.reduce(as_array, np.maximum, axis)
return the maximum within each group Parameters ---------- values : array_like, [keys, ...] values to take maximum of per group axis : int, optional alternative reduction axis for values Returns ------- unique: ndarray, [groups] unique keys reduced : ndarray, [groups, ...] value array, reduced over groups
def _RawData(self, data): if not isinstance(data, dict): return data result = collections.OrderedDict() for k, v in iteritems(data): result[k] = self._RawData(v) return result
Convert data to common format. Configuration options are normally grouped by the functional component which define it (e.g. Logging.path is the path parameter for the logging subsystem). However, sometimes it is more intuitive to write the config as a flat string (e.g. Logging.path). In this case we group all the flat strings in their respective sections and create the sections automatically. Args: data: A dict of raw data. Returns: a dict in common format. Any keys in the raw data which have a "." in them are separated into their own sections. This allows the config to be written explicitly in dot notation instead of using a section.
def dispatch(self):
    """Dispatch the request to ``on_<method>``, else ``on_bad_method``."""
    handler_name = 'on_' + self.environ['REQUEST_METHOD'].lower()
    handler = getattr(self, handler_name, None)
    if not handler:
        return self.on_bad_method()
    return handler()
Handles dispatching of the request.
def parse_runway_config(self):
    """Read and parse runway.yml.

    Exits the process with status 1 when the config file is missing.
    """
    if not os.path.isfile(self.runway_config_path):
        LOGGER.error("Runway config file was not found (looking for "
                     "%s)", self.runway_config_path)
        sys.exit(1)
    with open(self.runway_config_path) as data_file:
        # safe_load: never execute arbitrary YAML tags from the config.
        return yaml.safe_load(data_file)
Read and parse runway.yml.
def generate_new_cid(upstream_cid=None):
    """Generate a new correlation id, possibly based on the given one."""
    if upstream_cid is None:
        # No upstream id: mint one only when generation is enabled.
        if getattr(settings, 'CID_GENERATE', False):
            return str(uuid.uuid4())
        return None
    if (getattr(settings, 'CID_CONCATENATE_IDS', False)
            and getattr(settings, 'CID_GENERATE', False)):
        return '%s, %s' % (upstream_cid, str(uuid.uuid4()))
    return upstream_cid
Generate a new correlation id, possibly based on the given one.
def genlmsg_parse(nlh, hdrlen, tb, maxtype, policy):
    """Parse a Generic Netlink message including attributes.

    Validates the headers with genlmsg_valid_hdr() then runs nla_parse()
    over the payload, filling *tb* with parsed attributes.

    Returns 0 on success or a negative error code.
    """
    if not genlmsg_valid_hdr(nlh, hdrlen):
        return -NLE_MSG_TOOSHORT
    ghdr = genlmsghdr(nlmsg_data(nlh))
    attr_data = genlmsg_attrdata(ghdr, hdrlen)
    attr_len = genlmsg_attrlen(ghdr, hdrlen)
    return int(nla_parse(tb, maxtype, attr_data, attr_len, policy))
Parse Generic Netlink message including attributes. https://github.com/thom311/libnl/blob/libnl3_2_25/lib/genl/genl.c#L191 Verifies the validity of the Netlink and Generic Netlink headers using genlmsg_valid_hdr() and calls nla_parse() on the message payload to parse eventual attributes. Positional arguments: nlh -- Netlink message header (nlmsghdr class instance). hdrlen -- length of user header (integer). tb -- empty dict, to be updated with nlattr class instances to store parsed attributes. maxtype -- maximum attribute id expected (integer). policy -- dictionary of nla_policy class instances as values, with nla types as keys. Returns: 0 on success or a negative error code.
def get_precursor_mz(exact_mass, precursor_type):
    """Calculate precursor m/z from an exact mass and precursor type.

    Supported precursor types: '[M-H]-', '[M+H]+', '[M+H-H2O]+'.
    Prints the KeyError and returns False for unsupported types.
    """
    proton = 1.007276
    adjustments = {
        '[M-H]-': -proton,
        '[M+H]+': proton,
        '[M+H-H2O]+': proton - ((proton * 2) + 15.9949),
    }
    try:
        return exact_mass + adjustments[precursor_type]
    except KeyError as e:
        print(e)
        return False
Calculate precursor mz based on exact mass and precursor type Args: exact_mass (float): exact mass of compound of interest precursor_type (str): Precursor type (currently only works with '[M-H]-', '[M+H]+' and '[M+H-H2O]+' Return: precursor m/z of the compound, or False for an unsupported precursor type
def _link(self, next_worker, next_is_first=False): lock = multiprocessing.Lock() next_worker._lock_prev_input = lock self._lock_next_input = lock lock.acquire() lock = multiprocessing.Lock() next_worker._lock_prev_output = lock self._lock_next_output = lock lock.acquire() if next_is_first: self._lock_next_input.release() self._lock_next_output.release()
Link the worker to the given next worker object, connecting the two workers with communication tubes.
def run(self):
    """Run the slide show, yielding each slide after announcing its wait."""
    self.show()
    if not self.wait:
        return
    for slide in self.slides:
        # Per-slide time, but never shorter than the global wait.
        delay = max(self.wait, slide.get('time', 0))
        print('waiting %d seconds %s' % (delay, slide.get('image', '')))
        yield slide
        time.sleep(delay)
    self.next()
Run the show
def getSubstitutionElement(self, elt, ps):
    """Return the GED typecode for *elt* when it is a member of this
    element's substitutionGroup, else None.

    elt -- the DOM element being parsed
    ps -- ParsedSoap instance
    """
    nsuri, ncname = _get_element_nsuri_name(elt)
    typecode = GED(nsuri, ncname)
    if typecode is None:
        return None
    try:
        nsuri, ncname = typecode.substitutionGroup
    except (AttributeError, TypeError):
        # No (usable) substitutionGroup on this typecode.
        return None
    name_matches = ncname == self.pname
    ns_matches = nsuri == self.nspname or (not nsuri and not self.nspname)
    if name_matches and ns_matches:
        return typecode
    return None
if elt matches a member of the head substitutionGroup, return the GED typecode representation of the member. head -- ElementDeclaration typecode, elt -- the DOM element being parsed ps -- ParsedSoap instance
def tags_context(self, worker_ctx, exc_info):
    """Merge worker tags (plus matching context-data keys) into the
    sentry payload."""
    tags = {
        'call_id': worker_ctx.call_id,
        'parent_call_id': worker_ctx.immediate_parent_call_id,
        'service_name': worker_ctx.container.service_name,
        'method_name': worker_ctx.entrypoint.method_name,
    }
    for key in worker_ctx.context_data:
        # Include context entries whose key matches any configured pattern.
        if any(re.search(matcher, key)
               for matcher in self.tag_type_context_keys):
            tags[key] = worker_ctx.context_data[key]
    self.client.tags_context(tags)
Merge any tags to include in the sentry payload.
def set_cmap(cmap):
    """Set the color map of the current 'color' scale and return it."""
    color_scale = _context['scales']['color']
    for attr_name, attr_value in _process_cmap(cmap).items():
        setattr(color_scale, attr_name, attr_value)
    return color_scale
Set the color map of the current 'color' scale.
def register_gid(self, tiled_gid, flags=None):
    """Manage the mapping of GIDs between the tmx and pytmx.

    :param tiled_gid: GID that is found in TMX data
    :rtype: GID that pytmx uses for the GID passed
    """
    if flags is None:
        flags = TileFlags(0, 0, 0)
    if not tiled_gid:
        return 0
    key = (tiled_gid, flags)
    try:
        return self.imagemap[key][0]
    except KeyError:
        # First time this (gid, flags) pair is seen: allocate a new GID.
        new_gid = self.maxgid
        self.maxgid += 1
        self.imagemap[key] = (new_gid, flags)
        self.gidmap[tiled_gid].append((new_gid, flags))
        self.tiledgidmap[new_gid] = tiled_gid
        return new_gid
Used to manage the mapping of GIDs between the tmx and pytmx :param tiled_gid: GID that is found in TMX data :rtype: GID that pytmx uses for the GID passed
def _info_transformers(fields, transformers):
    """Resolve a transformer function for each variants field.

    Fields without an explicit transformer fall back to
    config.DEFAULT_TRANSFORMER (or None). Note: fills in defaults on the
    passed-in *transformers* dict in place.
    """
    if transformers is None:
        transformers = dict()
    for field in fields:
        if field not in transformers:
            transformers[field] = config.DEFAULT_TRANSFORMER.get(field, None)
    return tuple(transformers[field] for field in fields)
Utility function to determine transformer functions for variants fields.
def change_return_type(f):
    """Decorator: cast the wrapped function's return value.

    The target type is taken from a ``return_type`` keyword argument
    when present (consumed, not passed through); otherwise from the type
    of the first positional argument. With neither, the return value is
    passed through unchanged.
    """
    @wraps(f)
    def wrapper(*args, **kwargs):
        # 'in' test replaces the Python 2-only dict.has_key().
        if 'return_type' in kwargs:
            return_type = kwargs.pop('return_type')
            return return_type(f(*args, **kwargs))
        elif len(args) > 0:
            return_type = type(args[0])
            return return_type(f(*args, **kwargs))
        else:
            return f(*args, **kwargs)
    return wrapper
Converts the returned value of wrapped function to the type of the first arg or to the type specified by a kwarg key return_type's value.
def show_graph_summary(g):
    """Print summary counts and a few sample triples from an RDF graph."""
    sample_data = []
    print("list(g[RDFS.Class]) = " + str(len(list(g[RDFS.Class]))))
    num_subj = 0
    for subj in g.subjects(RDF.type):
        num_subj += 1
        if num_subj < 5:
            sample_data.append("subjects.subject: " + get_string_from_rdf(subj))
    print("g.subjects(RDF.type) = " + str(num_subj))
    num_subj = 0
    for subj, pred, obj in g:
        num_subj += 1
        if num_subj < 5:
            # BUG FIX: subject/predicate sample values were swapped.
            sample_data.append("g.subject : " + get_string_from_rdf(subj))
            sample_data.append("g.predicate : " + get_string_from_rdf(pred))
            sample_data.append("g.object : " + get_string_from_rdf(obj))
    print("g.obj(RDF.type) = " + str(num_subj))
    print("------ Sample Data ------")
    for line in sample_data:
        print(line)
display sample data from a graph
def get_runtime_value(self, ihcid: int):
    """Get a runtime value, re-authenticating once if the first read fails.

    BUG FIX: previously returned the literal ``True`` instead of the
    value on success, and treated falsy-but-valid values (0, False) as
    failures triggering a spurious re-authentication.
    """
    value = self.client.get_runtime_value(ihcid)
    if value is not None:
        return value
    self.re_authenticate()
    return self.client.get_runtime_value(ihcid)
Get runtime value with re-authenticate if needed
def _ScheduleVariableHunt(hunt_obj):
    """Schedules flows for a variable hunt.

    Variable hunts may not use a client rate, and each client may appear
    in at most one flow group; both constraints are validated before any
    flow is started. One flow is then started per (flow group, client).

    Raises:
      VariableHuntCanNotHaveClientRateError: if client_rate is non-zero.
      CanStartAtMostOneFlowPerClientError: if a client id is repeated.
    """
    if hunt_obj.client_rate != 0:
        raise VariableHuntCanNotHaveClientRateError(hunt_obj.hunt_id,
                                                    hunt_obj.client_rate)
    # Validate up front so no flows start if any group is invalid.
    seen_clients = set()
    for flow_group in hunt_obj.args.variable.flow_groups:
        for client_id in flow_group.client_ids:
            if client_id in seen_clients:
                raise CanStartAtMostOneFlowPerClientError(hunt_obj.hunt_id,
                                                          client_id)
            seen_clients.add(client_id)
    now = rdfvalue.RDFDatetime.Now()
    for flow_group in hunt_obj.args.variable.flow_groups:
        flow_cls = registry.FlowRegistry.FlowClassByName(flow_group.flow_name)
        flow_args = flow_group.flow_args if flow_group.HasField(
            "flow_args") else None
        for client_id in flow_group.client_ids:
            flow.StartFlow(
                client_id=client_id,
                creator=hunt_obj.creator,
                cpu_limit=hunt_obj.per_client_cpu_limit,
                network_bytes_limit=hunt_obj.per_client_network_bytes_limit,
                flow_cls=flow_cls,
                flow_args=flow_args,
                start_at=now,
                parent_hunt_id=hunt_obj.hunt_id)
Schedules flows for a variable hunt.
def check_overlap(pos, ins, thresh):
    """Return True when at least *thresh* fraction of the feature at
    *pos* is contained within the insertion *ins*."""
    ins_pos = ins[0]
    ins_len = ins[2]  # unused value, kept so short tuples still raise
    overlap_len = overlap(ins_pos, pos)
    feature_len = pos[1] - pos[0] + 1
    return float(overlap_len) / float(feature_len) >= thresh
Return True if at least ``thresh`` fraction of the feature is contained within the insertion.
def from_string(cls, s):
    """Build an instance from a raw email string.

    Args:
        s (string): raw email
    Returns:
        Instance of MailParser
    """
    log.debug("Parsing email from string")
    parsed_message = email.message_from_string(s)
    return cls(parsed_message)
Init a new object from a string. Args: s (string): raw email Returns: Instance of MailParser
def exit_code_from_run_infos(run_infos: t.List[RunInfo]) -> int:
    """Reduce a list of RunInfos to a single exit code.

    A single non-iterable RunInfo yields its own retcode. Otherwise the
    maximum retcode wins; when all maxima are 0 the minimum is returned.
    """
    assert run_infos is not None
    if not hasattr(run_infos, "__iter__"):
        return run_infos.retcode
    retcodes = [info.retcode for info in run_infos]
    highest = max(retcodes)
    if highest == 0:
        return min(retcodes)
    return highest
Generate a single exit code from a list of RunInfo objects. Takes a list of RunInfos and returns the exit code that is furthest away from 0. Args: run_infos (t.List[RunInfo]): [description] Returns: int: [description]
def task_done(self, message):
    """Mark a fetched message as consumed.

    Offsets for messages marked as "task_done" will be stored back
    to the kafka cluster for this consumer group on commit()

    Arguments:
        message (KafkaMessage): the message to mark as complete

    Returns:
        True, unless the topic-partition for this message has not
        been configured for the consumer. In normal operation, this
        should not happen. But see github issue 364.
    """
    topic_partition = (message.topic, message.partition)
    if topic_partition not in self._topics:
        logger.warning('Unrecognized topic/partition in task_done message: '
                       '{0}:{1}'.format(*topic_partition))
        return False
    offset = message.offset
    # Warn (but still record) when offsets are not consumed contiguously.
    prev_done = self._offsets.task_done[topic_partition]
    if prev_done is not None and offset != (prev_done + 1):
        logger.warning('Marking task_done on a non-continuous offset: %d != %d + 1',
                       offset, prev_done)
    # Warn when marking done work that was already committed.
    prev_commit = self._offsets.commit[topic_partition]
    if prev_commit is not None and ((offset + 1) <= prev_commit):
        logger.warning('Marking task_done on a previously committed offset?: %d (+1) <= %d',
                       offset, prev_commit)
    self._offsets.task_done[topic_partition] = offset
    # Possibly trigger an auto-commit once enough messages are done.
    if self._does_auto_commit_messages():
        self._incr_auto_commit_message_count()
        if self._should_auto_commit():
            self.commit()
    return True
Mark a fetched message as consumed. Offsets for messages marked as "task_done" will be stored back to the kafka cluster for this consumer group on commit() Arguments: message (KafkaMessage): the message to mark as complete Returns: True, unless the topic-partition for this message has not been configured for the consumer. In normal operation, this should not happen. But see github issue 364.
def load_elements(self, elements):
    """Initialize the internal element structures."""
    self.pg_no = 0
    self.elements = elements
    # Lower-cased names are used as lookup keys.
    self.keys = [element['name'].lower() for element in self.elements]
Initialize the internal element structures
def get_decorators(self, node):
    """Return simple decorator names found on a function definition.

    Only simple decorators (e.g. @staticmethod) are returned, as a list
    of strings; [] when undecorated or only non-simple decorators exist.
    """
    if node.parent is None:
        return []
    results = {}
    if not self.decorated.match(node.parent, results):
        return []
    decorators = results.get('dd') or [results['d']]
    names = []
    for decorator in decorators:
        names.extend(child.value for child in decorator.children
                     if isinstance(child, Leaf) and child.type == token.NAME)
    return names
Return a list of decorators found on a function definition. This is a list of strings; only simple decorators (e.g. @staticmethod) are returned. If the function is undecorated or only non-simple decorators are found, return [].
def suspend_zone(self, days, zone=None):
    """Suspend or unsuspend a zone (or all zones) for *days* days.

    :param days: Number of days to suspend; <= 0 unsuspends.
    :param zone: Zone index to suspend, or None for all zones.
    :returns: The response from set_zones(), or None on a bad zone index.
    """
    if zone is None:
        zone_cmd, relay_id = 'suspendall', None
    elif zone < 0 or zone > (len(self.relays) - 1):
        return None
    else:
        zone_cmd = 'suspend'
        relay_id = self.relays[zone]['relay_id']
    if days <= 0:
        time_cmd = 0
    else:
        # Suspension end expressed as a local epoch timestamp.
        time_cmd = time.mktime(time.localtime()) + (days * 86400)
    return set_zones(self._user_token, zone_cmd, relay_id, time_cmd)
Suspend or unsuspend a zone or all zones for an amount of time. :param days: Number of days to suspend the zone(s) :type days: int :param zone: The zone to suspend. If no zone is specified then suspend all zones :type zone: int or None :returns: The response from set_zones() or None if there was an error. :rtype: None or string
def queue_exists(name, region, opts=None, user=None):
    """Return whether the named SQS queue exists in *region*."""
    queue_listing = list_queues(region, opts, user)
    return name in _parse_queue_list(queue_listing)
Returns True or False on whether the queue exists in the region name Name of the SQS queue to search for region Name of the region to search for the queue in opts : None Any additional options to add to the command line user : None Run hg as a user other than what the minion runs as CLI Example: salt '*' aws_sqs.queue_exists <sqs queue> <region>
def normalize_path(path, resolve_symlinks=True):
    """Return the canonical, case-normalized, absolute form of *path*."""
    expanded = expanduser(path)
    if resolve_symlinks:
        absolute = os.path.realpath(expanded)
    else:
        absolute = os.path.abspath(expanded)
    return os.path.normcase(absolute)
Convert a path to its canonical, case-normalized, absolute version.
def _ensure_datetimelike_to_i8(other, to_utc=False):
    """Helper for coercing an input scalar or array to i8.

    Parameters
    ----------
    other : 1d array
    to_utc : bool, default False
        If True, convert the values to UTC before extracting the i8 values
        If False, extract the i8 values directly.

    Returns
    -------
    i8 1d array
    """
    from pandas import Index
    from pandas.core.arrays import PeriodArray

    if lib.is_scalar(other) and isna(other):
        return iNaT
    elif isinstance(other, (PeriodArray, ABCIndexClass,
                            DatetimeLikeArrayMixin)):
        # Tz-aware values must become tz-naive (UTC or local) before
        # their i8 representation can be read.
        if getattr(other, 'tz', None) is not None:
            if to_utc:
                other = other.tz_convert('UTC')
            else:
                other = other.tz_localize(None)
    else:
        try:
            return np.array(other, copy=False).view('i8')
        except TypeError:
            # Not viewable as i8 directly; let Index handle conversion.
            other = Index(other)
    return other.asi8
Helper for coercing an input scalar or array to i8. Parameters ---------- other : 1d array to_utc : bool, default False If True, convert the values to UTC before extracting the i8 values If False, extract the i8 values directly. Returns ------- i8 1d array
def get_codemirror_parameters(self, name):
    """Return CodeMirror parameters for a configuration name, with
    internal-only keys stripped out.

    Arguments:
        name (string): Config name from available ones in
            ``settings.CODEMIRROR_SETTINGS``.

    Returns:
        dict: Parameters.
    """
    config = self.get_config(name)
    return {key: value for key, value in config.items()
            if key not in self._internal_only}
Return CodeMirror parameters for given configuration name. This is a reduced configuration from internal parameters. Arguments: name (string): Config name from available ones in ``settings.CODEMIRROR_SETTINGS``. Returns: dict: Parameters.
def gt(self, v, limit=None, offset=None):
    """Return members of the set whose score is strictly greater than v."""
    if limit is not None and offset is None:
        offset = 0
    # "(" prefix makes the lower bound exclusive in Redis range syntax.
    exclusive_min = "(%f" % v
    return self.zrangebyscore(exclusive_min, self._max_score,
                              start=offset, num=limit)
Returns the list of the members of the set that have scores greater than v.
def reload(self, hardware_id, post_uri=None, ssh_keys=None):
    """Perform an OS reload of a server with its current configuration.

    :param integer hardware_id: the instance ID to reload
    :param string post_uri: URI of the post-install script to run after reload
    :param list ssh_keys: SSH keys to add to the root user
    """
    config = {}
    if post_uri:
        config['customProvisionScriptUri'] = post_uri
    if ssh_keys:
        config['sshKeyIds'] = list(ssh_keys)
    return self.hardware.reloadOperatingSystem('FORCE', config,
                                               id=hardware_id)
Perform an OS reload of a server with its current configuration. :param integer hardware_id: the instance ID to reload :param string post_uri: The URI of the post-install script to run after reload :param list ssh_keys: The SSH keys to add to the root user
def categories_to_colors(cats, colormap=None):
    """Map categorical data to colors.

    cats : pandas.Series or list of categories
    colormap : list of RGB triples (defaults to this module's tableau20)
    Returns a Series whose values are colors and whose index holds the
    corresponding categories.
    """
    if colormap is None:
        colormap = tableau20
    if type(cats) != pd.Series:
        cats = pd.Series(cats)
    distinct = set(cats)
    legend = pd.Series(dict(zip(distinct, colormap)))
    return legend
Map categorical data to colors. Parameters ---------- cats : pandas.Series or list Categorical data as a list or in a Series. colormap : list List of RGB triples. If not provided, the tableau20 colormap defined in this module will be used. Returns ------- legend : pd.Series Series whose values are colors and whose index are the original categories that correspond to those colors.
def get_service_details(self, service_id: str) -> dict:
    """Get details of a service (manager nodes only).

    Args:
        service_id (string): id of the service
    Returns:
        dict, details of the service
    """
    if not self._manager:
        raise RuntimeError('Only the Swarm manager node can retrieve all'
                           ' the services details.')
    return self._client.services.get(service_id).attrs
Get details of a service. Only the manager nodes can retrieve service details Args: service_id (string): List of service id Returns: dict, details of the service
def add_adapter(self, adapter):
    """Add a device adapter to this aggregating adapter.

    Legacy DeviceAdapter instances are wrapped to present the modern
    asynchronous interface. Must be called before start().

    Raises:
        InternalError: if the aggregator has already been started.
    """
    if self._started:
        raise InternalError("New adapters cannot be added after start() is called")
    if isinstance(adapter, DeviceAdapter):
        self._logger.warning("Wrapping legacy device adapter %s in async wrapper", adapter)
        adapter = AsynchronousModernWrapper(adapter, loop=self._loop)
    self.adapters.append(adapter)
    # Route all of this adapter's events through the aggregator, tagged
    # with the adapter's position in self.adapters.
    adapter_callback = functools.partial(self.handle_adapter_event, len(self.adapters) - 1)
    events = ['device_seen', 'broadcast', 'report', 'connection', 'disconnection', 'trace', 'progress']
    adapter.register_monitor([None], events, adapter_callback)
Add a device adapter to this aggregating adapter.
def get_value(self, default=None):
    """Get the current selection from the combobox widget.

    Parameters
    ----------
    default : str
        value to fall back on when the widget cannot provide one

    Returns
    -------
    str
        selected item from the combobox
    """
    if default is None:
        default = ''
    try:
        text = self.currentText()
    except ValueError:
        # BUG FIX: the old message referenced `text`, which is unbound
        # when currentText() raises before the assignment completes.
        lg.debug('Cannot read widget text. Using default ' + str(default))
        text = default
        self.set_value(text)
    return text
Get selection from widget. Parameters ---------- default : str str for use by widget Returns ------- str selected item from the combobox
def _getNearestMappingIndexList(fromValList, toValList):
    """For each value in *fromValList*, return the index of the closest
    value in *toValList*.

    Inputs are relative times scaled 0..1, e.g. [0, .1, .5, .9] against
    [0, .1, .2, 1] yields [0, 1, 1, 2].
    """
    indexList = []
    for fromValue in fromValList:
        nearest = _getSmallestDifference(toValList, fromValue)
        indexList.append(toValList.index(nearest))
    return indexList
Finds the indicies for data points that are closest to each other. The inputs should be in relative time, scaled from 0 to 1 e.g. if you have [0, .1, .5., .9] and [0, .1, .2, 1] will output [0, 1, 1, 2]
def ImportTypes(self, prefix=''):
    """Query and build dynamic types, optionally filtered by *prefix*."""
    dynTypeMgr = self.GetTypeManager()
    filterSpec = None
    if prefix != '':
        filterSpec = vmodl.reflect.DynamicTypeManager.TypeFilterSpec(
            typeSubstr=prefix)
    allTypes = dynTypeMgr.QueryTypeInfo(filterSpec)
    DynamicTypeConstructor().CreateTypes(allTypes)
    return allTypes
Build dynamic types
def read_float_matrix(rx_specifier):
    """Read a Kaldi binary float matrix ("FM") for the given rx specifier
    ("path:offset") and return it as a (frames, features) numpy array."""
    path, offset_str = rx_specifier.strip().split(':', maxsplit=1)
    offset = int(offset_str)
    bytes_per_sample = 4  # float32

    with open(path, 'rb') as f:
        f.seek(offset)
        assert f.read(2) == b'\x00B'   # binary-mode marker
        assert f.read(3) == b'FM '     # float-matrix header
        f.read(1)                       # size byte preceding num_frames
        num_frames = struct.unpack('<i', f.read(4))[0]
        f.read(1)                       # size byte preceding feature_size
        feature_size = struct.unpack('<i', f.read(4))[0]
        payload = f.read(num_frames * feature_size * bytes_per_sample)

    flat = np.frombuffer(payload, dtype='float32')
    return np.reshape(flat, (num_frames, feature_size))
Return float matrix as np array for the given rx specifier.
def query_params(*es_query_params):
    """Decorator that pops all accepted parameters from a method's kwargs
    and collects them into the ``params`` argument."""
    def _wrapper(func):
        @wraps(func)
        def _wrapped(*args, **kwargs):
            params = {}
            if "params" in kwargs:
                params = kwargs.pop("params").copy()
            for name in es_query_params + GLOBAL_PARAMS:
                if name in kwargs:
                    value = kwargs.pop(name)
                    if value is not None:
                        params[name] = _escape(value)
            # Transport options pass through without escaping.
            for name in ("ignore", "request_timeout"):
                if name in kwargs:
                    params[name] = kwargs.pop(name)
            return func(*args, params=params, **kwargs)
        return _wrapped
    return _wrapper
Decorator that pops all accepted parameters from method's kwargs and puts them in the params argument.
def __get_size(conn, vm_):
    """Find the machine type in the proper zone (libcloud override)."""
    size_name = config.get_cloud_config_value(
        'size', vm_, __opts__, default='n1-standard-1', search_global=False)
    return conn.ex_get_size(size_name, __get_location(conn, vm_))
Need to override libcloud to find the machine type in the proper zone.
def cli(env):
    """List placement groups."""
    manager = PlacementManager(env.client)
    table = formatting.Table(
        ["Id", "Name", "Backend Router", "Rule", "Guests", "Created"],
        title="Placement Groups")
    for group in manager.list():
        table.add_row([
            group['id'],
            group['name'],
            group['backendRouter']['hostname'],
            group['rule']['name'],
            group['guestCount'],
            group['createDate'],
        ])
    env.fout(table)
List placement groups.
def _override_options(options, **overrides):
    """Override parsed options with explicit values.

    'ignore'/'select' overrides are merged with any already-passed
    value; other options are only set when still at their default.
    """
    for name, override in overrides.items():
        current = getattr(options, name, _Default())
        if name in ('ignore', 'select') and current:
            merged = process_value(name, current.value)
            merged += process_value(name, override)
            setattr(options, name, merged)
        elif isinstance(current, _Default):
            setattr(options, name, process_value(name, override))
Override options.
def copy_nrpe_checks(nrpe_files_dir=None):
    """Copy the charmhelpers nrpe check_* scripts into the Nagios
    plugins directory."""
    NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
    if nrpe_files_dir is None:
        # Probe the usual in-charm locations for the files directory.
        for segment in ['.', 'hooks']:
            nrpe_files_dir = os.path.abspath(os.path.join(
                os.getenv('CHARM_DIR'), segment, 'charmhelpers', 'contrib',
                'openstack', 'files'))
            if os.path.isdir(nrpe_files_dir):
                break
        else:
            raise RuntimeError("Couldn't find charmhelpers directory")
    if not os.path.exists(NAGIOS_PLUGINS):
        os.makedirs(NAGIOS_PLUGINS)
    for fname in glob.glob(os.path.join(nrpe_files_dir, "check_*")):
        if os.path.isfile(fname):
            shutil.copy2(fname,
                         os.path.join(NAGIOS_PLUGINS,
                                      os.path.basename(fname)))
Copy the nrpe checks into place
def bundle_lambda(zipfile):
    """Write zipfile contents to bundle.zip.

    :param zipfile: raw zip bytes (falsy means nothing to write)
    :return: exit code (0 on success, 1 when nothing was written)
    """
    if not zipfile:
        return 1
    with open('bundle.zip', 'wb') as bundle:
        bundle.write(zipfile)
    log.info('Finished - a bundle.zip is waiting for you...')
    return 0
Write zipfile contents to file. :param zipfile: :return: exit_code
def filter_batched_data(data, mapping):
    """Replace repeating columns in a ColumnDataSource with a scalar.

    Iterates over the data and mapping and, for any referenced column
    whose values are all identical, stores the scalar in the mapping and
    drops the column. Purely an optimization for scalar types; mutates
    both *data* and *mapping* in place.
    """
    for key, spec in list(mapping.items()):
        if isinstance(spec, dict) and 'field' in spec:
            if 'transform' in spec:
                continue
            spec = spec['field']
        elif not isinstance(spec, str):  # str replaces Python 2 basestring
            continue
        values = data[spec]
        try:
            if len(unique_array(values)) == 1:
                mapping[key] = values[0]
                del data[spec]
        except Exception:
            # Best-effort: leave non-reducible columns untouched.
            # (Narrowed from a bare except, which also swallowed
            # KeyboardInterrupt/SystemExit.)
            pass
Iterates over the data and mapping for a ColumnDataSource and replaces columns with repeating values with a scalar. This is purely and optimization for scalar types.
def on_click(self, event):
    """Control DPMS with mouse clicks."""
    button = event["button"]
    if button == self.button_toggle:
        # Flip DPMS based on its current reported state.
        if "DPMS is Enabled" in self.py3.command_output("xset -q"):
            self.py3.command_run("xset -dpms s off")
        else:
            self.py3.command_run("xset +dpms s on")
    if button == self.button_off:
        self.py3.command_run("xset dpms force off")
Control DPMS with mouse clicks.
def return_resource(self, resource, status=200, statusMessage="OK"):
    """Write a resource response.

    :param resource: resource object to serialize into the response
    :param int status: HTTP status code to use
    :param str statusMessage: status message for the response
    """
    self.set_status(status, statusMessage)
    # json_util round-trip converts BSON types into JSON-safe values.
    self.write(json.loads(json_util.dumps(resource)))
Return a resource response :param str resource: The JSON String representation of a resource response :param int status: Status code to use :param str statusMessage: The message to use in the error response
def monkey_patch(cls):
    """On Read The Docs, substitute *cls* for the zbarlight C extension."""
    if os.environ.get('READTHEDOCS', False):
        sys.modules['zbarlight._zbarlight'] = cls
Monkey path zbarlight C extension on Read The Docs
def delete_firmware_image(self, image_id):
    """Delete a firmware image.

    :param str image_id: image ID for the firmware to remove (Required)
    :return: void
    """
    api = self._get_api(update_service.DefaultApi)
    api.firmware_image_destroy(image_id=image_id)
Delete a firmware image. :param str image_id: image ID for the firmware to remove/delete (Required) :return: void
def install_hooks(target, **hooks):
    """Apply keyword hooks to the named methods of *target*.

    Already-hooked methods are not overridden; new hooks are appended to
    the pending list so all hooks nest.

    :param target: Any object whose named methods get replaced by
        HookedMethod objects.
    :param hooks: keyword arguments mapping method names to hook callables.
    """
    for method_name, hook in hooks.items():
        existing = getattr(target, method_name)
        if not isinstance(existing, HookedMethod):
            existing = HookedMethod(existing)
            setattr(target, method_name, existing)
        existing.pending.append(hook)
Given the target `target`, apply the hooks given as keyword arguments to it. If any targeted method has already been hooked, the hooks will not be overridden but will instead be pushed into a list of pending hooks. The final behavior should be that all hooks call each other in a nested stack. :param target: Any object. Its methods named as keys in `hooks` will be replaced by `HookedMethod` objects. :param hooks: Any keywords will be interpreted as hooks to apply. Each method named will hooked with the coresponding function value.
def reverse_sequences(records):
    """Yield copies of *records* with the order of sites reversed."""
    logging.info('Applying _reverse_sequences generator: '
                 'reversing the order of sites in sequences.')
    for record in records:
        reversed_record = SeqRecord(record.seq[::-1],
                                    id=record.id,
                                    name=record.name,
                                    description=record.description)
        # Annotations must be flipped to match the reversed sequence.
        _reverse_annotations(record, reversed_record)
        yield reversed_record
Reverse the order of sites in sequences.
def setto(self, s):
    """Set b[j+1...k] to the string *s*, readjusting k."""
    end = self.j + len(s)
    self.b[self.j + 1:end + 1] = s
    self.k = end
set j+1...k to string s, readjusting k
def exec_iteration(self, counter, context, step_method):
    """Run a single while-loop iteration.

    Stores the counter into context['whileCounter'], invokes
    step_method(context), then evaluates self.stop (when set) against
    the possibly-mutated context.

    Returns:
        bool. True if self.stop evaluates True after step execution.
    """
    logger.debug("starting")
    context['whileCounter'] = counter
    logger.info(f"while: running step with counter {counter}")
    step_method(context)
    logger.debug(f"while: done step {counter}")
    should_stop = False
    if self.stop:
        should_stop = context.get_formatted_as_type(self.stop,
                                                    out_type=bool)
    logger.debug("done")
    return should_stop
Run a single loop iteration. This method abides by the signature invoked by poll.while_until_true, which is to say (counter, *args, **kwargs). In a normal execution chain, this method's args passed by self.while_loop where context and step_method set. while_until_true injects counter as a 1st arg. Args: counter. int. loop counter, which number of iteration is this. context: (pypyr.context.Context) The pypyr context. This arg will mutate - after method execution will contain the new updated context. step_method: (method/function) This is the method/function that will execute on every loop iteration. Signature is: function(context) Returns: bool. True if self.stop evaluates to True after step execution, False otherwise.
def html_to_cnxml(html_source, cnxml_source):
    """Transform the HTML to CNXML, preserving the metadata carried in
    the original CNXML document."""
    html_tree = etree.parse(_string2io(html_source))
    cnxml_tree = etree.parse(_string2io(cnxml_source))
    transformed = _transform('html5-to-cnxml.xsl', html_tree)
    xpath = etree.XPath('//c:content',
                        namespaces={'c': 'http://cnx.rice.edu/cnxml'})
    # Swap the CNXML content node for the transformed HTML content.
    content_node = xpath(cnxml_tree)[0]
    content_node.getparent().replace(content_node, transformed.getroot())
    return etree.tostring(cnxml_tree)
Transform the HTML to CNXML. We need the original CNXML content in order to preserve the metadata in the CNXML document.
def get_groups(self, condition=None, page_size=1000):
    """Iterate over all groups in this device cloud account.

    :param condition: optional condition limiting the result set; when
        unspecified, all groups are returned.
    :param int page_size: number of results fetched per page.
    :returns: generator of Group objects (no ordering guarantees; child
        links between nodes are not populated).
    """
    query_kwargs = {}
    if condition is not None:
        query_kwargs["condition"] = condition.compile()
    for group_json in self._conn.iter_json_pages("/ws/Group",
                                                 page_size=page_size,
                                                 **query_kwargs):
        yield Group.from_json(group_json)
Return an iterator over all groups in this device cloud account Optionally, a condition can be specified to limit the number of groups returned. Examples:: # Get all groups and print information about them for group in dc.devicecore.get_groups(): print group # Iterate over all devices which are in a group with a specific # ID. group = dc.devicore.get_groups(group_id == 123)[0] for device in dc.devicecore.get_devices(group_path == group.get_path()): print device.get_mac() :param condition: A condition to use when filtering the results set. If unspecified, all groups will be returned. :param int page_size: The number of results to fetch in a single page. In general, the default will suffice. :returns: Generator over the groups in this device cloud account. No guarantees about the order of results is provided and child links between nodes will not be populated.
def genderize(name, api_token=None):
    """Fetch gender from genderize.io

    :param name: name whose gender will be looked up
    :param api_token: optional genderize.io API key, sent as ``apikey``
    :returns: tuple of (gender, accuracy); gender is the value reported by
        the API and accuracy is the probability expressed as an integer
        percentage, or None when no (non-zero) probability was reported
    :raises requests.exceptions.HTTPError: when the API responds with an
        error status after retries are exhausted
    """
    GENDERIZE_API_URL = "https://api.genderize.io/"
    TOTAL_RETRIES = 10
    MAX_RETRIES = 5
    SLEEP_TIME = 0.25
    STATUS_FORCELIST = [502]  # retry only on Bad Gateway responses

    params = {
        'name': name
    }
    if api_token:
        params['apikey'] = api_token

    session = requests.Session()
    # Retry transient failures with exponential backoff before giving up.
    retries = urllib3.util.Retry(total=TOTAL_RETRIES,
                                 connect=MAX_RETRIES,
                                 status=MAX_RETRIES,
                                 status_forcelist=STATUS_FORCELIST,
                                 backoff_factor=SLEEP_TIME,
                                 raise_on_status=True)
    session.mount('http://', requests.adapters.HTTPAdapter(max_retries=retries))
    session.mount('https://', requests.adapters.HTTPAdapter(max_retries=retries))

    r = session.get(GENDERIZE_API_URL, params=params)
    r.raise_for_status()

    result = r.json()

    gender = result['gender']
    prob = result.get('probability', None)
    # A missing or zero probability is reported as unknown accuracy (None).
    acc = int(prob * 100) if prob else None

    return gender, acc
def _validate_columns(self):
    """Same as _validate_rows but for columns.

    Also ignore used_cells as _validate_rows should update used_cells.
    """
    for column_index in range(self.start[1], self.end[1]):
        # View the table transposed so a column can be indexed like a row.
        table_column = TableTranspose(self.table)[column_index]
        column_type = None
        if self.end[0] > self.start[0]:
            # Seed the expected type from the first cell of the column.
            column_type = get_cell_type(table_column[self.start[0]])

        num_type_changes = 0
        for row_index in range(self.start[0], self.end[0]):
            if not check_cell_type(table_column[row_index], column_type):
                # Cell type differs from the running expectation: adopt the
                # new type and count the change.
                column_type = get_cell_type(table_column[row_index])
                num_type_changes += 1
                if num_type_changes > 1:
                    # A second type change in one column is unexpected; flag
                    # the previous row, then decrement so that every further
                    # change is flagged too.
                    self.flag_change(self.flags, 'warning',
                                     (row_index-1, column_index),
                                     self.worksheet,
                                     self.FLAGS['unexpected-change'])
                    num_type_changes -= 1
def pushstrf(self, format, *args):
    """Push formatted string as new frame to front of message.

    Returns 0 on success, -1 on error.
    """
    msg_handle = self._as_parameter_
    return lib.zmsg_pushstrf(msg_handle, format, *args)
def insert_attachments(self, volumeID, attachments):
    """Add attachments to an already existing volume.

    :param volumeID: id of the volume to update
    :param attachments: list of dicts describing the attachments; each
        item must at least contain a 'file' key
    :returns: list of the inserted attachment ids, or None when
        ``attachments`` is empty
    :raises Exception: re-raises any error hit while assembling an
        attachment (after logging it with its index)
    """
    log.debug("adding new attachments to volume '{}': {}".format(volumeID, attachments))
    if not attachments:
        return

    rawVolume = self._req_raw_volume(volumeID)
    attsID = list()
    for index, a in enumerate(attachments):
        try:
            rawAttachment = self._assemble_attachment(a['file'], a)
            rawVolume['_source']['_attachments'].append(rawAttachment)
            attsID.append(rawAttachment['id'])
        except Exception:
            log.exception("Error while elaborating attachments array at index: {}".format(index))
            raise
    # Persist the updated volume; the stored _version enables optimistic
    # concurrency control on the write.
    self._db.modify_book(volumeID, rawVolume['_source'], version=rawVolume['_version'])
    return attsID
def _get_day_of_month(other, day_option): if day_option == 'start': return 1 elif day_option == 'end': days_in_month = _days_in_month(other) return days_in_month elif day_option is None: raise NotImplementedError else: raise ValueError(day_option)
Find the day in `other`'s month that satisfies a BaseCFTimeOffset's onOffset policy, as described by the `day_option` argument. Parameters ---------- other : cftime.datetime day_option : 'start', 'end' 'start': returns 1 'end': returns last day of the month Returns ------- day_of_month : int
def _extract_version(version_string, pattern): if version_string: match = pattern.match(version_string.strip()) if match: return match.group(1) return ""
Extract the version from `version_string` using `pattern`. Return the version as a string, with leading/trailing whitespace stripped.
def audiorate(filename):
    """Determines the samplerate of the given audio recording file

    :param filename: filename of the audiofile
    :type filename: str
    :returns: int -- samplerate of the recording
    """
    lowered = filename.lower()
    if '.wav' in lowered:
        # Read the rate from the WAV header.
        wf = wave.open(filename)
        rate = wf.getframerate()
        wf.close()
        return rate
    if '.call' in lowered:
        # .call recordings use a fixed, known sample rate.
        return 333333
    raise IOError("Unsupported audio format for file: {}".format(filename))
def main(self, function):
    """Decorator to define the main function of the experiment.

    The main function of an experiment is the default command that is being
    run when no command is specified, or when calling the run() method.

    Usually it is more convenient to use ``automain`` instead.
    """
    captured = self.command(function)
    # Remember the captured command by name so run() can fall back to it
    # when no explicit command is given.
    self.default_command = captured.__name__
    return captured
def get_entries(path):
    """Return sorted lists of directories and files in the given path."""
    dirs = []
    files = []
    for name in os.listdir(path):
        # Route each entry into the appropriate bucket.
        bucket = dirs if os.path.isdir(os.path.join(path, name)) else files
        bucket.append(name)
    return sorted(dirs), sorted(files)
def _handle_return(self, node, scope, ctxt, stream):
    """Handle Return nodes by raising InterpReturn with the evaluated value.

    :node: the Return AST node (``node.expr`` may be None for a bare return)
    :scope: current scope
    :ctxt: current context
    :stream: current input stream
    :raises errors.InterpReturn: always; carries the return value (or None)
        up to the interpreter's function-call handler
    """
    self._dlog("handling return")
    if node.expr is None:
        ret_val = None
    else:
        # Evaluate the return expression in the current scope/context.
        ret_val = self._handle_node(node.expr, scope, ctxt, stream)
    self._dlog("return value = {}".format(ret_val))
    # Unwind the interpreter stack via exception; the caller of the
    # function being interpreted catches this and extracts the value.
    raise errors.InterpReturn(ret_val)
def plot_pot(self, colorbar=True, cb_orientation='vertical', cb_label='Potential, m$^2$ s$^{-2}$', ax=None, show=True, fname=None, **kwargs):
    """Plot the gravitational potential.

    Usage
    -----
    x.plot_pot([tick_interval, xlabel, ylabel, ax, colorbar,
                cb_orientation, cb_label, show, fname, **kwargs])

    Parameters
    ----------
    colorbar : bool, optional, default = True
        If True, plot a colorbar.
    cb_orientation : str, optional, default = 'vertical'
        Orientation of the colorbar: either 'vertical' or 'horizontal'.
    cb_label : str, optional, default = 'Potential, m$^2$ s$^{-2}$'
        Text label for the colorbar.
    ax : matplotlib axes object, optional, default = None
        A single matplotlib axes object where the plot will appear.
    show : bool, optional, default = True
        If True, plot the image to the screen.
    fname : str, optional, default = None
        If present, and if ax is not specified, save the image to the
        specified file.
    kwargs : optional
        Keyword arguments that will be sent to the SHGrid.plot() and
        plt.imshow() methods.

    Returns
    -------
    (fig, axes) when ax is None; otherwise nothing is returned and the
    plot is drawn into the provided axes.
    """
    if ax is None:
        # No axes supplied: let the grid's plot() create the figure, then
        # handle display/saving here.
        fig, axes = self.pot.plot(colorbar=colorbar, cb_orientation=cb_orientation, cb_label=cb_label, show=False, **kwargs)
        if show:
            fig.show()
        if fname is not None:
            fig.savefig(fname)
        return fig, axes
    else:
        # Draw into the caller-provided axes; the caller owns the figure.
        self.pot.plot(colorbar=colorbar, cb_orientation=cb_orientation, cb_label=cb_label, ax=ax, **kwargs)
def _number_of_line(member_tuple): member = member_tuple[1] try: return member.__code__.co_firstlineno except AttributeError: pass try: return inspect.findsource(member)[1] except BaseException: pass for value in vars(member).values(): try: return value.__code__.co_firstlineno except AttributeError: pass return 0
Try to return the number of the first line of the definition of a member of a module.
def dispatch(self, request, start_response):
    """Handles dispatch to apiserver handlers.

    This typically ends up calling start_response and returning the entire
    body of the response.

    Args:
      request: An ApiRequest, the request from the user.
      start_response: A function with semantics defined in PEP-333.

    Returns:
      A string, the body of the response.
    """
    # Non-API requests are handled first; a non-None result means the
    # response was fully produced without touching the backend.
    dispatched_response = self.dispatch_non_api_requests(request, start_response)
    if dispatched_response is not None:
        return dispatched_response
    try:
        return self.call_backend(request, start_response)
    except errors.RequestError as error:
        # Translate known request errors into a proper error response.
        return self._handle_request_error(request, error, start_response)
def ConfigureLazyWorkers(self):
    """Configure "lazy" workers into the cluster.

    Lazy workers are instances that are running and reachable but failed to
    register with the cldb to join the mapr cluster. This tries to find these
    missing workers and add them to the cluster.
    """
    lazy_worker_instances = self.__GetMissingWorkers()
    if not lazy_worker_instances:
        return
    # Only configure the workers we can actually reach.
    reachable_states = self.__AreInstancesReachable(lazy_worker_instances)
    reachable_instances = [instance for instance, reachable
                           in zip(lazy_worker_instances, reachable_states)
                           if reachable]
    # Fix: parenthesized print works on both Python 2 and 3 (the original
    # `print expr` statement is a SyntaxError on Python 3).
    print('reachable_instances: %s' % reachable_instances)
    self.__ConfigureWorkers(reachable_instances)
def users(self, params):
    """Add, update and remove users from a given tailored audience.

    This is a private API and requires whitelisting from Twitter. This
    endpoint will allow partners to add, update and remove users from a
    given tailored_audience_id. The endpoint will also accept multiple
    user identifier types per user as well.

    Args:
        params: JSON-serializable payload describing the user operations.

    Returns:
        Tuple of (success_count, total_count) as reported by the API.
    """
    resource = self.RESOURCE_USERS.format(account_id=self.account.id, id=self.id)
    headers = {'Content-Type': 'application/json'}
    response = Request(self.account.client, 'post', resource, headers=headers, body=json.dumps(params)).perform()
    success_count = response.body['data']['success_count']
    total_count = response.body['data']['total_count']
    return (success_count, total_count)
def _check_random_state(random_state):
    """Checks and processes user input for seeding random numbers.

    Parameters
    ----------
    random_state : int, RandomState instance or None
        If int, a RandomState instance is created with this integer seed.
        If RandomState instance, random_state is returned;
        If None, a RandomState instance is created with arbitrary seed.

    Returns
    -------
    scipy.random.RandomState instance

    Raises
    ------
    TypeError
        If ``random_state`` is not appropriately set.
    """
    if isinstance(random_state, sci.random.RandomState):
        # Already a generator; hand it back unchanged.
        return random_state
    if random_state is None or isinstance(random_state, int):
        return sci.random.RandomState(random_state)
    raise TypeError('Seed should be None, int or np.random.RandomState')
def _validate_ard_shape(self, name, value, ARD=None):
    """Validates the shape of a potentially ARD hyperparameter

    :param name: The name of the parameter (used for error messages)
    :param value: A scalar or an array.
    :param ARD: None, False, or True. If None, infers ARD from shape of value.
    :return: Tuple (value, ARD), where _value_ is a scalar if input_dim==1
        or not ARD, array otherwise. The _ARD_ is False if input_dim==1 or
        not ARD, True otherwise.
    """
    if ARD is None:
        # Infer ARD: a non-scalar value implies one entry per input dimension.
        ARD = np.asarray(value).squeeze().shape != ()

    if ARD:
        # Broadcast (a possibly scalar) value across all input dimensions.
        value = value * np.ones(self.input_dim, dtype=settings.float_type)

    # Expected shape: scalar for 1-D inputs or non-ARD kernels, else a
    # vector with one entry per input dimension.
    if self.input_dim == 1 or not ARD:
        correct_shape = ()
    else:
        correct_shape = (self.input_dim,)

    if np.asarray(value).squeeze().shape != correct_shape:
        raise ValueError("shape of {} does not match input_dim".format(name))

    return value, ARD
def disable_reporting(self):
    """Disable the reporting of the port."""
    self.reporting = False
    # REPORT_DIGITAL + port number selects this port; 0 turns reporting off.
    command = bytearray([REPORT_DIGITAL + self.port_number, 0])
    self.board.sp.write(command)
def execute_message_call(laser_evm, callee_address: str) -> None:
    """Executes a message call transaction from all open states.

    Each open world state is turned into a MessageCallTransaction against
    ``callee_address`` with symbolic gas price, origin, calldata and call
    value, after which the laser EVM is run over the prepared states.

    :param laser_evm: the LaserEVM whose open states are consumed
    :param callee_address: address of the contract account to call
    """
    # Consume the open states; execution repopulates laser_evm.open_states.
    open_states = laser_evm.open_states[:]
    del laser_evm.open_states[:]

    for open_world_state in open_states:
        # Self-destructed contracts cannot be called.
        if open_world_state[callee_address].deleted:
            log.debug("Can not execute dead contract, skipping.")
            continue

        next_transaction_id = get_next_transaction_id()
        transaction = MessageCallTransaction(
            world_state=open_world_state,
            identifier=next_transaction_id,
            # Symbolic values let the analysis cover every concrete choice
            # of gas price / origin / calldata / call value.
            gas_price=symbol_factory.BitVecSym(
                "gas_price{}".format(next_transaction_id), 256
            ),
            gas_limit=8000000,  # maximum gas budget for the call
            origin=symbol_factory.BitVecSym(
                "origin{}".format(next_transaction_id), 256
            ),
            caller=symbol_factory.BitVecVal(ATTACKER_ADDRESS, 256),
            callee_account=open_world_state[callee_address],
            call_data=SymbolicCalldata(next_transaction_id),
            call_value=symbol_factory.BitVecSym(
                "call_value{}".format(next_transaction_id), 256
            ),
        )
        _setup_global_state_for_execution(laser_evm, transaction)

    laser_evm.exec()
def limit(self, limit):
    """Limit the number of rows returned from the database.

    :param limit: The number of rows to return in the recipe. 0 will
        return all rows.
    :type limit: int
    """
    if limit != self._limit:
        # Changing the limit invalidates any cached query state.
        self._limit = limit
        self.dirty = True
    return self
def phase_parents_by_transmission(g, window_size):
    """Phase parent genotypes from a trio or cross, given progeny genotypes
    already phased by Mendelian transmission.

    Parameters
    ----------
    g : GenotypeArray
        Genotype array, with parents as first two columns and progeny as
        remaining columns, where progeny genotypes are already phased.
    window_size : int
        Number of previous heterozygous sites to include when phasing each
        parent. A number somewhere between 10 and 100 may be appropriate,
        depending on levels of heterozygosity and quality of data.

    Returns
    -------
    g : GenotypeArray
        Genotype array with parents phased where possible.
    """
    # Input validation: diploid int8 genotypes with phasing info present
    # and at least two parents plus one progeny sample.
    check_type(g, GenotypeArray)
    check_dtype(g.values, 'i1')
    check_ploidy(g.ploidy, 2)
    if g.is_phased is None:
        raise ValueError('genotype array must first have progeny phased by transmission')
    check_min_samples(g.n_samples, 3)

    # Ensure the optimized routine receives memoryview-safe buffers.
    g._values = memoryview_safe(g.values)
    g._is_phased = memoryview_safe(g.is_phased)

    # Phase the parent columns in place.
    _opt_phase_parents_by_transmission(g.values, g.is_phased.view('u1'), window_size)
    return g
def _walk(self, target, visitor): visited = set() def walk(current): if current not in visited: visited.add(current) keep_going = visitor(current) if keep_going: for dependency in self.dependencies(current): walk(dependency) walk(target)
Walks the dependency graph for the given target. :param target: The target to start the walk from. :param visitor: A function that takes a target and returns `True` if its dependencies should also be visited.
def values_at(self, depth):
    """Iterate values at specified depth."""
    if depth < 1:
        # Depth exhausted: this node itself is the value.
        yield self
        return
    for subtree in self.values():
        for leaf in subtree.values_at(depth - 1):
            yield leaf
def get_container_id(self, container_id=None):
    """Resolve and return a container_id.

    A helper function shared between functions that will return a
    container_id. First preference goes to a container_id provided by the
    user at runtime. Second preference goes to the container_id
    instantiated with the client.

    Parameters
    ==========
    container_id: image uri to parse (required)
    """
    # Fix: compare with `is None` (identity) instead of `== None` (PEP 8).
    if container_id is None and self.container_id is None:
        bot.exit('You must provide a container_id.')
    container_id = container_id or self.container_id
    return container_id
def _encode_ndef_uri_type(self, data):
    """Implement NDEF URI Identifier Code.

    This is a small hack to replace some well known prefixes (such as
    http://) with a one byte code. If the prefix is not known, 0x00 is
    used.
    """
    t = 0x0
    for (code, prefix) in uri_identifiers:
        # Case-insensitive prefix match against the known identifier table.
        if data[:len(prefix)].decode('latin-1').lower() == prefix:
            t = code
            # Strip the matched prefix; it is represented by the code byte.
            data = data[len(prefix):]
            break
    # Prepend the identifier code byte to the (possibly shortened) URI.
    data = yubico_util.chr_byte(t) + data
    return data
def write_fasta_file(self, outfile, force_rerun=False):
    """Write a FASTA file for the protein sequence, ``seq`` will now load
    directly from this file.

    Args:
        outfile (str): Path to new FASTA file to be written to
        force_rerun (bool): If an existing file should be overwritten
    """
    # Skip the write when the file already exists, unless forced.
    if ssbio.utils.force_rerun(flag=force_rerun, outfile=outfile):
        SeqIO.write(self, outfile, "fasta")
    # Point this record at the written file so future loads use it.
    self.sequence_path = outfile
def SignedVarintEncode(value):
    """Encode a signed integer as a varint byte string.

    Negative values are first mapped into the unsigned 64-bit range via
    two's complement (value + 2**64) before the 7-bits-per-byte encoding.
    """
    if value < 0:
        value += 1 << 64
    chunks = []
    # Emit 7 bits per byte, continuation bit set on all but the last.
    while value > 0x7f:
        chunks.append(HIGH_CHR_MAP[value & 0x7f])
        value >>= 7
    chunks.append(CHR_MAP[value])
    return b"".join(chunks)
def stage_tc_create_security_label(self, label, resource):
    """Add a security label to a resource.

    Args:
        label (str): The security label (must exist in ThreatConnect).
        resource (obj): An instance of tcex resource class.
    """
    sl_resource = resource.security_labels(label)
    sl_resource.http_method = 'POST'
    sl_response = sl_resource.request()
    # Best-effort: a failed label add is logged as a warning, not raised.
    if sl_response.get('status') != 'Success':
        self.log.warning(
            '[tcex] Failed adding security label "{}" ({}).'.format(
                label, sl_response.get('response').text
            )
        )
def setchival(self, bondorder, rotation):
    """Compute chiral ordering of surrounding atoms.

    :param bondorder: sequence giving the order in which this atom's bonds
        should be considered, or a falsy value to use the atom's own
        neighbor list (self.oatoms).
    :param rotation: integer whose parity selects the chirality symbol.
    :raises PinkyError: when no bond order is given and the atom has fewer
        than three bonds without an explicit hydrogen.
    :raises AtomError: when the given bond order is incomplete or contains
        bonds not attached to this atom.
    """
    # NOTE(review): ``rotation % 2`` can only ever select index 0 (None) or
    # 1 ("@"); the "@@" entry is unreachable -- confirm the intended
    # parity mapping.
    rotation = [None, "@", "@@"][(rotation % 2)]
    if not bondorder:
        if len(self.oatoms) < 3 and self.explicit_hcount != 1:
            raise PinkyError("Need to have an explicit hydrogen when specifying "\
                             "chirality with less than three bonds")
        self._chirality = chirality.T(self.oatoms, rotation)
        return

    if len(bondorder) != len(self.bonds):
        raise AtomError("The order of all bonds must be specified")
    for bond in bondorder:
        if bond not in self.bonds:
            raise AtomError("Specified bonds to assign chirality are not attatched to atom")
    # Fix: iterate ``bondorder`` -- the original referenced an undefined
    # name ``bonds``, raising NameError whenever an explicit order was given.
    order = [bond.xatom(self) for bond in bondorder]
    self._chirality = chirality.T(order, rotation)
def handle_pre_response(self, item_session: ItemSession) -> Actions:
    """Process a response that is starting.

    Consults the pre-response hook and maps its action onto the item's
    status: RETRY marks the item skipped, FINISH marks it done, and STOP
    aborts the whole run.

    Args:
        item_session: session of the item whose response is starting.

    Returns:
        The action returned by the pre-response hook.

    Raises:
        HookStop: when the hook requests an immediate stop.
    """
    action = self.consult_pre_response_hook(item_session)

    if action == Actions.RETRY:
        item_session.set_status(Status.skipped)
    elif action == Actions.FINISH:
        item_session.set_status(Status.done)
    elif action == Actions.STOP:
        raise HookStop('Script requested immediate stop.')

    return action
def get_current_user():
    """Get current user object from middleware

    Returns None when no user is recorded on the thread-local state or
    when the recorded user is anonymous.
    """
    thread_local = AutomatedLoggingMiddleware.thread_local
    user = getattr(thread_local, 'current_user', None)
    if isinstance(user, AnonymousUser):
        # Anonymous sessions are treated as "no user".
        return None
    return user