code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def error(self):
    """Return the error for this barrier and all work items, if any.

    Scans contained items in order; the first WorkItem carrying a truthy
    error wins. Returns None when no work item has an error.
    """
    for entry in self:
        if not isinstance(entry, WorkItem):
            continue
        if entry.error:
            return entry.error
    return None
Returns the error for this barrier and all work items, if any.
def get_tan_mechanisms(self):
    """Get the available TAN mechanisms.

    Note: Only checks for HITANS versions listed in
    IMPLEMENTED_HKTAN_VERSIONS.

    :return: Dictionary of security_function: TwoStepParameters objects.
    """
    mechanisms = OrderedDict()
    for version in sorted(IMPLEMENTED_HKTAN_VERSIONS.keys()):
        for segment in self.bpd.find_segments('HITANS', version):
            allowed = (
                param for param in segment.parameter.twostep_parameters
                if param.security_function in self.allowed_security_functions
            )
            for param in allowed:
                mechanisms[param.security_function] = param
    return mechanisms
Get the available TAN mechanisms. Note: Only checks for HITANS versions listed in IMPLEMENTED_HKTAN_VERSIONS. :return: Dictionary of security_function: TwoStepParameters objects.
def from_url(reddit_session, url, comment_limit=0, comment_sort=None,
             comments_only=False, params=None):
    """Request the url and return a Submission object.

    :param reddit_session: The session to make the request with.
    :param url: The url to build the Submission object from.
    :param comment_limit: The desired number of comments to fetch. If <= 0
        fetch the default number for the session's user. If None, fetch the
        maximum possible.
    :param comment_sort: The sort order for retrieved comments. When None
        use the default for the session's user.
    :param comments_only: Return only the list of comments.
    :param params: dictionary containing extra GET data to put in the url.
    """
    if params is None:
        params = {}
    parsed = urlparse(url)
    # Fold any query-string pairs from the url into the request params.
    params.update({key: ",".join(values)
                   for key, values in parse_qs(parsed.query).items()})
    url = urlunparse(parsed[:3] + ("", "", ""))
    if comment_limit is None:
        params['limit'] = 2048  # NOTE(review): presumably the API maximum
    elif comment_limit > 0:
        params['limit'] = comment_limit
    if comment_sort:
        params['sort'] = comment_sort
    response = reddit_session.request_json(url, params=params)
    if comments_only:
        return response[1]['data']['children']
    submission = Submission.from_json(response)
    submission._comment_sort = comment_sort
    submission._params = params
    return submission
Request the url and return a Submission object. :param reddit_session: The session to make the request with. :param url: The url to build the Submission object from. :param comment_limit: The desired number of comments to fetch. If <= 0 fetch the default number for the session's user. If None, fetch the maximum possible. :param comment_sort: The sort order for retrieved comments. When None use the default for the session's user. :param comments_only: Return only the list of comments. :param params: dictionary containing extra GET data to put in the url.
def kwarg(string, separator='='):
    """Return a dict from a delimited string.

    Raises ValueError when the separator is absent, leads or trails the
    (stripped) value, or occurs more than once.
    """
    stripped = string.strip()
    if separator not in string:
        raise ValueError("Separator '%s' not in value '%s'" % (separator, string))
    if stripped.startswith(separator):
        raise ValueError("Value '%s' starts with separator '%s'" % (string, separator))
    if stripped.endswith(separator):
        raise ValueError("Value '%s' ends with separator '%s'" % (string, separator))
    if string.count(separator) != 1:
        raise ValueError("Value '%s' should only have one '%s' separator" % (string, separator))
    key, _, value = string.partition(separator)
    return {key: value}
Return a dict from a delimited string.
def start(self, version=None, **kwargs):
    """Launch the game process.

    Falls back to the most recent known version when none is given.
    """
    version = version or self.mostRecentVersion
    run_version = lib.Version(version.version, version.baseVersion,
                              version.dataHash, version.fixedHash)
    return sc_process.StarcraftProcess(
        self,
        exec_path=self.exec_path(version.baseVersion),
        version=run_version,
        **kwargs)
Launch the game process.
def collect(self):
    """Clear cache of results which have timed out.

    For each cached function, rebuilds its cache keeping only entries whose
    timestamp (entry[1]) is still within that function's timeout.
    """
    # Hoisted out of the loops: the original called time.time() once per
    # entry, doing invariant work and applying a slightly different cutoff
    # to each entry within a single sweep.
    now = time.time()
    for func in self._caches:
        timeout = self._timeouts[func]
        self._caches[func] = {
            key: entry
            for key, entry in self._caches[func].items()
            if (now - entry[1]) < timeout
        }
Clear cache of results which have timed out
def get_local_extrema(self, find_min=True, threshold_frac=None, threshold_abs=None):
    """Get all local extrema fractional coordinates in charge density,
    searching for local minima by default. Sites are NOT grouped
    symmetrically.

    Args:
        find_min (bool): True to find local minima, otherwise local maxima.
        threshold_frac (float): optional fraction of extrema to keep,
            ranked by intensity. Do not combine with threshold_abs.
        threshold_abs (float): optional absolute intensity filter.
            Do not combine with threshold_frac.

    Returns:
        extrema_coords (list): fractional coordinates of local extrema.
    """
    if find_min:
        sign, extrema_type = -1, "local minima"
    else:
        sign, extrema_type = 1, "local maxima"
    # Tile 3x3x3 so peaks at the cell boundary are found correctly.
    total_chg = np.tile(sign * self.chgcar.data["total"], reps=(3, 3, 3))
    peaks = peak_local_max(total_chg, min_distance=1)
    frac_coords = [peak / total_chg.shape * 3 for peak in peaks]
    # Keep only coordinates inside the central image, then shift back.
    frac_coords = [
        coord - 1 for coord in frac_coords
        if all(np.array(coord) < 2) and all(np.array(coord) >= 1)
    ]
    self._update_extrema(frac_coords, extrema_type,
                         threshold_frac=threshold_frac,
                         threshold_abs=threshold_abs)
    return self.extrema_coords
Get all local extrema fractional coordinates in charge density, searching for local minima by default. Note that sites are NOT grouped symmetrically. Args: find_min (bool): True to find local minima, False to find local maxima. threshold_frac (float): optional fraction of extrema shown, which returns `threshold_frac * tot_num_extrema` extrema fractional coordinates based on highest/lowest intensity. E.g. set 0.2 to show the extrema with 20% highest or lowest intensity. Value range: 0 <= threshold_frac <= 1 Note that threshold_abs and threshold_frac should not be set at the same time. threshold_abs (float): optional filter. When searching for local minima, intensity <= threshold_abs returns; when searching for local maxima, intensity >= threshold_abs returns. Note that threshold_abs and threshold_frac should not be set at the same time. Returns: extrema_coords (list): list of fractional coordinates corresponding to local extrema.
def local_accuracy(X_train, y_train, X_test, y_test, attr_test, model_generator,
                   metric, trained_model):
    """Measure how well the features plus a constant base rate sum up to
    the model output, via ``metric`` on the test set."""
    X_train, X_test = to_array(X_train, X_test)
    assert X_train.shape[1] == X_test.shape[1]
    predictions = trained_model.predict(X_test)
    attribution_totals = strip_list(attr_test).sum(1)
    return metric(predictions, attribution_totals)
Measures how well the features plus a constant base rate sum up to the model output.
def x_build_targets_target(self, node):
    """Process the target dependency DAG into an ancestry tree so we can
    look up which top-level library and test targets specific build
    actions correspond to."""
    target = node
    name = self.get_child_data(target, tag='name', strip=True)
    path = self.get_child_data(target, tag='path', strip=True)
    jam_target = self.get_child_data(target, tag='jam-target', strip=True)
    self.target[jam_target] = {'name': name, 'path': path}
    # Walk the sibling chain of <dependency> nodes, recording this target
    # as the parent of each dependency.
    dependency = self.get_child(
        self.get_child(target, tag='dependencies'), tag='dependency')
    while dependency:
        child = self.get_data(dependency, strip=True)
        child_jam_target = '<p%s>%s' % (path, child.split('//', 1)[1])
        self.parent[child_jam_target] = jam_target
        dependency = self.get_sibling(dependency.nextSibling, tag='dependency')
    return None
Process the target dependency DAG into an ancestry tree so we can look up which top-level library and test targets specific build actions correspond to.
def setnx(self, key, value):
    """Set the value of ``key`` to ``value`` if key doesn't exist."""
    return self.set(key, value, nx=True)
Set the value of ``key`` to ``value`` if key doesn't exist
def filter_host_by_regex(regex):
    """Build a filter matching items whose host name matches ``regex``.

    :param regex: regex to filter on
    :type regex: str
    :return: filter function taking an items dict
    """
    pattern = re.compile(regex)

    def inner_filter(items):
        host = items["host"]
        if host is None:
            return False
        return pattern.match(host.host_name) is not None

    return inner_filter
Build a filter matching hosts whose host name matches a regex. :param regex: regex to filter on :type regex: str :return: host filter function :rtype: callable
def candle_lighting(self):
    """Return the time for candle lighting, or None if not applicable."""
    today = HDate(gdate=self.date, diaspora=self.location.diaspora)
    tomorrow = HDate(gdate=self.date + dt.timedelta(days=1),
                     diaspora=self.location.diaspora)
    # Holy day flowing into another holy day: candles are lit at havdalah.
    if (today.is_yom_tov or today.is_shabbat) and \
            (tomorrow.is_yom_tov or tomorrow.is_shabbat):
        return self._havdalah_datetime
    # Eve of shabbat/yom tov: light before sunset by the configured offset.
    if tomorrow.is_shabbat or tomorrow.is_yom_tov:
        offset = dt.timedelta(minutes=self.candle_lighting_offset)
        return self.zmanim["sunset"] - offset
    return None
Return the time for candle lighting, or None if not applicable.
def _fetch_templates(src):
    """Fetch all of the templates in the src directory

    :param src: The source path
    :type src: ``str``
    :rtype: ``list`` of ``tuple``
    :returns: ``list`` of ('key', 'description')
    """
    log.debug('Listing contents of %s', src)
    templates = []
    for entry in os.listdir(src):
        entry_path = os.path.join(src, entry)
        if not os.path.isdir(entry_path):
            continue
        template_path = os.path.join(entry_path, TEMPLATE_FILE_NAME)
        if os.path.isfile(template_path):
            templates.append(_get_template(template_path, entry))
        else:
            log.debug("Directory does not contain %s %s", template_path,
                      TEMPLATE_FILE_NAME)
    return templates
Fetch all of the templates in the src directory :param src: The source path :type src: ``str`` :rtype: ``list`` of ``tuple`` :returns: ``list`` of ('key', 'description')
def download(self, image, url_field='url', suffix=None):
    """Download the binary data of an image attachment.

    :param image: an image attachment
    :param str url_field: the field of the image with the right URL
    :param str suffix: an optional URL suffix
    :return: binary image data
    :rtype: bytes
    """
    url = getattr(image, url_field)
    if suffix is not None:
        # BUG FIX: str.join takes a single iterable; the previous
        # '.'.join(url, suffix) raised TypeError whenever a suffix was given.
        url = '.'.join((url, suffix))
    response = self.session.get(url)
    return response.content
Download the binary data of an image attachment. :param image: an image attachment :type image: :class:`~groupy.api.attachments.Image` :param str url_field: the field of the image with the right URL :param str suffix: an optional URL suffix :return: binary image data :rtype: bytes
def __add_prop(self, key, admin=False):
    """Add gettable and settable room config property during runtime.

    When ``admin`` is True, setting the property requires mod privileges.
    """
    def _read(self):
        return self.config[key]

    def _write(self, val):
        if admin and not self.admin:
            raise RuntimeError(
                f"You can't set the {key} key without mod privileges"
            )
        self.__set_config_value(self.config.get_real_key(key), val)

    # Installed on the class so every instance exposes the property.
    setattr(self.__class__, key, property(fget=_read, fset=_write))
Add gettable and settable room config property during runtime
def join_tables(left, right, key_left, key_right, cols_right=None):
    """Perform a left join of two astropy tables.

    Parameters
    ----------
    left : `~astropy.Table`
        Left table for join.
    right : `~astropy.Table`
        Right table for join.
    key_left : str
        Key used to match elements from ``left`` table.
    key_right : str
        Key used to match elements from ``right`` table.
    cols_right : list
        Subset of columns from ``right`` table appended to the joined table.
    """
    right = right.copy()
    if cols_right is None:
        cols_right = right.colnames
    else:
        cols_right = [name for name in cols_right if name in right.colnames]
    # Align the join key name across both tables.
    if key_left != key_right:
        right[key_right].name = key_left
    if key_left not in cols_right:
        cols_right = cols_right + [key_left]
    out = join(left, right[cols_right], keys=key_left, join_type='left')
    # Choose per-dtype fill values so .filled() leaves no masked entries.
    for name in out.colnames:
        kind = out[name].dtype.kind
        if kind in ('S', 'U'):
            out[name].fill_value = ''
        elif kind == 'i':
            out[name].fill_value = 0
        else:
            out[name].fill_value = np.nan
    return out.filled()
Perform a join of two tables. Parameters ---------- left : `~astropy.Table` Left table for join. right : `~astropy.Table` Right table for join. key_left : str Key used to match elements from ``left`` table. key_right : str Key used to match elements from ``right`` table. cols_right : list Subset of columns from ``right`` table that will be appended to joined table.
def _counts(self):
    """Number of non-nan entries in each rolling window."""
    window_dim = utils.get_temp_dimname(self.obj.dims, '_rolling_dim')
    rolled = self.obj.notnull().rolling(
        center=self.center, **{self.dim: self.window})
    constructed = rolled.construct(window_dim, fill_value=False)
    return constructed.sum(dim=window_dim, skipna=False)
Number of non-nan entries in each rolling window.
def handle_command_exit_code(self, code):
    """here we determine if we had an exception, or an error code that we
    weren't expecting to see.  if we did, we create and raise an exception
    """
    call_args = self.call_args
    exc_class = get_exc_exit_code_would_raise(
        code, call_args["ok_code"], call_args["piped"])
    if not exc_class:
        return
    raise exc_class(self.ran, self.process.stdout, self.process.stderr,
                    call_args["truncate_exc"])
here we determine if we had an exception, or an error code that we weren't expecting to see. if we did, we create and raise an exception
def parse(self, argument):
    """Determine validity of argument and return the correct element of enum.

    If self.enum_values is empty, then all arguments are valid and argument
    will be returned. Otherwise, if argument matches an element in enum,
    then the first matching element will be returned.

    Args:
        argument: The supplied flag value.

    Returns:
        The matching element from enum_values, or argument if enum_values is
        empty.

    Raises:
        ValueError: enum_values was non-empty, but argument didn't match
            anything in enum.
    """
    if not self.enum_values:
        return argument
    if self.case_sensitive:
        if argument not in self.enum_values:
            raise ValueError('value should be one of <%s>' %
                             '|'.join(self.enum_values))
        return argument
    # Fold once and scan once: the previous code uppercased the whole enum
    # list twice (once for the membership test, again for the lookup).
    folded = argument.upper()
    for value in self.enum_values:
        if value.upper() == folded:
            return value
    raise ValueError('value should be one of <%s>' % '|'.join(self.enum_values))
Determine validity of argument and return the correct element of enum. If self.enum_values is empty, then all arguments are valid and argument will be returned. Otherwise, if argument matches an element in enum, then the first matching element will be returned. Args: argument: The supplied flag value. Returns: The matching element from enum_values, or argument if enum_values is empty. Raises: ValueError: enum_values was non-empty, but argument didn't match anything in enum.
def delete(self, namespace, key):
    """Remove a configuration item from the database

    Args:
        namespace (`str`): Namespace of the config item
        key (`str`): Key to delete

    Returns:
        `None`
    """
    if not self.key_exists(namespace, key):
        raise KeyError('{}/{}'.format(namespace, key))
    obj = db.ConfigItem.find_one(
        ConfigItem.namespace_prefix == namespace,
        ConfigItem.key == key
    )
    # Keep the in-memory cache and the database in sync.
    del self.__data[namespace][key]
    db.session.delete(obj)
    db.session.commit()
Remove a configuration item from the database Args: namespace (`str`): Namespace of the config item key (`str`): Key to delete Returns: `None`
def change_password(self, new_password, email):
    """Changes the login password

    :param new_password: The new login password to set for the account
    :param email: The current email of the account
    """
    log.info("[+] Changing the password of the account")
    request = account.ChangePasswordRequest(
        self.password, new_password, email, self.username)
    return self._send_xmpp_element(request)
Changes the login password :param new_password: The new login password to set for the account :param email: The current email of the account
def create(cls, term, *ranges):
    """Instantiate the indexed sum while applying simplification rules"""
    # Coerce plain values into ScalarValue before delegating upward.
    scalar_term = term if isinstance(term, Scalar) else ScalarValue.create(term)
    return super().create(scalar_term, *ranges)
Instantiate the indexed sum while applying simplification rules
def proto_files(root):
    """Yields the path of all .proto files under the root."""
    for dirpath, _, filenames in os.walk(root):
        yield from (os.path.join(dirpath, name)
                    for name in filenames if name.endswith('.proto'))
Yields the path of all .proto files under the root.
def toTag(self, output):
    """This methods returns all data of this feed as feed xml tag

    :param output: XML Document to which the data should be added
    :type output: xml.dom.DOMImplementation.createDocument
    """
    def text_element(tag, text):
        # Small helper: <tag>text</tag>
        element = output.createElement(tag)
        element.appendChild(output.createTextNode(text))
        return element

    feed = output.createElement('feed')
    feed.setAttribute('name', self.name)
    feed.setAttribute('priority', str(self.priority))

    schedule = output.createElement('schedule')
    for attr, value in (('dayOfMonth', self.dayOfMonth),
                        ('dayOfWeek', self.dayOfWeek),
                        ('hour', self.hour),
                        ('minute', self.minute)):
        schedule.setAttribute(attr, value)
    if self.retry:
        schedule.setAttribute('retry', self.retry)
    feed.appendChild(schedule)

    feed.appendChild(text_element('url', self.url))
    if self.source:
        feed.appendChild(text_element('source', self.source))
    return feed
This methods returns all data of this feed as feed xml tag :param output: XML Document to which the data should be added :type output: xml.dom.DOMImplementation.createDocument
def _remove(self, n):
    """Remove one single file"""
    if os.path.isfile(n):
        os.remove(n)
    # Report only once the file is confirmed gone.
    if not os.path.isfile(n):
        print("File '{0}' removed".format(n))
Remove one single file
def getLocalDateAndTime(date, time, *args, **kwargs):
    """Get the date and time in the local timezone from date and optionally
    time"""
    localDt = getLocalDatetime(date, time, *args, **kwargs)
    localTime = localDt.timetz() if time is not None else None
    return (localDt.date(), localTime)
Get the date and time in the local timezone from date and optionally time
def run_and_exit_if(opts, action, *names):
    """Run the no-arg function `action` if any of `names` appears in the
    option dict `opts`, then exit with status 0."""
    if any(name in opts for name in names):
        action()
        sys.exit(0)
Run the no-arg function `action` if any of `names` appears in the option dict `opts`.
def run_conditional_decorators(self, context):
    """Evaluate the step decorators to decide whether to run step or not.

    Use pypyr.dsl.Step.run_step if you intend on executing the step the
    same way pypyr does.

    Args:
        context: (pypyr.context.Context) The pypyr context. This arg will
                 mutate.
    """
    logger.debug("starting")
    run_me = context.get_formatted_as_type(self.run_me, out_type=bool)
    skip_me = context.get_formatted_as_type(self.skip_me, out_type=bool)
    swallow_me = context.get_formatted_as_type(self.swallow_me, out_type=bool)

    # Guard clauses: bail out early when run/skip say not to execute.
    if not run_me:
        logger.info(f"{self.name} not running because run is False.")
        logger.debug("done")
        return
    if skip_me:
        logger.info(f"{self.name} not running because skip is True.")
        logger.debug("done")
        return

    try:
        if self.retry_decorator:
            self.retry_decorator.retry_loop(context, self.invoke_step)
        else:
            self.invoke_step(context=context)
    except Exception as ex_info:
        if not swallow_me:
            raise
        logger.error(
            f"{self.name} Ignoring error because swallow "
            "is True for this step.\n"
            f"{type(ex_info).__name__}: {ex_info}")
    logger.debug("done")
Evaluate the step decorators to decide whether to run step or not. Use pypyr.dsl.Step.run_step if you intend on executing the step the same way pypyr does. Args: context: (pypyr.context.Context) The pypyr context. This arg will mutate.
def camelResource(obj):
    """Some sources from APIs return lowerCased keys whereas describe calls
    always return TitleCase; this function turns the former into the latter.

    Mutates ``obj`` in place and returns it. Recurses into nested dicts and
    into dicts held in lists; lists nested directly inside lists are left
    untouched (matches the original behavior).
    """
    if not isinstance(obj, dict):
        return obj
    for key in list(obj.keys()):
        value = obj.pop(key)
        obj["%s%s" % (key[0].upper(), key[1:])] = value
        if isinstance(value, dict):
            camelResource(value)
        elif isinstance(value, list):
            # Plain loop for side effects: list(map(...)) built a throwaway
            # list just to force evaluation.
            for element in value:
                camelResource(element)
    return obj
Some sources from APIs return lowerCased keys whereas describe calls always return TitleCase; this function turns the former into the latter
def update(self, workspace, params=None, **options):
    """A specific, existing workspace can be updated by making a PUT request
    on the URL for that workspace. Only the fields provided in the data
    block will be updated; any unspecified fields will remain unchanged.

    Currently the only field that can be modified for a workspace is its
    `name`. Returns the complete, updated workspace record.

    Parameters
    ----------
    workspace : {Id} The workspace to update.
    [data] : {Object} Data for the request
    """
    # None sentinel instead of a mutable `{}` default, which is shared
    # across every call to this method.
    if params is None:
        params = {}
    path = "/workspaces/%s" % (workspace)
    return self.client.put(path, params, **options)
A specific, existing workspace can be updated by making a PUT request on the URL for that workspace. Only the fields provided in the data block will be updated; any unspecified fields will remain unchanged. Currently the only field that can be modified for a workspace is its `name`. Returns the complete, updated workspace record. Parameters ---------- workspace : {Id} The workspace to update. [data] : {Object} Data for the request
def make_retrieveParameters(offset=1, count=100, name='RS', sort='D'):
    """Create retrieve parameters dictionary to be used with APIs.

    :offset: First record in results to return. Must be greater than zero.
    :count: Number of records to display in the result (0..100).
    :name: Two-character abbreviation of the field to order by.
    :sort: 'A' (ascending) or 'D' (descending).
    """
    sort_field = _OrderedDict([('name', name), ('sort', sort)])
    return _OrderedDict([
        ('firstRecord', offset),
        ('count', count),
        ('sortField', sort_field),
    ])
Create retrieve parameters dictionary to be used with APIs. :count: Number of records to display in the result. Cannot be less than 0 and cannot be greater than 100. If count is 0 then only the summary information will be returned. :offset: First record in results to return. Must be greater than zero :name: Name of the field to order by. Use a two-character abbreviation to specify the field ('AU': Author, 'CF': Conference Title, 'CG': Page, 'CW': Source, 'CV': Volume, 'LC': Local Times Cited, 'LD': Load Date, 'PG': Page, 'PY': Publication Year, 'RS': Relevance, 'SO': Source, 'TC': Times Cited, 'VL': Volume) :sort: Must be A (ascending) or D (descending). The sort parameter can only be D for Relevance and TimesCited.
def setwrap(value: Any) -> Set[str]:
    """Returns a flattened and stringified set from the given object or
    iterable.

    For use in public functions which accept arguments or kwargs that can
    be one object or a list of objects.
    """
    flattened = set(flatten([value]))
    return {str(item) for item in flattened}
Returns a flattened and stringified set from the given object or iterable. For use in public functions which accept arguments or kwargs that can be one object or a list of objects.
def pop_event(self, event_name, timeout=DEFAULT_TIMEOUT):
    """Pop an event from its queue.

    Return and remove the oldest entry of an event. Block until an event of
    specified name is available or times out if timeout is set.

    Args:
        event_name: Name of the event to be popped.
        timeout: Number of seconds to wait when event is not present.
            Never times out if None.

    Returns:
        The oldest entry of the specified event.

    Raises:
        IllegalStateError: Raised if pop is called before the dispatcher
            starts polling.
        queue.Empty: Raised when the timeout elapses with no event.
    """
    if not self.started:
        raise IllegalStateError(
            "Dispatcher needs to be started before popping.")
    e_queue = self.get_event_q(event_name)
    if not e_queue:
        raise TypeError("Failed to get an event queue for {}".format(
            event_name))
    try:
        # timeout > 0: bounded blocking get; timeout == 0: non-blocking;
        # timeout None: block forever.
        if timeout:
            return e_queue.get(True, timeout)
        if timeout == 0:
            return e_queue.get(False)
        return e_queue.get(True)
    except queue.Empty:
        raise queue.Empty('Timeout after {}s waiting for event: {}'.format(
            timeout, event_name))
Pop an event from its queue. Return and remove the oldest entry of an event. Block until an event of specified name is available or times out if timeout is set. Args: event_name: Name of the event to be popped. timeout: Number of seconds to wait when event is not present. Never times out if None. Returns: The oldest entry of the specified event. Raises: IllegalStateError: Raised if pop is called before the dispatcher starts polling. queue.Empty: Raised if the timeout elapses before an event is available.
def to_sparse(self, fill_value=None, kind='block'):
    """Convert to SparseDataFrame.

    Any data matching ``fill_value`` is omitted in the representation,
    allowing more efficient storage.

    Parameters
    ----------
    fill_value : float, default None
        The specific value that should be omitted in the representation.
    kind : {'block', 'integer'}, default 'block'
        The kind of the SparseIndex tracking where data is not equal to
        the fill value.

    Returns
    -------
    SparseDataFrame
        The sparse representation of the DataFrame.
    """
    from pandas.core.sparse.api import SparseDataFrame
    return SparseDataFrame(
        self._series,
        index=self.index,
        columns=self.columns,
        default_kind=kind,
        default_fill_value=fill_value,
    )
Convert to SparseDataFrame. Implement the sparse version of the DataFrame meaning that any data matching a specific value it's omitted in the representation. The sparse DataFrame allows for a more efficient storage. Parameters ---------- fill_value : float, default None The specific value that should be omitted in the representation. kind : {'block', 'integer'}, default 'block' The kind of the SparseIndex tracking where data is not equal to the fill value: - 'block' tracks only the locations and sizes of blocks of data. - 'integer' keeps an array with all the locations of the data. In most cases 'block' is recommended, since it's more memory efficient. Returns ------- SparseDataFrame The sparse representation of the DataFrame. See Also -------- DataFrame.to_dense : Converts the DataFrame back to the its dense form. Examples -------- >>> df = pd.DataFrame([(np.nan, np.nan), ... (1., np.nan), ... (np.nan, 1.)]) >>> df 0 1 0 NaN NaN 1 1.0 NaN 2 NaN 1.0 >>> type(df) <class 'pandas.core.frame.DataFrame'> >>> sdf = df.to_sparse() >>> sdf 0 1 0 NaN NaN 1 1.0 NaN 2 NaN 1.0 >>> type(sdf) <class 'pandas.core.sparse.frame.SparseDataFrame'>
def begin_recording(self):
    """Open the file and write the metadata header to describe this
    recording. Called after we establish an end-to-end connection.

    This uses Version 1 of the recording protocol.
    """
    logger.info("[RewardProxyServer] [%d] Starting recording", self.id)
    # Refuse to start if the client connection is already gone.
    if self._closed:
        logger.error(
            "[RewardProxyServer] [%d] Attempted to start writing although client connection is already closed. Aborting",
            self.id)
        self.close()
        return
    # A non-zero open-file count means a previous recording never closed;
    # drop the connection rather than corrupt state.
    if self._n_open_files != 0:
        logger.error("[RewardProxyServer] [%d] WARNING: n open rewards files = %s. This is unexpected. Dropping connection.", self.id, self._n_open_files)
        self.close()
        return
    logfile_path = os.path.join(self.factory.logfile_dir, 'rewards.demo')
    logger.info('Recording to {}'.format(logfile_path))
    self.file = open(logfile_path, 'w')
    self._n_open_files += 1
    logger.info("[RewardProxyServer] [%d] n open rewards files incremented: %s", self.id, self._n_open_files)
    # Header line: JSON metadata identifying the protocol version.
    self.file.write(json.dumps({
        'version': 1,
        '_debug_version': '0.0.1',
    }))
    self.file.write('\n')
    self.file.flush()
    logger.info("[RewardProxyServer] [%d] Wrote version number", self.id)
Open the file and write the metadata header to describe this recording. Called after we establish an end-to-end connection This uses Version 1 of our protocol Version 0 can be seen here: https://github.com/openai/universe/blob/f85a7779c3847fa86ec7bb513a1da0d3158dda78/bin/recording_agent.py
def custom_arg(self, custom_arg):
    """Add custom args to the email

    :param value: A list of CustomArg objects or a dict of custom arg
        key/values
    :type value: CustomArg, list(CustomArg), dict
    """
    args = custom_arg if isinstance(custom_arg, list) else [custom_arg]
    for arg in args:
        self.add_custom_arg(arg)
Add custom args to the email :param value: A list of CustomArg objects or a dict of custom arg key/values :type value: CustomArg, list(CustomArg), dict
def set_logger(self, logger_name, level, handler=None):
    """Sets the level (and optionally a handler) of a logger in the config."""
    loggers = self.config.setdefault('loggers', {})
    entry = {'level': self.real_level(level)}
    if handler:
        entry['handlers'] = [handler]
    loggers[logger_name] = entry
Sets the level of a logger
def add_tmpltbank_from_hdf_file(self, hdf_fp, vary_fupper=False):
    """Take an open HDF file object containing a list of templates and add
    them into the partitioned template bank object.

    Parameters
    -----------
    hdf_fp : h5py.File object
        The template bank in HDF5 format.
    vary_fupper : False
        If given also include the additional information needed to compute
        distances with a varying upper frequency cutoff.
    """
    mass1s = hdf_fp['mass1'][:]
    mass2s = hdf_fp['mass2'][:]
    spin1zs = hdf_fp['spin1z'][:]
    spin2zs = hdf_fp['spin2z'][:]
    # BUG FIX: xrange is Python 2 only; iterate the columns in lockstep.
    for mass1, mass2, spin1z, spin2z in zip(mass1s, mass2s, spin1zs, spin2zs):
        self.add_point_by_masses(mass1, mass2, spin1z, spin2z,
                                 vary_fupper=vary_fupper)
This function will take a pointer to an open HDF File object containing a list of templates and add them into the partitioned template bank object. Parameters ----------- hdf_fp : h5py.File object The template bank in HDF5 format. vary_fupper : False If given also include the additional information needed to compute distances with a varying upper frequency cutoff.
def regex(self):
    """Return compiled regex (compiled lazily on first access, then cached)."""
    compiled = self._compiled_regex
    if not compiled:
        compiled = re.compile(self.raw)
        self._compiled_regex = compiled
    return compiled
Return compiled regex.
def find_label(self, label: Label):
    """Helper function that iterates over the program and looks for a
    JumpTarget that has a Label matching the input label.

    :param label: Label object to search for in program
    :return: Program index where ``label`` is found
    """
    for index, action in enumerate(self.program):
        if isinstance(action, JumpTarget) and label == action.label:
            return index
    raise RuntimeError("Improper program - Jump Target not found in the "
                       "input program!")
Helper function that iterates over the program and looks for a JumpTarget that has a Label matching the input label. :param label: Label object to search for in program :return: Program index where ``label`` is found
def get_maps(A):
    """Get mappings from the square array A to the flat vector of
    parameters alpha. Helper function for PCCA+ optimization.

    Parameters
    ----------
    A : ndarray
        The transformation matrix A.

    Returns
    -------
    flat_map : ndarray
        Mapping from flat indices (k) to square (i, j) indices.
    square_map : ndarray
        Mapping from square indices (i, j) to flat indices (k).
    """
    N = A.shape[0]
    # Interior indices (1..N-1) in row-major order.
    flat_map = np.array([[i, j]
                         for i in range(1, N)
                         for j in range(1, N)])
    square_map = np.zeros(A.shape, 'int')
    for k, (i, j) in enumerate(flat_map):
        square_map[i, j] = k
    return flat_map, square_map
Get mappings from the square array A to the flat vector of parameters alpha. Helper function for PCCA+ optimization. Parameters ---------- A : ndarray The transformation matrix A. Returns ------- flat_map : ndarray Mapping from flat indices (k) to square (i,j) indices. square map : ndarray Mapping from square indices (i,j) to flat indices (k).
def set_current_filename(self, filename, focus=True):
    """Set current filename and return the associated editor instance."""
    index = self.has_filename(filename)
    if index is None:
        return None
    if focus:
        self.set_stack_index(index)
    editor = self.data[index].editor
    if focus:
        editor.setFocus()
    else:
        self.stack_history.remove_and_append(index)
    return editor
Set current filename and return the associated editor instance.
def Extinction(extval,name=None):
    """Generate extinction curve to be used with spectra.

    Reddening laws are cached in ``Cache.RedLaws``; repeated calls with the
    same law name return the cached result. If a deprecated reddening law
    is given, ``pysynphot.extinction.DeprecatedExtinction`` is used instead.

    Parameters
    ----------
    extval : float
        Value of E(B-V) in magnitudes.
    name : str or `None`
        Name of reddening law. If `None`, the default cached entry is used.

    Raises
    ------
    ValueError
        Invalid reddening law.
    """
    # Fast path: law already cached and usable.
    try:
        ext=Cache.RedLaws[name].reddening(extval)
    except AttributeError:
        # Cached entry is raw data, not a RedLaw yet -- wrap it and retry.
        Cache.RedLaws[name]=RedLaw(Cache.RedLaws[name])
        ext=Cache.RedLaws[name].reddening(extval)
    except KeyError:
        # Not cached: try to load the named law from file.
        try:
            Cache.RedLaws[name]=RedLaw(name)
            ext=Cache.RedLaws[name].reddening(extval)
        except IOError:
            # No such file: fall back to the deprecated built-in laws.
            try:
                ext=extinction.DeprecatedExtinction(extval,name)
            except KeyError:
                raise ValueError('No extinction law has been defined for "%s", and no such file exists'%name)
    return ext
Generate extinction curve to be used with spectra. By default, :meth:`~CustomRedLaw.reddening` is used to generate the extinction curve. If a deprecated reddening law is given, then `~pysynphot.extinction.DeprecatedExtinction` is used instead. .. note:: Reddening laws are cached in ``pysynphot.Cache.RedLaws`` for better performance. Repeated calls to the same reddening law here returns the cached result. Parameters ---------- extval : float Value of :math:`E(B-V)` in magnitudes. name : str or `None` Name of reddening law (see :func:`print_red_laws`). If `None` (default), the average Milky Way extinction (``'mwavg'``) will be used. Returns ------- ext : `~pysynphot.spectrum.ArraySpectralElement` or `~pysynphot.extinction.DeprecatedExtinction` Extinction curve. Raises ------ ValueError Invalid reddening law. Examples -------- >>> ext = S.Extinction(0.3, 'mwavg')
def evaluate(self, env):
    """Evaluate the function call in the environment, returning a Unicode
    string.

    Unknown function names evaluate to the original source text; errors
    raised by the function are rendered as '<message>'.
    """
    if self.ident not in env.functions:
        return self.original
    arg_vals = [expr.evaluate(env) for expr in self.args]
    try:
        out = env.functions[self.ident](*arg_vals)
    except Exception as exc:
        return u'<%s>' % str(exc)
    return str(out)
Evaluate the function call in the environment, returning a Unicode string.
def validate_broker_ids_subset(broker_ids, subset_ids):
    """Validate that user specified broker ids to restart exist in the
    broker ids retrieved from cluster config.

    Prints an error line for every unknown id.

    :param broker_ids: all broker IDs in a cluster
    :type broker_ids: list of integers
    :param subset_ids: broker IDs specified by user
    :type subset_ids: list of integers
    :returns: bool
    """
    all_ids = set(broker_ids)
    valid = True
    for subset_id in subset_ids:
        # Single membership test per id: the previous code tested
        # membership twice (once in the accumulator, once in the if).
        if subset_id not in all_ids:
            print("Error: user specified broker id {0} does not exist in cluster.".format(subset_id))
            valid = False
    return valid
Validate that user specified broker ids to restart exist in the broker ids retrieved from cluster config. :param broker_ids: all broker IDs in a cluster :type broker_ids: list of integers :param subset_ids: broker IDs specified by user :type subset_ids: list of integers :returns: bool
async def close(self):
    """Close pyodbc connection, if one is open; no-op otherwise."""
    conn = self._conn
    if not conn:
        return
    result = await self._execute(conn.close)
    self._conn = None
    return result
Close pyodbc connection
def set_qword_at_rva(self, rva, qword):
    """Set the quad-word value at the file offset corresponding to the
    given RVA."""
    data = self.get_data_from_qword(qword)
    return self.set_bytes_at_rva(rva, data)
Set the quad-word value at the file offset corresponding to the given RVA.
def simhash(self, content):
    """Select policies for simhash on the different types of content.

    None -> sentinel hash -1; str -> tokenize then build; iterable of
    features -> build directly; int -> use as the hash value.
    """
    # BUG FIX: collections.Iterable was removed in Python 3.10; the ABC
    # lives in collections.abc.
    from collections.abc import Iterable
    if content is None:
        self.hash = -1
    elif isinstance(content, str):
        features = self.tokenizer_func(content, self.keyword_weight_pari)
        self.hash = self.build_from_features(features)
    elif isinstance(content, Iterable):
        self.hash = self.build_from_features(content)
    elif isinstance(content, int):
        self.hash = content
    else:
        raise Exception("Unsupported parameter type %s" % type(content))
Select policies for simhash on the different types of content.
def _read_journal(self):
    """Extracts the USN journal from the disk and parses its content."""
    root = self._filesystem.inspect_get_roots()[0]
    inode = self._filesystem.stat('C:\\$Extend\\$UsnJrnl')['ino']
    # Download the journal inode into a temp file, parse while it exists.
    with NamedTemporaryFile(buffering=0) as tmp:
        self._filesystem.download_inode(root, inode, tmp.name)
        journal = usn_journal(tmp.name)
        return parse_journal(journal)
Extracts the USN journal from the disk and parses its content.
def clone(self, **data):
    """Utility method for cloning the instance as a new object.

    :parameter data: additional fields which override field data.
    :rtype: a new instance of this class.
    """
    meta = self._meta
    session = self.session
    # Pull out an explicit primary-key override, if the caller gave one.
    pkvalue = data.pop(meta.pkname(), None)
    fields = self.todict(exclude_cache=True)
    fields.update(data)
    fields.pop('__dbdata__', None)
    obj = meta.make_object((pkvalue, None, fields))
    obj.session = session
    return obj
Utility method for cloning the instance as a new object. :parameter data: additional which override field data. :rtype: a new instance of this class.
def log_uuid(self, uuid):
    """Logs the object with the specified `uuid` to `self.uuids` if
    possible.

    Args:
        uuid (str): string value of :meth:`uuid.uuid4` value for the
            object.
    """
    if uuid in self.uuids:
        return
    # NOTE(review): `uuids` here is a module-level registry -- confirm.
    if uuid in uuids:
        self.uuids[uuid] = uuids[uuid].describe()
Logs the object with the specified `uuid` to `self.uuids` if possible. Args: uuid (str): string value of :meth:`uuid.uuid4` value for the object.
def disable_if_done(self, commit=True):
    """Set disabled=True if we have billed all we need to

    Will only have an effect on one-off costs.
    """
    if not self._is_billing_complete() or self.disabled:
        return
    self.disabled = True
    if commit:
        self.save()
Set disabled=True if we have billed all we need to Will only have an effect on one-off costs.
def _remove_action_from_type(valid_actions: Dict[str, List[str]],
                             type_: str,
                             filter_function: Callable[[str], bool]) -> None:
    """Finds the production rule matching the filter function in the given
    type's valid action list, and removes it. If there is more than one
    matching function, we crash."""
    action_list = valid_actions[type_]
    matches = [index for index, action in enumerate(action_list)
               if filter_function(action)]
    assert len(matches) == 1, "Filter function didn't find one action"
    action_list.pop(matches[0])
Finds the production rule matching the filter function in the given type's valid action list, and removes it. If there is more than one matching function, we crash.
def ComputeRoot(hashes):
    """Compute the root hash.

    Args:
        hashes (list): the list of hashes to build the root from.

    Returns:
        bytes: the root hash.
    """
    count = len(hashes)
    if count == 0:
        raise Exception('Hashes must have length')
    if count == 1:
        # A single hash is its own root.
        return hashes[0]
    return MerkleTree(hashes).Root.Hash
Compute the root hash. Args: hashes (list): the list of hashes to build the root from. Returns: bytes: the root hash.
def to_dicts(recarray):
    """Convert a record array to an iterator of dictionaries."""
    names = recarray.dtype.names
    for record in recarray:
        yield dict(zip(names, record.tolist()))
Convert a record array to an iterator of dictionaries
def to_dict(self):
    """Serialize representation of the column for local caching."""
    keys = ('schema', 'table', 'name', 'type')
    return {key: getattr(self, key) for key in keys}
Serialize representation of the column for local caching.
def allow_ip(*ips: typing.Union[str, ipaddress.IPv4Network, ipaddress.IPv4Address]):
    """Allow ip address.

    Accepts addresses, address strings, or whole networks (all hosts of a
    network are added).

    :param ips:
    :return:
    """
    for ip in ips:
        if isinstance(ip, str):
            ip = ipaddress.IPv4Address(ip)
        if isinstance(ip, ipaddress.IPv4Address):
            allowed_ips.add(ip)
        elif isinstance(ip, ipaddress.IPv4Network):
            allowed_ips.update(ip.hosts())
        else:
            raise ValueError(f"Bad type of ipaddress: {type(ip)} ('{ip}')")
Allow ip address. :param ips: :return:
def group_perms_for_user(cls, instance, user, db_session=None):
    """returns permissions that given user has for this resource that are
    inherited from groups

    :param instance:
    :param user:
    :param db_session:
    :return:
    """
    db_session = get_db_session(db_session, instance)
    all_perms = resource_permissions_for_users(
        cls.models_proxy,
        ANY_PERMISSION,
        resource_ids=[instance.resource_id],
        user_ids=[user.id],
        db_session=db_session,
    )
    group_perms = [perm for perm in all_perms if perm.type == "group"]
    user_groups = {group.id: group for group in user.groups}
    # The owning group implicitly grants every permission.
    if instance.owner_group_id in user_groups:
        group_perms.append(
            PermissionTuple(
                user,
                ALL_PERMISSIONS,
                "group",
                user_groups.get(instance.owner_group_id),
                instance,
                True,
                True,
            )
        )
    return group_perms
returns permissions that given user has for this resource that are inherited from groups :param instance: :param user: :param db_session: :return:
def GetReportData(self, get_report_args, token=None):
    """Load audit events for the given report args and return them as
    audit-chart report data."""
    report = rdf_report_plugins.ApiReportData(
        representation_type=RepresentationType.AUDIT_CHART,
        audit_chart=rdf_report_plugins.ApiAuditChartReportData(
            used_fields=self.USED_FIELDS))
    report.audit_chart.rows = _LoadAuditEvents(
        self.HANDLERS,
        get_report_args,
        transformers=[_ExtractClientIdFromPath],
        token=token)
    return report
Load audit events for the given report args and return them as audit-chart report data.
def _get_len(self):
    """Return total number of bytes in buffer (stored on self._len)."""
    buf = self._buffer
    if hasattr(buf, 'len'):
        self._len = buf.len
        return
    # Seek to the end to measure, then restore the original position.
    position = buf.tell()
    buf.seek(0, 2)
    self._len = buf.tell()
    buf.seek(position)
Return total number of bytes in buffer.
def other_object_webhook_handler(event):
    """Handle updates to transfer, charge, invoice, invoiceitem, plan,
    product and source objects.

    Docs: https://stripe.com/docs/api (charges, coupons, invoices,
    invoiceitems, plans, products, sources).
    """
    if event.parts[:2] == ["charge", "dispute"]:
        # Disputes arrive as charge.dispute.* events.
        target_cls = models.Dispute
    else:
        category_map = {
            "charge": models.Charge,
            "coupon": models.Coupon,
            "invoice": models.Invoice,
            "invoiceitem": models.InvoiceItem,
            "plan": models.Plan,
            "product": models.Product,
            "transfer": models.Transfer,
            "source": models.Source,
        }
        target_cls = category_map.get(event.category)
    _handle_crud_like_event(target_cls=target_cls, event=event)
Handle updates to transfer, charge, invoice, invoiceitem, plan, product and source objects. Docs for: - charge: https://stripe.com/docs/api#charges - coupon: https://stripe.com/docs/api#coupons - invoice: https://stripe.com/docs/api#invoices - invoiceitem: https://stripe.com/docs/api#invoiceitems - plan: https://stripe.com/docs/api#plans - product: https://stripe.com/docs/api#products - source: https://stripe.com/docs/api#sources
def _report(self, blocknr, blocksize, size):
    """helper for downloading the file: print download progress in-place."""
    downloaded = blocknr * blocksize
    percent = 100.0 * downloaded / size
    sys.stdout.write("\r{0:.2f}%".format(percent))
helper for downloading the file
def is_ligolw(origin, filepath, fileobj, *args, **kwargs):
    """Identify a file object as LIGO_LW-format XML.

    If ``fileobj`` is given, sniff its first two lines for the XML and
    LIGO_LW signatures (handling both bytes and str streams); otherwise
    fall back to checking whether the first positional argument is a
    LIGO_LW ``Element`` instance.
    """
    if fileobj is not None:
        # Remember position so the caller's stream is left untouched.
        loc = fileobj.tell()
        fileobj.seek(0)
        try:
            line1 = fileobj.readline().lower()
            line2 = fileobj.readline().lower()
            try:
                # Signatures are bytes; works when the stream is binary.
                return (line1.startswith(XML_SIGNATURE) and
                        line2.startswith((LIGOLW_SIGNATURE, LIGOLW_ELEMENT)))
            except TypeError:
                # Stream yielded str, not bytes: compare decoded signatures.
                return (line1.startswith(XML_SIGNATURE.decode('utf-8')) and
                        line2.startswith((LIGOLW_SIGNATURE.decode('utf-8'),
                                          LIGOLW_ELEMENT.decode('utf-8'))))
        finally:
            # Always restore the original stream position.
            fileobj.seek(loc)
    # No file object: try the (optional) ligo.lw / glue element classes.
    try:
        from ligo.lw.ligolw import Element
    except ImportError:
        # Without the library we cannot recognise Element instances at all.
        return False
    try:
        from glue.ligolw.ligolw import Element as GlueElement
    except ImportError:
        element_types = (Element,)
    else:
        # Accept elements from either the new or the legacy package.
        element_types = (Element, GlueElement)
    return len(args) > 0 and isinstance(args[0], element_types)
Identify a file object as LIGO_LW-format XML
def create_package(package_format, owner, repo, **kwargs):
    """Create a new package in a repository.

    Returns the (slug_perm, slug) pair of the created package.
    """
    client = get_packages_api()
    # the upload method name is format-specific, e.g. packages_upload_deb_...
    upload_name = "packages_upload_%s_with_http_info" % package_format
    with catch_raise_api_exception():
        upload = getattr(client, upload_name)
        data, _, headers = upload(
            owner=owner, repo=repo, data=make_create_payload(**kwargs)
        )
    ratelimits.maybe_rate_limit(client, headers)
    return data.slug_perm, data.slug
Create a new package in a repository.
def unicode_char(ignored_chars=None):
    """Return an event handler that yields the unicode character of a
    KEYDOWN event, unless the character is in ``ignored_chars``.
    """
    def handler(event):
        if event.type == pygame.KEYDOWN and (
                ignored_chars is None or event.unicode not in ignored_chars):
            return event.unicode
        return EventConsumerInfo.DONT_CARE
    return handler
returns a handler that listens for unicode characters
def del_subkey(self, name):
    """Delete the named subkey along with any values or keys it contains."""
    # deletion requires write access on the key
    self.sam |= KEY_WRITE
    subkey = self.get_subkey(name)
    # DeleteKey refuses non-empty keys, so empty the subkey first
    subkey.clear()
    _winreg.DeleteKey(subkey.parent.hkey, subkey.name)
Delete the named subkey, and any values or keys it contains.
def join(self, target):
    """Join a channel, appending its configured key (password) if any."""
    # passwords are keyed by the bare channel name, without the prefix chars
    channel = target.strip(self.server_config['CHANTYPES'])
    password = self.config.passwords.get(channel)
    if password:
        target = '%s %s' % (target, password)
    self.send_line('JOIN %s' % target)
join a channel
def get_raise_brok(self, host_name, service_name=''):
    """Build a 'downtime_raise' brok for this downtime.

    :param host_name: host concerned by the downtime
    :param service_name: service concerned by the downtime, if any
    :return: brok carrying this downtime's serialized data
    """
    data = self.serialize()
    data['host'] = host_name
    # only annotate a service when one was actually given
    if service_name != '':
        data['service'] = service_name
    return Brok({'type': 'downtime_raise', 'data': data})
Get a start downtime brok :param host_name: host concerned by the downtime :type host_name :param service_name: service concerned by the downtime :type service_name :return: brok with wanted data :rtype: alignak.brok.Brok
def getclientloansurl(idclient, *args, **kwargs):
    """Request the loans URL for a given client.

    Use this as the ``urlfunc`` of MambuLoans to fetch only the loans
    belonging to the client ``idclient``.

    Supported filter parameters (optional kwargs):

    * fullDetails (bool): request full account details
    * accountState (str): filter loans by account state

    :param idclient: client id whose loans are requested
    :return: the fully built URL string
    """
    getparams = []
    # Explicit membership checks replace the previous bare try/except
    # blocks, which silently swallowed every exception.
    if "fullDetails" in kwargs:
        # exact-True comparison (not truthiness) preserves the original
        # API behaviour  # noqa: E712
        if kwargs["fullDetails"] == True:
            getparams.append("fullDetails=true")
        else:
            getparams.append("fullDetails=false")
    if "accountState" in kwargs:
        getparams.append("accountState=%s" % kwargs["accountState"])
    url = getmambuurl(*args, **kwargs) + "clients/" + str(idclient) + "/loans"
    if getparams:
        url += "?" + "&".join(getparams)
    return url
Request Client loans URL. How to use it? By default MambuLoan uses getloansurl as the urlfunc. Override that behaviour by sending getclientloansurl (this function) as the urlfunc to the constructor of MambuLoans (note the final 's') and voila! you get the Loans just for a certain client. If idclient is set, you'll get a response adequate for a MambuLoans object. If not set, you'll get a Jar Jar Binks object, or something quite strange and useless as JarJar. Most likely a MambuError, since I haven't needed it for anything but the loans of one and just one client. See mambuloan module and pydoc for further information. Currently implemented filter parameters: * fullDetails * accountState See Mambu official developer documentation for further details, and info on parameters that may be implemented here in the future.
def _load_api(self):
    """Register all scheduler API routes."""
    # (endpoint name, rule suffix, handler, HTTP method)
    routes = (
        ('get_scheduler_info', '', api.get_scheduler_info, 'GET'),
        ('add_job', '/jobs', api.add_job, 'POST'),
        ('get_job', '/jobs/<job_id>', api.get_job, 'GET'),
        ('get_jobs', '/jobs', api.get_jobs, 'GET'),
        ('delete_job', '/jobs/<job_id>', api.delete_job, 'DELETE'),
        ('update_job', '/jobs/<job_id>', api.update_job, 'PATCH'),
        ('pause_job', '/jobs/<job_id>/pause', api.pause_job, 'POST'),
        ('resume_job', '/jobs/<job_id>/resume', api.resume_job, 'POST'),
        ('run_job', '/jobs/<job_id>/run', api.run_job, 'POST'),
    )
    for endpoint, rule, handler, method in routes:
        self._add_url_route(endpoint, rule, handler, method)
Add the routes for the scheduler API.
def com_google_fonts_check_family_single_directory(fonts):
    """Checking all files are in the same directory.

    Yields PASS when every font file lives in one directory, FAIL with
    the list of detected directories otherwise.
    """
    # dict.fromkeys de-duplicates while preserving first-seen order
    directories = list(dict.fromkeys(os.path.dirname(f) for f in fonts))
    if len(directories) == 1:
        yield PASS, "All files are in the same directory."
    else:
        yield FAIL, ("Not all fonts passed in the command line"
                     " are in the same directory. This may lead to"
                     " bad results as the tool will interpret all"
                     " font files as belonging to a single"
                     " font family. The detected directories are:"
                     " {}".format(directories))
Checking all files are in the same directory. If the set of font files passed in the command line is not all in the same directory, then we warn the user since the tool will interpret the set of files as belonging to a single family (and it is unlikely that the user would store the files from a single family spreaded in several separate directories).
def clip_lower(self, threshold):
    """Return a new SArray with every value clipped to be >= ``threshold``.

    Works on numeric arrays and on vector arrays, where each element of
    each vector is clipped individually.
    """
    no_upper_bound = float('nan')  # NaN signals "no upper clip" to the proxy
    with cython_context():
        proxy = self.__proxy__.clip(threshold, no_upper_bound)
        return SArray(_proxy=proxy)
Create new SArray with all values clipped to the given lower bound. This function can operate on numeric arrays, as well as vector arrays, in which case each individual element in each vector is clipped. Throws an exception if the SArray is empty or the types are non-numeric. Parameters ---------- threshold : float The lower bound used to clip values. Returns ------- out : SArray See Also -------- clip, clip_upper Examples -------- >>> sa = turicreate.SArray([1,2,3]) >>> sa.clip_lower(2) dtype: int Rows: 3 [2, 2, 3]
def update_app(id, config):
    """Update the specified Marathon app with the given configuration.

    CLI Example:

    .. code-block:: bash

        salt marathon-minion-id marathon.update_app my-app '<config yaml>'
    """
    config.setdefault('id', id)
    # strip read-only fields that Marathon rejects on update
    config.pop('version', None)
    config.pop('fetch', None)
    payload = salt.utils.json.dumps(config)
    try:
        response = salt.utils.http.query(
            "{0}/v2/apps/{1}?force=true".format(_base_url(), id),
            method='PUT',
            decode_type='json',
            decode=True,
            data=payload,
            header_dict={
                'Content-Type': 'application/json',
                'Accept': 'application/json',
            },
        )
        log.debug('update response: %s', response)
        return response['dict']
    except Exception as ex:
        log.error('unable to update marathon app: %s', get_error_message(ex))
        return {'exception': {'message': get_error_message(ex)}}
Update the specified app with the given configuration. CLI Example: .. code-block:: bash salt marathon-minion-id marathon.update_app my-app '<config yaml>'
def database_url(self):
    """Return a "database URL" for use with DJ-Database-URL and similar
    libraries, built from this object's connection attributes.
    """
    return 'postgres://%s:%s@%s/%s' % (
        self.user, self.password, self.name, self.database)
Returns a "database URL" for use with DJ-Database-URL and similar libraries.
def register(self, entity):
    """Register a new entity and return it with its ID populated.

    On HTTP 200 the server-assigned ID is stored on the entity.
    On HTTP 409 (already registered) the existing entity's ID is looked
    up by name and the remote entity is updated instead.

    :param entity: entity object exposing ``serialize``, ``id``, ``name``
    :return: the same entity, with ``id`` set
    """
    # Removed leftover debug print() calls that dumped the raw response.
    response = self.api.post_entity(entity.serialize)
    code = response['status']['code']
    if code == 200:
        entity.id = response['id']
    elif code == 409:
        # conflict: adopt the existing entity's id and push an update
        entity.id = next(i.id for i in self.api.agent_entities
                         if i.name == entity.name)
        self.update(entity)
    return entity
Registers a new entity and returns the entity object with an ID
def stop(self):
    """Disconnect; blocks until the receive and send threads have stopped."""
    # signal both worker loops to exit
    self.__end.set()
    receiver = self.__recv_thread
    if receiver:
        receiver.join()
        self.__recv_thread = None
    sender = self.__send_thread
    if sender:
        sender.join()
        self.__send_thread = None
disconnect, blocks until stopped
def getmergerequest(self, project_id, mergerequest_id):
    """Get information about a specific merge request.

    :param project_id: ID of the project
    :param mergerequest_id: ID of the merge request
    :return: dict of the merge request, or False on a non-200 response
    """
    url = '{0}/{1}/merge_request/{2}'.format(
        self.projects_url, project_id, mergerequest_id)
    response = requests.get(
        url,
        headers=self.headers,
        verify=self.verify_ssl,
        auth=self.auth,
        timeout=self.timeout)
    if response.status_code != 200:
        return False
    return response.json()
Get information about a specific merge request. :param project_id: ID of the project :param mergerequest_id: ID of the merge request :return: dict of the merge request
def common_cli_list_options(f):
    """Decorator that adds common list options (--page, --page-size) to a
    click command and injects the resolved options object as ``opts``.
    """
    # Decorators apply bottom-up: wraps(f) first, so click sees f's metadata.
    @click.option(
        "-p",
        "--page",
        default=1,
        type=int,
        help="The page to view for lists, where 1 is the first page",
        callback=validators.validate_page,
    )
    @click.option(
        "-l",
        "--page-size",
        default=30,
        type=int,
        help="The amount of items to view per page for lists.",
        callback=validators.validate_page_size,
    )
    @click.pass_context
    @functools.wraps(f)
    def wrapper(ctx, *args, **kwargs):
        # Resolve (or lazily create) the options object and hand it to f.
        opts = config.get_or_create_options(ctx)
        kwargs["opts"] = opts
        return ctx.invoke(f, *args, **kwargs)
    return wrapper
Add common list options to commands.
def parse_sacct(sacct_stream):
    """Parse job information out of ``sacct`` status output.

    Only rows whose first column is a numeric job ID are considered;
    header rows, separators and blank lines are skipped.

    :param sacct_stream: iterable of output lines from ``sacct``
    :return: list of jobs produced by ``convert_job``
    """
    rows = (line.split() for line in sacct_stream)
    # `row and` guards against blank lines, which split to an empty list
    # and previously raised IndexError on row[0]
    relevant_rows = (row for row in rows if row and row[0].isdigit())
    return [convert_job(row) for row in relevant_rows]
Parse out information from sacct status output.
def ensure_direct_subclass(class_, of):
    """Check whether given class is a direct subclass of another.

    :param class_: Class to check
    :param of: Superclass to check against
    :return: ``class_``, if the check succeeds
    :raise TypeError: When the check fails
    """
    if is_direct_subclass(class_, of):
        return class_
    raise TypeError("expected a direct subclass of %r, got %s instead" % (
        of, class_.__name__))
Check whether given class is a direct subclass of another. :param class_: Class to check :param of: Superclass to check against :return: ``class_``, if the check succeeds :raise TypeError: When the check fails .. versionadded:: 0.0.4
def _zoom(scale:uniform=1.0, row_pct:uniform=0.5, col_pct:uniform=0.5):
    "Zoom image by `scale`. `row_pct`,`col_pct` select focal point of zoom."
    shrink = 1 - 1/scale
    # map the (0..1) focal percentages to offsets in (-shrink..shrink)
    col_center = shrink * (2*col_pct - 1)
    row_center = shrink * (2*row_pct - 1)
    return _get_zoom_mat(1/scale, 1/scale, col_center, row_center)
Zoom image by `scale`. `row_pct`,`col_pct` select focal point of zoom.
def has_reset(self):
    """Check whether the grizzly reset itself (voltage sag or other reasons).

    The uptime counter only ever grows while the board is running, so a
    reading at or below the last seen value indicates a restart.  Useful
    to reinitialize acceleration or current limiting.
    """
    uptime = self._read_as_int(Addr.Uptime, 4)
    was_reset = uptime <= self._ticks
    self._ticks = uptime
    return was_reset
Checks the grizzly to see if it reset itself because of voltage sag or other reasons. Useful to reinitialize acceleration or current limiting.
def validate(filename, verbose=False):
    """Validate file and return the validator's JSON result as a dictionary.

    ``filename`` can be a local file name or an HTTP(S) URL; remote
    content is first downloaded into a temporary file.
    """
    is_remote = filename.startswith(("http://", "https://"))
    # remote input is spooled to a temp file so it can be POSTed as a file
    source = tempfile.TemporaryFile() if is_remote else open(filename, "rb")
    with source as f:
        if is_remote:
            # NOTE(review): verify=False disables TLS certificate checks
            # on both requests here -- confirm this is intentional.
            download = requests.get(filename, verify=False)
            f.write(download.content)
            f.seek(0)
        response = requests.post(
            HTML_VALIDATOR_URL,
            files={"file": (filename, f, "text/html")},
            data={
                "out": "json",
                "showsource": "yes",
            },
            verify=False)
        return response.json()
Validate file and return JSON result as dictionary. "filename" can be a file name or an HTTP URL. Return "" if the validator does not return valid JSON. Raise OSError if curl command returns an error status.
def closest_noaa(latitude, longitude):
    """Find the closest station to (latitude, longitude) from the old list.

    :return: (station_code, station_name) of the nearest station
    :raises KeyError: if the index contains no parsable stations
    :raises IOError: if a line of the inventory cannot be parsed
    """
    with open(env.SRC_PATH + '/inswo-stns.txt') as index:
        # skip the two header lines
        index.readline()
        index.readline()
        min_dist = 9999
        station_name = None
        station_code = None  # previously could be unbound -> NameError
        for line in index:
            try:
                info = parse_noaa_line(line)
                dist = great_circle(
                    (latitude, longitude),
                    (float(info['LAT']), float(info['LON']))).miles
            except Exception:
                logger.error(line)
                raise IOError('Inventory Issue')
            if dist < min_dist:
                min_dist = dist
                station_name = info['station_name']
                station_code = info['station_code']
    # the `with` block closes the file; the old explicit close was redundant
    if station_code is None:
        # previously unreachable (placed after the return)
        raise KeyError('station not found')
    return station_code, station_name
Find closest station from the old list.
def get_keys(self, keymap):
    """Extract the keys pressed from a transformed keymap.

    :param keymap: iterable of bitmask bytes, one per keymap position
    :return: dict with 'modifiers' (all pressed modifier names) and
        'regular' (at most the first pressed regular key)
    """
    pressed = dict(modifiers=[], regular=[])
    for position, mask_byte in enumerate(keymap):
        values = self._keymap_values_dict.get(position)
        if values is None:
            # position not described in the keymap table
            continue
        for bit, key_name in values.items():
            if not mask_byte & bit:
                continue
            if key_name in self._modifiers:
                pressed['modifiers'].append(key_name)
            elif not pressed['regular']:
                # only the first regular key is reported
                pressed['regular'].append(key_name)
    return pressed
Extract keys pressed from transformed keymap
def as_currency(self, currency='USD', locale=LOCALE_OBJ, *args, **kwargs):
    """Format subset as currency.

    :param currency: Currency code
    :param locale: Babel locale for currency formatting
    """
    # the bare `as_currency` below resolves to the module-level formatter
    # function, not this method
    currency_formatter = as_currency(currency=currency, locale=locale)
    formatter = Formatter(currency_formatter, args, kwargs)
    return self._add_formatter(formatter)
Format subset as currency :param currency: Currency :param locale: Babel locale for currency formatting :param subset: Pandas subset
def validate_context(self):
    """Ensure the context contains no duplicate objects.

    Duplicates could leave us with switched data: converting the context
    to a set drops duplicates, so a length mismatch reveals them.
    """
    if not self.context:
        return
    if len(set(self.context)) != len(self.context):
        LOGGER.error('Cannot have duplicated context objects')
        raise Exception('Cannot have duplicated context objects.')
Make sure there are no duplicate context objects or we might end up with switched data Converting the tuple to a set gets rid of the eventual duplicate objects, comparing the length of the original tuple and set tells us if we have duplicates in the tuple or not
def should_add_ServerHello(self):
    """Select a cipher suite and queue a ServerHello message.

    Selecting a cipher suite should be no trouble, as the None case was
    already caught previously.  Extensions are not managed at all.
    Raises ADDED_SERVERHELLO to drive the automaton to the next state.
    """
    # Pick the key-exchange family matching our private key type.
    if isinstance(self.mykey, PrivKeyRSA):
        kx = "RSA"
    elif isinstance(self.mykey, PrivKeyECDSA):
        kx = "ECDSA"
    usable_suites = get_usable_ciphersuites(self.cur_pkt.ciphers, kx)
    # Default to the client's first usable suite, but honour our
    # preferred suite when the client offered it.
    c = usable_suites[0]
    if self.preferred_ciphersuite in usable_suites:
        c = self.preferred_ciphersuite
    self.add_msg(TLSServerHello(cipher=c))
    # Exception-as-control-flow: signals the automaton state transition.
    raise self.ADDED_SERVERHELLO()
Selecting a cipher suite should be no trouble as we already caught the None case previously. Also, we do not manage extensions at all.
def gopro_heartbeat_send(self, status, capture_mode, flags, force_mavlink1=False):
    """Heartbeat from a HeroBus attached GoPro.

    status : Status (uint8_t)
    capture_mode : Current capture mode (uint8_t)
    flags : additional status bits (uint8_t)
    """
    message = self.gopro_heartbeat_encode(status, capture_mode, flags)
    return self.send(message, force_mavlink1=force_mavlink1)
Heartbeat from a HeroBus attached GoPro status : Status (uint8_t) capture_mode : Current capture mode (uint8_t) flags : additional status bits (uint8_t)
def get_client_entry(self, client_entry_name):
    """Return details of a specific client entry from the CPNR server."""
    url = self._build_url(['ClientEntry', client_entry_name])
    return self._do_request('GET', url)
Returns a specific client entry name details from CPNR server.
def login(self, user: str, passwd: str) -> None:
    """Log in to Instagram with the given username and password.

    Delegates to the underlying context, which internally stores the
    session object.
    """
    self.context.login(user, passwd)
Log in to instagram with given username and password and internally store session object. :raises InvalidArgumentException: If the provided username does not exist. :raises BadCredentialsException: If the provided password is wrong. :raises ConnectionException: If connection to Instagram failed. :raises TwoFactorAuthRequiredException: First step of 2FA login done, now call :meth:`Instaloader.two_factor_login`.
def parse_k(self, k, vals):
    """Parse the iterable from a property or boundary condition argument.

    Parameters
    ----------
    k : int, str, slice, iterable or None
        the raw argument
    vals : iterable of ints
        the acceptable values that k may contain

    Returns
    -------
    k_vals : iterable of int
        parsed k values
    """
    try:
        k = int(k)
    except:  # noqa: E722 -- anything non-int-convertible falls through
        pass
    else:
        # a single integer must be one of the acceptable values
        assert k in vals, "k {0} not in vals".format(k)
        return [k]
    if k is None:
        return vals
    try:
        # treat k as an index/slice into the acceptable values
        return vals[k]
    except Exception as e:
        raise Exception("error slicing vals with {0}:{1}".
                        format(k, str(e)))
parse the iterable from a property or boundary condition argument Parameters ---------- k : int or iterable int the iterable vals : iterable of ints the acceptable values that k may contain Returns ------- k_vals : iterable of int parsed k values
def rename_keys(d, keymap=None, list_of_dicts=False, deepcopy=True):
    """Rename keys in a (nested) dict.

    Parameters
    ----------
    d : dict
    keymap : dict
        dictionary of key name mappings
    list_of_dicts : bool
        treat list of dicts as additional branches
    deepcopy : bool
        deepcopy values

    Examples
    --------
    >>> from pprint import pprint
    >>> d = {'a':{'old_name':1}}
    >>> pprint(rename_keys(d,{'old_name':'new_name'}))
    {'a': {'new_name': 1}}
    """
    lod_marker = '__list__' if list_of_dicts else None
    mapping = {} if keymap is None else keymap
    flat = flatten(d, list_of_dicts=lod_marker)
    renamed = {}
    for path, value in flat.items():
        # apply the mapping to every component of the key path
        renamed[tuple(mapping.get(part, part) for part in path)] = value
    return unflatten(renamed, list_of_dicts=lod_marker, deepcopy=deepcopy)
rename keys in dict Parameters ---------- d : dict keymap : dict dictionary of key name mappings list_of_dicts: bool treat list of dicts as additional branches deepcopy: bool deepcopy values Examples -------- >>> from pprint import pprint >>> d = {'a':{'old_name':1}} >>> pprint(rename_keys(d,{'old_name':'new_name'})) {'a': {'new_name': 1}}
def gamma(phi1, phi2, theta1, theta2):
    """Calculate the third rotation angle.

    Inputs are the angular positions of two pulsars; returns the angle.
    """
    if phi1 == phi2 and theta1 == theta2:
        return 0
    numerator = sin(theta2) * sin(phi2 - phi1)
    denominator = (cos(theta1) * sin(theta2) * cos(phi1 - phi2)
                   - sin(theta1) * cos(theta2))
    angle = atan(numerator / denominator)
    # atan only resolves the angle up to pi; the sign of this expression
    # decides whether the result lies in the opposite half-plane
    branch_check = (cos(angle) * cos(theta1) * sin(theta2) * cos(phi1 - phi2)
                    + sin(angle) * sin(theta2) * sin(phi2 - phi1)
                    - cos(angle) * sin(theta1) * cos(theta2))
    return angle if branch_check >= 0 else pi + angle
calculate third rotation angle inputs are angles from 2 pulsars returns the angle.
def load_csv_stream(ctx, model, data, header=None, header_exclude=None,
                    **fmtparams):
    """Load a CSV from a stream.

    :param ctx: current anthem context
    :param model: model name as string or model klass
    :param data: csv data to load
    :param header: csv fieldnames whitelist
    :param header_exclude: csv fieldnames blacklist
    """
    csv_header, csv_rows = read_csv(data, **fmtparams)
    header = header if header else csv_header
    if not csv_rows:
        return
    if header != csv_header and not header_exclude:
        # a whitelist implies excluding every other original column
        header_exclude = [col for col in csv_header if col not in header]
    if header_exclude:
        header = [col for col in header if col not in header_exclude]
        drop_idxs = [csv_header.index(col) for col in header_exclude]
        rows = [
            [cell for idx, cell in enumerate(row) if idx not in drop_idxs]
            for row in csv_rows
        ]
    else:
        rows = list(csv_rows)
    if rows:
        load_rows(ctx, model, header, rows)
Load a CSV from a stream. :param ctx: current anthem context :param model: model name as string or model klass :param data: csv data to load :param header: csv fieldnames whitelist :param header_exclude: csv fieldnames blacklist Usage example:: from pkg_resources import Requirement, resource_stream req = Requirement.parse('my-project') load_csv_stream(ctx, ctx.env['res.users'], resource_stream(req, 'data/users.csv'), delimiter=',')
def port_channel_minimum_links(self, **kwargs):
    """Set minimum number of links in a port channel.

    Args:
        name (str): Port-channel number. (1, 5, etc)
        minimum_links (str): Minimum number of links in channel group.
        callback (function): A function executed upon completion of the
            method.  The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `name` or `minimum_links` is not specified.
        ValueError: if `name` is not a valid value.
    """
    name = str(kwargs.pop('name'))
    minimum_links = str(kwargs.pop('minimum_links'))
    callback = kwargs.pop('callback', self._callback)
    # validate before building args; the original error text contained a
    # garbled pattern (`^[0-9]{1,3}${1,3}$`)
    if not pynos.utilities.valid_interface('port_channel', name):
        raise ValueError("`name` must match `^[0-9]{1,3}$`")
    min_links_args = dict(name=name, minimum_links=minimum_links)
    config = getattr(
        self._interface, 'interface_port_channel_minimum_links'
    )(**min_links_args)
    return callback(config)
Set minimum number of links in a port channel. Args: name (str): Port-channel number. (1, 5, etc) minimum_links (str): Minimum number of links in channel group. callback (function): A function executed upon completion of the method. The only parameter passed to `callback` will be the ``ElementTree`` `config`. Returns: Return value of `callback`. Raises: KeyError: if `name` or `minimum_links` is not specified. ValueError: if `name` is not a valid value. Examples: >>> import pynos.device >>> switches = ['10.24.39.211', '10.24.39.203'] >>> auth = ('admin', 'password') >>> for switch in switches: ... conn = (switch, '22') ... with pynos.device.Device(conn=conn, auth=auth) as dev: ... output = dev.interface.port_channel_minimum_links( ... name='1', minimum_links='2') ... dev.interface.port_channel_minimum_links() ... # doctest: +IGNORE_EXCEPTION_DETAIL Traceback (most recent call last): KeyError
def initialize_simulants(self):
    """Initialize this simulation's population.

    Should not be called directly.  Extends the base implementation by
    snapshotting the freshly created population for later use.
    """
    super().initialize_simulants()
    # NOTE(review): the boolean flag's meaning depends on
    # get_population's signature -- presumably "include untracked
    # simulants"; confirm against the population manager.
    self._initial_population = self.population.get_population(True)
Initialize this simulation's population. Should not be called directly.
def rados_parse_df(self, result):
    """Parse ``rados df`` output captured by the ansible runner.

    For every contacted node, each data row of the ``rados df`` table is
    converted into a dict keyed by column name and stored on the node
    object under 'parsed_results'.

    :param result: runner result dict with per-node 'stdout'
    :return: the same result object, augmented with 'parsed_results'
    """
    HEADING = r".*(pool name) *(category) *(KB) *(objects) *(clones)" + \
        " *(degraded) *(unfound) *(rd) *(rd KB) *(wr) *(wr KB)"
    HEADING_RE = re.compile(HEADING, re.IGNORECASE)
    dict_keys = ["pool_name", "category", "size_kb", "objects", "clones",
                 "degraded", "unfound", "rd", "rd_kb", "wr", "wr_kb"]
    if result['contacted'].keys():
        for node in result['contacted'].keys():
            nodeobj = result['contacted'][node]
            # fresh list per node; previously one list accumulated rows
            # across all nodes
            parsed_results = []
            for line in nodeobj['stdout'].splitlines():
                if HEADING_RE.match(line):
                    continue
                row = line.split()
                if len(row) != len(dict_keys):
                    continue
                # build a new dict per row; previously a single dict was
                # mutated and re-appended, so every entry aliased the
                # last row parsed (also removed the debug prints)
                parsed_results.append(dict(zip(dict_keys, row)))
            nodeobj['parsed_results'] = parsed_results
    return result
Parse the result from ansirunner module and save it as a json object
def bind(node=None, source=None, destination=None,
         edge_title=None, edge_label=None, edge_color=None, edge_weight=None,
         point_title=None, point_label=None, point_color=None, point_size=None):
    """Create a base plotter. Typically called at start of a program.

    For parameters, see ``plotter.bind()``.

    :returns: Plotter.
    :rtype: Plotter.

    **Example**

    ::

        import graphistry
        g = graphistry.bind()
    """
    from . import plotter
    return plotter.Plotter().bind(
        source, destination, node,
        edge_title, edge_label, edge_color, edge_weight,
        point_title, point_label, point_color, point_size)
Create a base plotter. Typically called at start of a program. For parameters, see ``plotter.bind()`` . :returns: Plotter. :rtype: Plotter. **Example** :: import graphistry g = graphistry.bind()
def _gen_key(self, key): b_key = self._md5_digest(key) return self._hashi(b_key, lambda x: x)
Return long integer for a given key, that represent it place on the hash ring.