code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def rnd_date_list_high_performance(size, start=date(1970, 1, 1), end=None, **kwargs):
    """Generate a list of random dates in bulk.

    :param size: int, number of dates to generate
    :param start: date-like object, int / str / date / datetime
    :param end: date-like object, defaults to today's date
    :param kwargs: args placeholder
    :return: list of datetime.date
    """
    if end is None:
        end = date.today()
    lo = to_ordinal(parser.parse_datetime(start))
    hi = to_ordinal(parser.parse_datetime(end))
    _assert_correct_start_end(lo, hi)
    if has_np:
        # numpy path draws all ordinals in one vectorized call
        return [from_ordinal(days) for days in np.random.randint(lo, hi, size)]
    return [from_ordinal(random.randint(lo, hi)) for _ in range(size)]
def start_event_stream(self):
    """Start streaming events from `gerrit stream-events`."""
    if not self._stream:
        # Lazily create the stream worker on first use, then start it.
        self._stream = GerritStream(self, ssh_client=self._ssh_client)
        self._stream.start()
def add_probe(self, probe):
    """Add a Probe instance to this hop's results.

    A probe missing its IP inherits both IP and name from the most
    recently recorded probe.
    """
    if self.probes:
        previous = self.probes[-1]
        if not probe.ip:
            probe.ip = previous.ip
            probe.name = previous.name
    self.probes.append(probe)
def compare_dicts(d1, d2): a = json.dumps(d1, indent=4, sort_keys=True) b = json.dumps(d2, indent=4, sort_keys=True) diff = ('\n' + '\n'.join(difflib.ndiff( a.splitlines(), b.splitlines()))) return diff
Returns a diff string of the two dicts.
def qpinfo():
    """Print information about a quantitative phase imaging dataset."""
    args = qpinfo_parser().parse_args()
    path = pathlib.Path(args.path).resolve()
    try:
        ds = load_data(path)
    except UnknownFileFormatError:
        print("Unknown file format: {}".format(path))
        return
    print("{} ({})".format(ds.__class__.__doc__, ds.__class__.__name__))
    print("- number of images: {}".format(len(ds)))
    for key in ds.meta_data:
        print("- {}: {}".format(key, ds.meta_data[key]))
def get_uuid(type=4):
    """Return the hex digest of a freshly generated UUID.

    :param type: UUID version (selects ``uuid.uuid<type>``); default 4.
    """
    import uuid
    factory = getattr(uuid, 'uuid' + str(type))
    return factory().hex
def write_to(self, f, version=None):
    """Write the arc header to the file-like object ``f``.

    If the version field is 1 an arc v1 header is written; any other
    value (and this is the default) produces a v2 header.
    """
    if not version:
        version = self.version
    if version == 1:
        template = "%(url)s %(ip_address)s %(date)s %(content_type)s %(length)s"
    else:
        # The docstring promises v2 output for anything other than v1;
        # the previous `elif version == 2` left the template unbound
        # (NameError) for any other version value.
        template = ("%(url)s %(ip_address)s %(date)s %(content_type)s "
                    "%(result_code)s %(checksum)s %(location)s %(offset)s "
                    "%(filename)s %(length)s")
    header = template % dict(url=self['url'],
                             ip_address=self['ip_address'],
                             date=self['date'],
                             content_type=self['content_type'],
                             result_code=self['result_code'],
                             checksum=self['checksum'],
                             location=self['location'],
                             offset=self['offset'],
                             filename=self['filename'],
                             length=self['length'])
    f.write(header)
def unique_file_name(base_name, extension=''):
    """Create a unique file name based on the specified base name.

    @base_name - The base name to use for the unique file name.
    @extension - The file extension to use for the unique file name.

    Returns a unique file string (``base-<n>.ext`` is tried with
    increasing n until the name does not exist on disk).
    """
    if extension and not extension.startswith('.'):
        extension = '.%s' % extension
    candidate = base_name + extension
    counter = 0
    while os.path.exists(candidate):
        candidate = "%s-%d%s" % (base_name, counter, extension)
        counter += 1
    return candidate
def _log_every_n_to_logger(n, logger, level, message, *args):
    """Log the given message periodically.

    Args:
        n: Number of calls between log emissions.
        logger: The logger to which to log (root logger when falsy).
        level: The logging level (e.g. logging.INFO).
        message: A message to log.
        *args: Any format args for the message.

    Returns:
        A zero-argument callable; it returns False on the first ``n``
        calls of each cycle, then logs and returns True once.
    """
    logger = logger or logging.getLogger()

    def _cycle():
        while True:
            for _ in range(n):
                yield False
            logger.log(level, message, *args)
            yield True

    gen = _cycle()
    return lambda: next(gen)
def _opening_bracket_index(self, text, bpair=('(', ')')):
    """Return the index of the opening bracket that matches the closing
    bracket at the end of ``text`` (None when unbalanced)."""
    depth = 1
    # Walk backwards from just before the trailing closing bracket.
    for offset, ch in enumerate(reversed(text[:-1])):
        if ch == bpair[1]:
            depth += 1
        elif ch == bpair[0]:
            depth -= 1
            if depth == 0:
                return len(text) - offset - 2
def create_zone(zone, private=False, vpc_id=None, vpc_region=None, region=None,
                key=None, keyid=None, profile=None):
    """Create a Route53 hosted zone.

    .. versionadded:: 2015.8.0

    Returns False when the zone already exists, True after creating it.
    ``vpc_id`` and ``vpc_region`` are required for private zones.

    CLI Example::

        salt myminion boto_route53.create_zone example.org
    """
    if region is None:
        region = 'universal'
    if private and not (vpc_id and vpc_region):
        msg = 'vpc_id and vpc_region must be specified for a private zone'
        raise SaltInvocationError(msg)
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    if conn.get_zone(zone):
        return False
    conn.create_zone(zone, private_zone=private, vpc_id=vpc_id,
                     vpc_region=vpc_region)
    return True
def word(cap=False):
    """Generate a fake word by joining two or three random syllables.

    When ``cap`` is true the first letter is upper-cased.
    """
    parts = [_syllable() for _ in range(random.randint(2, 3))]
    result = "".join(parts)
    if cap:
        result = result[0].upper() + result[1:]
    return result
def pipe_exchangerate(context=None, _INPUT=None, conf=None, **kwargs):
    """Retrieve the current exchange rate for a given currency pair. Loopable.

    :param context: pipe2py.Context object
    :param _INPUT: iterable of items or strings (base currency)
    :param conf: dict with 'quote', 'default' and 'offline' settings
    :return: generator of hashed strings
    """
    use_offline = conf.get('offline', {}).get('value')
    raw = get_offline_rate_data(err=False) if use_offline else get_rate_data()
    rates = parse_request(raw)
    splits = get_splits(_INPUT, conf, **cdicts(opts, kwargs))
    parsed = utils.dispatch(splits, *get_dispatch_funcs())
    return starmap(partial(parse_result, rates=rates), parsed)
def to_cfn_resource_name(name):
    """Transform ``name`` into a valid CamelCase CFN resource name.

    Collisions are possible after conversion; callers must handle them.
    Raises ValueError for an empty name.
    """
    if not name:
        raise ValueError("Invalid name: %r" % name)
    for separator in ('-', '_'):
        pieces = [part for part in name.split(separator) if part]
        name = ''.join(piece[0].upper() + piece[1:] for piece in pieces)
    # Strip any remaining non-alphanumeric characters.
    return re.sub(r'[^A-Za-z0-9]+', '', name)
def xmoe_2d():
    """Two-dimensional hierarchical mixture of 16 experts."""
    hparams = xmoe_top_2()
    hparams.decoder_layers = ["att", "hmoe"] * 4
    hparams.mesh_shape = "b0:2;b1:4"
    hparams.outer_batch_size = 4
    hparams.layout = "outer_batch:b0;inner_batch:b1,expert_x:b1,expert_y:b0"
    hparams.moe_num_experts = [4, 4]
    return hparams
def get_experiment_from_id(self, experiment_id):
    """Get experiment for the provided experiment ID.

    Args:
        experiment_id: Experiment ID for which experiment is to be determined.

    Returns:
        Experiment corresponding to the provided experiment ID, or None.
    """
    experiment = self.experiment_id_map.get(experiment_id)
    if experiment:
        return experiment
    self.logger.error('Experiment ID "%s" is not in datafile.' % experiment_id)
    self.error_handler.handle_error(
        exceptions.InvalidExperimentException(
            enums.Errors.INVALID_EXPERIMENT_KEY_ERROR))
    return None
def _send_ack(self, transaction):
    """Send an empty ACK message for the request, when still required.

    :param transaction: the transaction that owns the request
    """
    ack = Message()
    ack.type = defines.Types['ACK']
    request = transaction.request
    # Only CON requests that have not yet been acknowledged need an ACK.
    if not request.acknowledged and request.type == defines.Types["CON"]:
        ack = self._messageLayer.send_empty(transaction, request, ack)
        self.send_datagram(ack)
def are_equal(self, sp1, sp2):
    """True if there is some overlap in composition between the species.

    Args:
        sp1: First species (exposes ``elements``).
        sp2: Second species (exposes ``elements``).

    Returns:
        True when either element set contains the other.
    """
    elems1 = set(sp1.elements)
    elems2 = set(sp2.elements)
    return elems1 <= elems2 or elems2 <= elems1
def _check_repos(self, repos):
    """Check if repodata urls are valid."""
    self._checking_repos = []
    self._valid_repos = []
    for repo in repos:
        checker = self.download_is_valid_url(repo)
        checker.sig_finished.connect(self._repos_checked)
        checker.repo = repo
        self._checking_repos.append(repo)
def _resolve_dtype(data_type):
    """Retrieve the corresponding NumPy ``dtype`` spec for ``data_type``."""
    if isinstance(data_type, _FIXED_ATOMIC):
        resolved = _get_atomic_dtype(data_type)
    elif isinstance(data_type, _FLEXIBLE_ATOMIC):
        resolved = (_get_atomic_dtype(data_type), data_type.length)
    elif isinstance(data_type, Array):
        shape = data_type.shape
        # Collapse a one-element shape sequence to a scalar dimension.
        if isinstance(shape, _SEQUENCE_TYPES) and len(shape) == 1:
            shape = shape[0]
        resolved = (_resolve_dtype(data_type.element_type), shape)
    elif isinstance(data_type, Structure):
        resolved = [(field.name, _resolve_dtype(field.type))
                    for field in data_type.fields]
    return resolved
def find_key_by_subkey(self, subkey):
    """Find a key by a fingerprint of one of its subkeys.

    :param str subkey: The fingerprint of the subkey to search for.
    :raises LookupError: when no key owns the subkey.
    """
    for candidate in self.list_keys():
        if any(entry[0] == subkey for entry in candidate['subkeys']):
            return candidate
    raise LookupError(
        "GnuPG public key for subkey %s not found!" % subkey)
def registry_key(self, key_name, value_name, value_type, **kwargs):
    """Add Registry Key data to the Batch object.

    Args:
        key_name (str): The key_name value for this Indicator.
        value_name (str): The value_name value for this Indicator.
        value_type (str): The value_type value for this Indicator.
        **kwargs: Optional confidence, date_added, last_modified,
            rating and xid values.

    Returns:
        obj: An instance of Registry Key.
    """
    indicator = RegistryKey(key_name, value_name, value_type, **kwargs)
    return self._indicator(indicator)
def load_module(module_name, module_path):
    """Load the module named ``module_name`` from ``module_path``,
    independently of the Python version."""
    if sys.version_info >= (3, 0):
        import pyximport
        pyximport.install()
        sys.path.append(module_path)
        return __import__(module_name)
    # Python 2 path: use the deprecated imp machinery.
    import imp
    module_info = imp.find_module(module_name, [module_path])
    return imp.load_module(module_name, *module_info)
def current(self):
    """Return the topmost transaction.

    .. note:: Returns None if the topmost element on the stack is not
       a transaction.
    """
    top = super(Transaction, self).current()
    if isinstance(top, Transaction):
        return top
    return None
def _get_digit_list(alist: [str], from_index: int) -> ([str], [str]):
    """Pop the item at ``from_index`` and then every following all-digit
    item; return the modified list together with the popped digit items."""
    digits = []
    # The item at from_index itself is discarded unconditionally.
    alist.pop(from_index)
    while len(alist) > from_index and alist[from_index].isdigit():
        digits.append(alist.pop(from_index))
    return alist, digits
def add_root_family(self, family_id):
    """Adds a root family.

    arg:    family_id (osid.id.Id): the ``Id`` of a family
    raise:  AlreadyExists - ``family_id`` is already in hierarchy
    raise:  NotFound - ``family_id`` not found
    raise:  NullArgument - ``family_id`` is ``null``
    raise:  OperationFailed - unable to complete request
    raise:  PermissionDenied - authorization failure
    *compliance: mandatory -- This method must be implemented.*
    """
    if self._catalog_session is not None:
        return self._catalog_session.add_root_catalog(catalog_id=family_id)
    return self._hierarchy_session.add_root(id_=family_id)
def dim_reduce_data(data, d):
    """Classical MDS performed directly on the data, not on the means.

    Args:
        data (array): genes x cells
        d (int): desired dimensionality

    Returns:
        X, a cells x d embedding matrix
    """
    genes, cells = data.shape
    distances = np.zeros((cells, cells))
    for i in range(cells):
        for j in range(cells):
            distances[i, j] = poisson_dist(data[:, i], data[:, j])
    proximity = distances ** 2
    # Double centering of the squared distance matrix.
    J = np.eye(cells) - 1. / cells
    B = -0.5 * np.dot(J, np.dot(proximity, J))
    e_val, e_vec = np.linalg.eigh(B)
    # eigh returns eigenvalues in ascending order; take the top d in
    # descending order.  The previous code applied [::-1] to the diagonal
    # *matrix* (turning it anti-diagonal) and reversed the eigenvector
    # *rows* rather than columns, scrambling the embedding.
    lam = np.diag(e_val[-d:][::-1])
    E = e_vec[:, -d:][:, ::-1]
    X = np.dot(E, lam ** 0.5)
    return X
def expand(directory: str) -> str:
    """Apply expanduser and expandvars to ``directory`` to expand '~'
    and environment variables."""
    return os.path.expandvars(os.path.expanduser(directory))
def iter(self, columnnames, order='', sort=True):
    """Return a :class:`tableiter` object.

    Each iteration step yields a reference table containing equal values
    for the given columns. By default the table is sorted on those
    columns first; pass ``sort=False`` when it is already ordered.
    ``order`` may be 'ascending' (default) or 'descending'.
    """
    from .tableiter import tableiter
    return tableiter(self, columnnames, order, sort)
def _get_log_format(self, request):
    """Return the operation log format, or None when the request should
    not be logged (anonymous user, untargeted method, ignored URL)."""
    user = getattr(request, 'user', None)
    if not user:
        return
    if not request.user.is_authenticated:
        return
    method = request.method.upper()
    if method not in self.target_methods:
        return
    request_url = urlparse.unquote(request.path)
    if any(rule.search(request_url) for rule in self._ignored_urls):
        return
    return self.format
def list_():
    """List installed Perl modules and the version installed.

    CLI Example:

    .. code-block:: bash

        salt '*' cpan.list
    """
    modules = {}
    output = __salt__['cmd.run']('cpan -l').splitlines()
    for line in output:
        fields = line.split()
        modules[fields[0]] = fields[1]
    return modules
def _request(self, http_method, relative_url='', **kwargs):
    """Perform the actual HTTP request using the requests library."""
    relative_url = self._remove_leading_slash(relative_url)
    request_kwargs = self.default_kwargs().copy()
    # before_request may rewrite the caller-supplied kwargs.
    request_kwargs.update(
        self.before_request(http_method, relative_url, kwargs.copy())
    )
    response = requests.request(
        http_method,
        self._join_url(relative_url),
        **request_kwargs
    )
    return self.after_request(response)
def registerExitCall():
    r"""Registers an exit call to start the core.

    The core would be started after the main module is loaded.
    Ec would be exited from the core.
    """
    if state.isExitHooked:
        return
    state.isExitHooked = True
    from atexit import register
    register(core.start)
def trim(self):
    """Trim the sample on propensity score to improve covariate balance.

    Observations with pscore outside [cutoff, 1-cutoff] are dropped.
    Set the ``cutoff`` attribute before calling; a cutoff of 0 is a
    no-op. Requires the propensity score to have been estimated.
    """
    if 0 < self.cutoff <= 0.5:
        pscore = self.raw_data['pscore']
        keep = (pscore >= self.cutoff) & (pscore <= 1 - self.cutoff)
        trimmed = Data(self.raw_data['Y'][keep],
                       self.raw_data['D'][keep],
                       self.raw_data['X'][keep])
        trimmed._dict['pscore'] = pscore[keep]
        self.raw_data = trimmed
        self.summary_stats = Summary(self.raw_data)
        self.strata = None
        self.estimates = Estimators()
    elif self.cutoff == 0:
        pass
    else:
        raise ValueError('Invalid cutoff.')
def generate_tensor_filename(self, field_name, file_num, compressed=True):
    """Generate the on-disk filename for a tensor chunk."""
    ext = COMPRESSED_TENSOR_EXT if compressed else TENSOR_EXT
    return os.path.join(self.filename, 'tensors',
                        '%s_%05d%s' % (field_name, file_num, ext))
def deconstruct(self):
    """Remove choices from the deconstructed field, as this is the
    country list and not user editable.

    ``blank_label`` is deliberately not included, as it is not database
    related.
    """
    name, path, args, kwargs = super(CountryField, self).deconstruct()
    kwargs.pop("choices")
    if self.multiple:
        kwargs["multiple"] = self.multiple
    if self.countries is not countries:
        # A custom Countries class was supplied; record it for migrations.
        kwargs["countries"] = self.countries.__class__
    return name, path, args, kwargs
def from_string(cls, string):
    """Convert a string into a Dimension.

    When the string names a known unit, the unit is attached; otherwise
    the string is kept as a bare description.
    """
    if string in units.UNITS_BY_ALL:
        return cls(description=string, unit=units.Unit(string))
    return cls(description=string)
def _build_field_choices(self, fields):
    """Build (query, label) choices from the passed model fields,
    sorted case-insensitively by label, then append FIELD_CHOICES."""
    pairs = [(fquery, capfirst(fname)) for fquery, fname in fields.items()]
    pairs.sort(key=lambda pair: pair[1].lower())
    return tuple(pairs) + self.FIELD_CHOICES
def longest_one_seg_prefix(self, word):
    """Return longest Unicode IPA prefix of a word.

    Args:
        word (unicode): input word as Unicode IPA string

    Returns:
        unicode: longest single-segment prefix of ``word`` in the
        database, or the empty string when none matches.
    """
    for length in range(self.longest_seg, 0, -1):
        prefix = word[:length]
        if prefix in self.seg_dict:
            return prefix
    return ''
def check_password_confirm(self, form, trigger_action_group=None):
    """Check that the password and the confirm password match in the
    provided form.

    Does nothing when either password field is absent from the form.
    """
    pwcol = self.options['password_column']
    confirm_field = pwcol + "_confirm"
    if pwcol not in form or confirm_field not in form:
        return
    if form[confirm_field].data == form[pwcol].data:
        return
    if self.options["password_confirm_failed_message"]:
        flash(self.options["password_confirm_failed_message"], "error")
    current_context.exit(trigger_action_group=trigger_action_group)
def _setup_buffer(self):
    """Setup the buffer subsystem (no-op without a dict buffer config)."""
    if not self._buffer_cfg or not isinstance(self._buffer_cfg, dict):
        return
    buffer_name = list(self._buffer_cfg.keys())[0]
    buffer_class = napalm_logs.buffer.get_interface(buffer_name)
    log.debug('Setting up buffer interface "%s"', buffer_name)
    # Fall back to the global default expiry when none is configured.
    self._buffer_cfg[buffer_name].setdefault('expire_time',
                                             CONFIG.BUFFER_EXPIRE_TIME)
    self._buffer = buffer_class(**self._buffer_cfg[buffer_name])
def _connect(self):
    """Create a Unix domain socket connection."""
    conn = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    conn.settimeout(self.socket_timeout)
    conn.connect(self.path)
    return conn
def truncate_rationale(rationale, max_length=MAX_RATIONALE_SIZE_IN_EVENT):
    """Truncate the rationale for analytics event emission if necessary.

    Args:
        rationale (string): the string value of the rationale
        max_length (int): the max length for truncation

    Returns:
        truncated_value (string): the possibly truncated rationale
        was_truncated (bool): True when the rationale was truncated
    """
    needs_trim = (isinstance(rationale, basestring)
                  and max_length is not None
                  and len(rationale) > max_length)
    if needs_trim:
        return rationale[:max_length], True
    return rationale, False
def _get_module(self, line):
    """Find the name of the module in the source and retrieve it from the
    parser cache, updating section state past the module's CONTAINS."""
    for sline in self._source:
        if sline and sline[0] != "!":
            match = self.RE_MODULE.match(sline)
            if match is not None:
                self.modulename = match.group("name")
                break
    else:
        # No module statement found anywhere in the source.
        return
    self.parser.isense_parse(self._orig_path, self.modulename)
    self.module = self.parser.modules[self.modulename]
    if line > self.module.contains_index:
        self.section = "contains"
def nth_value(expr, nth, skip_nulls=False, sort=None, ascending=True):
    """Get nth value of a grouped and sorted expression.

    :param expr: expression for calculation
    :param nth: integer position
    :param skip_nulls: whether to skip null values, False by default
    :param sort: name of the sort column
    :param ascending: whether to sort in ascending order
    :return: calculated column
    """
    return _cumulative_op(expr, NthValue, data_type=expr._data_type,
                          sort=sort, ascending=ascending,
                          _nth=nth, _skip_nulls=skip_nulls)
def crypto_pwhash_str_alg(passwd, opslimit, memlimit, alg):
    """Derive a key from ``passwd`` and a random salt, returning a string
    that embeds the salt, the tuning parameters and the algorithm.

    :param passwd: bytes input password
    :param opslimit: int computational cost
    :param memlimit: int memory cost
    :param alg: int algorithm identifier
    :return: serialized derived key and parameters (bytes)
    """
    ensure(isinstance(opslimit, integer_types), raising=TypeError)
    ensure(isinstance(memlimit, integer_types), raising=TypeError)
    ensure(isinstance(passwd, bytes), raising=TypeError)
    _check_argon2_limits_alg(opslimit, memlimit, alg)
    outbuf = ffi.new("char[]", 128)
    rc = lib.crypto_pwhash_str_alg(outbuf, passwd, len(passwd),
                                   opslimit, memlimit, alg)
    ensure(rc == 0,
           'Unexpected failure in key derivation',
           raising=exc.RuntimeError)
    return ffi.string(outbuf)
def WritePythonFile(file_descriptor, package, version, printer):
    """Write the given extended file descriptor to out."""
    _WriteFile(file_descriptor, package, version, _ProtoRpcPrinter(printer))
def init_config(self):
    """Patch input.nml as a new or restart run."""
    input_fpath = os.path.join(self.work_path, 'input.nml')
    nml = f90nml.read(input_fpath)
    # 'n' marks an initial run, 'r' a restart.
    is_initial = self.expt.counter == 0 or self.expt.repeat_run
    nml['MOM_input_nml']['input_filename'] = 'n' if is_initial else 'r'
    f90nml.write(nml, input_fpath, force=True)
def _compute_radii(self):
    """Generate RBF radii.

    Uses user-supplied radii when present; otherwise defaults to a
    heuristic based on the maximum pairwise distance between centers.
    """
    radii = self._get_user_components('radii')
    if radii is None:
        centers = self.components_['centers']
        n_centers = centers.shape[0]
        max_dist = np.max(pairwise_distances(centers))
        radii = np.ones(n_centers) * max_dist / sqrt(2.0 * n_centers)
    self.components_['radii'] = radii
def assert_visible(self, selector, testid=None, **kwargs):
    """Assert that the element is visible in the dom.

    Args:
        selector (str): the selector used to find the element
        testid (str): the test_id or a str

    Kwargs:
        wait_until_visible (bool)
        highlight (bool)

    Returns:
        bool: True if the assertion succeeds; False otherwise.
    """
    self.info_log(
        "Assert visible selector(%s) testid(%s)" % (selector, testid)
    )
    highlight = kwargs.get(
        'highlight',
        BROME_CONFIG['highlight']['highlight_on_assertion_success']
    )
    self.debug_log("effective highlight: %s" % highlight)
    wait_until_visible = kwargs.get(
        'wait_until_visible',
        BROME_CONFIG['proxy_driver']['wait_until_visible_before_assert_visible']
    )
    self.debug_log("effective wait_until_visible: %s" % wait_until_visible)
    if wait_until_visible:
        self.wait_until_visible(selector, raise_exception=False)
    element = self.find(
        selector,
        raise_exception=False,
        wait_until_visible=False,
        wait_until_present=False
    )
    success = bool(element) and element.is_displayed(raise_exception=False)
    if success and highlight:
        element.highlight(
            style=BROME_CONFIG['highlight']['style_on_assertion_success']
        )
    if testid is not None:
        self.create_test_result(testid, success)
    return success
def toggle_value(request, name):
    """For manual shortcut links to perform toggle actions."""
    obj = service.system.namespace.get(name, None)
    if not obj or service.read_only:
        raise Http404
    obj.status = not obj.status
    new_status = obj.status
    if service.redirect_from_setters:
        return HttpResponseRedirect(
            reverse('set_ready', args=(name, new_status)))
    return set_ready(request, name, new_status)
def set_subnet_name(name):
    """Set the local subnet name.

    :param str name: The new local subnet name

    .. note:: Spaces are changed to dashes; other special characters
       are removed by the underlying tool.

    :return: True if successful, False if not
    :rtype: bool

    CLI Example:

    .. code-block:: bash

        salt '*' system.set_subnet_name "Mike's Mac"
    """
    cmd = 'systemsetup -setlocalsubnetname "{0}"'.format(name)
    __utils__['mac_utils.execute_return_success'](cmd)
    return __utils__['mac_utils.confirm_updated'](
        name,
        get_subnet_name,
    )
def save(self):
    """Write the object's tags back to the file.

    May throw `UnreadableFileError`.
    """
    kwargs = {}
    if self.id3v23:
        # Downgrade ID3 tags to v2.3 before saving.
        tags = self.mgfile
        if hasattr(tags, 'tags'):
            tags = tags.tags
        tags.update_to_v23()
        kwargs['v2_version'] = 3
    mutagen_call('save', self.path, self.mgfile.save, **kwargs)
def get(self, robj, r=None, pr=None, timeout=None, basic_quorum=None,
        notfound_ok=None, head_only=False):
    """Serialize a get request and deserialize the response."""
    msg_code = riak.pb.messages.MSG_CODE_GET_REQ
    codec = self._get_codec(msg_code)
    request = codec.encode_get(robj, r, pr, timeout,
                               basic_quorum, notfound_ok, head_only)
    resp_code, resp = self._request(request, codec)
    return codec.decode_get(robj, resp)
def get_books(self):
    """Gets the book list resulting from a search.

    return: (osid.commenting.BookList) - the book list
    raise:  IllegalState - list has already been retrieved
    *compliance: mandatory -- This method must be implemented.*
    """
    if self.retrieved:
        raise errors.IllegalState('List has already been retrieved.')
    self.retrieved = True
    return objects.BookList(self._results, runtime=self._runtime)
def markdown(text, html=False, valid_tags=GFM_TAGS):
    """Return Markdown rendered text using GitHub Flavoured Markdown,
    with HTML escaped (unless ``html``) and syntax-highlighting enabled."""
    if text is None:
        return None
    if html:
        sanitized = sanitize_html(markdown_convert_html(gfm(text)),
                                  valid_tags=valid_tags)
        return Markup(sanitized)
    return Markup(markdown_convert_text(gfm(text)))
def validate(self, value):
    """Validate a numeric string against [fmin, fmax]; reject anything
    more precise than 0.00001. Returns the value unchanged, or None
    when invalid."""
    try:
        if value:
            number = float(value)
            if (number != 0 and number < self.fmin) or number > self.fmax:
                return None
            # Reject values with more than 5 decimal places.
            if abs(round(100000 * number) - 100000 * number) > 1.e-12:
                return None
        return value
    except ValueError:
        return None
def get(self, *args, **kwargs):
    """Retrieve a collection of objects."""
    self.before_get(args, kwargs)
    qs = QSManager(request.args, self.schema)
    objects_count, objects = self.get_collection(qs, kwargs)
    schema_kwargs = getattr(self, 'get_schema_kwargs', dict())
    schema_kwargs.update({'many': True})
    self.before_marshmallow(args, kwargs)
    schema = compute_schema(self.schema, schema_kwargs, qs, qs.include)
    result = schema.dump(objects).data
    if getattr(self, 'view_kwargs', None) is True:
        view_kwargs = request.view_args
    else:
        view_kwargs = dict()
    add_pagination_links(result, objects_count, qs,
                         url_for(self.view, _external=True, **view_kwargs))
    result.update({'meta': {'count': objects_count}})
    return self.after_get(result)
def latch_file_info(self, args):
    """Extract the file paths from a set of arguments into
    ``self.file_dict`` (keys have any '.gz' suffix stripped)."""
    self.file_dict.clear()
    for key, val in self.file_args.items():
        try:
            file_path = args[key]
        except KeyError:
            # Argument not supplied; skip this entry.
            continue
        if file_path is None:
            continue
        if key[0:4] == 'args':
            if isinstance(file_path, list):
                tokens = file_path
            elif isinstance(file_path, str):
                tokens = file_path.split()
            else:
                raise TypeError(
                    "Args has type %s, expect list or str" % type(file_path))
            for token in tokens:
                self.file_dict[token.replace('.gz', '')] = val
        else:
            self.file_dict[file_path.replace('.gz', '')] = val
def conversations(self):
    """Get the conversation topics for an input targeting criteria."""
    body = {
        "conversation_type": self.conversation_type,
        "audience_definition": self.audience_definition,
        "targeting_inputs": self.targeting_inputs,
    }
    return self.__get(account=self.account, client=self.account.client,
                      params=json.dumps(body))
def OnCellBorderWidth(self, event):
    """Cell border width event handler."""
    # Group the attribute change into a single undo step.
    with undo.group(_("Border width")):
        self.grid.actions.set_border_attr("borderwidth",
                                          event.width, event.borders)
    self.grid.ForceRefresh()
    self.grid.update_attribute_toolbar()
    event.Skip()
def account_xdr_object(self):
    """Create PublicKey XDR object via public key bytes.

    :return: Serialized XDR of PublicKey type.
    """
    return Xdr.types.PublicKey(Xdr.const.KEY_TYPE_ED25519,
                               self.verifying_key.to_bytes())
def dar_nombre_campo_dbf(clave, claves):
    """Shorten a field name to 10 characters, without spaces or '_',
    avoiding any name already present in ``claves``."""
    nombre = clave.replace("_", "")[:10]
    suffix = 0
    while nombre in claves:
        suffix += 1
        # Keep 9 chars and append a numeric suffix until unique.
        nombre = nombre[:9] + str(suffix)
    return nombre.lower()
def get_all_trials(self):
    """Returns a list of all trials' information."""
    response = requests.get(urljoin(self._path, "trials"))
    return self._deserialize(response)
def check_config_options(_class, required_options, optional_options, options):
    """Helper method to check options.

    Arguments:
        _class -- the original class that received the options.
        required_options -- tuple of options that must be present;
            a ConfigurationError is raised for the first one missing.
        optional_options -- tuple of allowed extra options; anything
            else is logged as unrecognized.
        options -- a dictionary of given options.

    Raises:
        ConfigurationError -- if any required option is missing.
    """
    missing = [opt for opt in required_options if opt not in options]
    if missing:
        raise ConfigurationError(
            "Required option missing: {0}".format(missing[0]))
    known = required_options + optional_options
    for opt in options:
        if opt not in known:
            _logger.warn(
                "Unknown config option to `{0}`: {1}".format(_class, opt))
def get_fieldset_index(fieldsets, index_or_name):
    """Return the index of a fieldset in the ``fieldsets`` list.

    Args:
        fieldsets (list): The original ``fieldsets`` list.
        index_or_name (int or str): The name of the reference fieldset,
            or directly its numeric index (passed through unchanged).

    Returns:
        (int) The index of the fieldset in the ``fieldsets`` list.

    Raises:
        KeyError: when the name is not found.
    """
    if isinstance(index_or_name, six.integer_types):
        return index_or_name
    for position, fieldset in enumerate(fieldsets):
        if fieldset[0] == index_or_name:
            return position
    raise KeyError("Key not found: '{}'.".format(index_or_name))
def commandify(use_argcomplete=False, exit=True, *args, **kwargs):
    """Turns decorated functions into command line args.

    Finds the main_command and all commands and generates command line
    args from these, then dispatches to the selected command.
    """
    parser = CommandifyArgumentParser(*args, **kwargs)
    parser.setup_arguments()
    if use_argcomplete:
        try:
            import argcomplete
        except ImportError:
            print('argcomplete not installed, please install it.')
            parser.exit(status=2)
        argcomplete.autocomplete(parser)
    args = parser.parse_args()
    if exit:
        parser.dispatch_commands()
        parser.exit(0)
    else:
        return parser.dispatch_commands()
def progress_task(name=None, t=INFO, max_value=100, *args, **kwargs):
    """Extension of the basic @task decorator that displays progress on
    the console; the module receives increments via "tick_progress"."""
    return task(name=name, t=t, init_progress=True, max_value=max_value,
                *args, **kwargs)
def _write_index_file(data_dir):
    """Create an index file of cached translations.

    Parameters
    ----------
    data_dir : pathlib.Path
        Cache directory location.
    """
    cached_words = [
        w for w in _get_words(data_dir)
        if data_dir.joinpath("translations/{}.html".format(w)).is_file()
    ]
    content = _create_index_content(cached_words)
    page = HTML_TEMPLATE.replace("{% word %}", "Index")
    page = page.replace("{% content %}", content)
    save_file(data_dir.joinpath("index.html"), page, mk_parents=True)
def _parseImageNtHeaders(self, data, imageDosHeader):
    """Parse and return the ImageNtHeaders; raises BinaryError for an
    invalid PE signature."""
    nt_header = self._classes.IMAGE_NT_HEADERS.from_buffer(
        data, imageDosHeader.header.e_lfanew)
    if nt_header.Signature != b'PE':
        raise BinaryError('No valid PE/COFF file')
    return ImageNtHeaderData(header=nt_header)
def print_settings(settings, depth=0):
    """Print all available settings of the device."""
    if isinstance(settings, Setting):
        settings = [settings]
    indent = " " * depth
    for setting in settings:
        cur = setting.currentValue
        print(
            "%s* %s (%s, value: %s, type: %s)"
            % (
                indent,
                setting.title,
                setting.target,
                click.style(cur, bold=True),
                setting.type,
            )
        )
        for opt in setting.candidate:
            if not opt.isAvailable:
                logging.debug("Unavailable setting %s", opt)
                continue
            # The currently selected candidate is rendered in bold.
            click.echo(
                click.style(
                    "%s - %s (%s)" % (indent, opt.title, opt.value),
                    bold=opt.value == cur,
                )
            )
def add_screenshot(self, screenshot):
    """Add a screenshot object if it does not already exist."""
    if screenshot not in self.screenshots:
        self.screenshots.append(screenshot)
def get_python_path(venv_path):
    """Get given virtual environment's `python` program path.

    :param venv_path: Virtual environment directory path.
    :return: `python` program path (with `.exe` suffix on Windows).
    """
    suffix = '.exe' if sys.platform.startswith('win') else ''
    return os.path.join(get_bin_path(venv_path), 'python') + suffix
Get given virtual environment's `python` program path. :param venv_path: Virtual environment directory path. :return: `python` program path.
def execute_sql(self, query):
    """Executes a given query string on an open postgres database.

    :param query: SQL string to run.
    :return: list of fetched rows, or an empty list when the statement
        produced no result set.
    """
    cursor = self.con.cursor()
    cursor.execute(query)
    if cursor.rowcount <= 0:
        return []
    try:
        return cursor.fetchall()
    except psycopg2.ProgrammingError:
        # Statement affected rows but has no result set to fetch.
        return []
Executes a given query string on an open postgres database.
def minimise_tables(routing_tables, target_lengths,
                    methods=(remove_default_entries, ordered_covering)):
    """Attempt to minimise routing tables for multiple chips.

    For each routing table supplied, try the minimisation methods in
    order until the target number of entries for that chip is reached.

    Parameters
    ----------
    routing_tables : {(x, y): [RoutingTableEntry, ...], ...}
        Dictionary mapping chip co-ordinates to routing tables.
    target_lengths : int or {(x, y): int or None, ...} or None
        Maximum routing table length, either globally (int / None) or
        per chip (dict).
    methods :
        Minimisation algorithms, tried in order; cheaper algorithms
        should come first.

    Returns
    -------
    {(x, y): [RoutingTableEntry, ...], ...}
        Minimised routing tables (empty tables are omitted).

    Raises
    ------
    MinimisationFailedError
        If no method can sufficiently minimise a table; the exception's
        ``chip`` attribute identifies the offending chip.
    """
    if isinstance(target_lengths, dict):
        lengths = target_lengths
    else:
        # Same limit for every chip.
        lengths = collections.defaultdict(lambda: target_lengths)
    minimised = dict()
    for chip, table in iteritems(routing_tables):
        try:
            result = minimise_table(table, lengths[chip], methods)
        except MinimisationFailedError as exc:
            # Annotate the failure with the chip it occurred on.
            exc.chip = chip
            raise
        if result:
            minimised[chip] = result
    return minimised
Utility function which attempts to minimises routing tables for multiple chips. For each routing table supplied, this function will attempt to use the minimisation algorithms given (or some sensible default algorithms), trying each sequentially until a target number of routing entries has been reached. Parameters ---------- routing_tables : {(x, y): [\ :py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...} Dictionary mapping chip co-ordinates to the routing tables associated with that chip. NOTE: This is the data structure as returned by :py:meth:`~rig.routing_table.routing_tree_to_tables`. target_lengths : int or {(x, y): int or None, ...} or None Maximum length of routing tables. If an integer this is assumed to be the maximum length for any table; if a dictionary then it is assumed to be a mapping from co-ordinate to maximum length (or None); if None then tables will be minimised as far as possible. methods : Each method is tried in the order presented and the first to meet the required target length for a given chip is used. Consequently less computationally costly algorithms should be nearer the start of the list. The defaults will try to remove default routes (:py:meth:`rig.routing_table.remove_default_routes.minimise`) and then fall back on the ordered covering algorithm (:py:meth:`rig.routing_table.ordered_covering.minimise`). Returns ------- {(x, y): [:py:class:`~rig.routing_table.RoutingTableEntry`, ...], ...} Minimised routing tables, guaranteed to be at least as small as the table sizes specified by `target_lengths`. Raises ------ MinimisationFailedError If no method can sufficiently minimise a table.
def AddFXrefRead(self, method, classobj, field):
    """Add a Field Read to this class.

    :param method: method performing the read.
    :param classobj: class in which the read occurs.
    :param field: field being read.
    """
    analysis = self._fields.get(field)
    if analysis is None:
        # First time this field is seen: create its analysis record.
        analysis = FieldClassAnalysis(field)
        self._fields[field] = analysis
    analysis.AddXrefRead(classobj, method)
Add a Field Read to this class :param method: :param classobj: :param field: :return:
def rescale_taps(taps):
    """Rescale taps so that their sum equals 1.

    :param taps: sequence of filter tap coefficients.
    :return: list of floats whose sum is 1.
    """
    # Force a float dtype: the previous in-place loop built an int
    # array from integer input, so `taps[i] = x / cs` truncated every
    # rescaled value to 0.
    taps = np.asarray(taps, dtype=float)
    return (taps / taps.sum()).tolist()
Rescale taps so that their sum equals 1.
def handle_simulation_end(self, data_portal):
    """When the simulation is complete, run the full period risk report
    and send it out on the results socket.

    :param data_portal: data source handed through to end_of_simulation.
    :return: dict packet populated by ``self.end_of_simulation``.
    """
    # NOTE: brace-style lazy formatting — `log` is presumably a logbook
    # logger, not stdlib logging; confirm before changing.
    log.info(
        'Simulated {} trading days\n'
        'first open: {}\n'
        'last close: {}',
        self._session_count,
        self._trading_calendar.session_open(self._first_session),
        self._trading_calendar.session_close(self._last_session),
    )
    # end_of_simulation mutates `packet` in place; the populated dict
    # is the return value.
    packet = {}
    self.end_of_simulation(
        packet,
        self._ledger,
        self._trading_calendar,
        self._sessions,
        data_portal,
        self._benchmark_source,
    )
    return packet
When the simulation is complete, run the full period risk report and send it out on the results socket.
def recarray_to_hdf5_group(ra, parent, name, **kwargs):
    """Write each column in a recarray to a dataset in an HDF5 group.

    Parameters
    ----------
    ra : recarray
        Numpy recarray to store.
    parent : string or h5py group
        Parent HDF5 file or group. If a string, treated as an HDF5
        file name to open in append mode.
    name : string
        Name or path of group to write data into.
    kwargs : keyword arguments
        Passed through to array_to_hdf5 for each column.

    Returns
    -------
    h5g : h5py group
    """
    import h5py

    opened = None
    if isinstance(parent, str):
        opened = h5py.File(parent, mode='a')
        parent = opened
    try:
        h5g = parent.require_group(name)
        for column in ra.dtype.names:
            array_to_hdf5(ra[column], h5g, column, **kwargs)
        return h5g
    finally:
        # Only close a file handle we opened ourselves.
        if opened is not None:
            opened.close()
Write each column in a recarray to a dataset in an HDF5 group. Parameters ---------- ra : recarray Numpy recarray to store. parent : string or h5py group Parent HDF5 file or group. If a string, will be treated as HDF5 file name. name : string Name or path of group to write data into. kwargs : keyword arguments Passed through to h5py require_dataset() function. Returns ------- h5g : h5py group
def get_subject_without_validation(jwt_bu64):
    """Extract subject from the JWT without validating the JWT.

    - The extracted subject cannot be trusted for authn or authz.

    Args:
      jwt_bu64: bytes
        JWT, encoded using a URL safe flavor of Base64.

    Returns:
      str: The subject contained in the JWT, or None on any failure
      (errors are logged).
    """
    try:
        jwt_dict = get_jwt_dict(jwt_bu64)
    except JwtException as e:
        return log_jwt_bu64_info(logging.error, str(e), jwt_bu64)
    if 'sub' in jwt_dict:
        return jwt_dict['sub']
    log_jwt_dict_info(logging.error, 'Missing "sub" key', jwt_dict)
Extract subject from the JWT without validating the JWT. - The extracted subject cannot be trusted for authn or authz. Args: jwt_bu64: bytes JWT, encoded using a a URL safe flavor of Base64. Returns: str: The subject contained in the JWT.
def update_total(self, n=1):
    """Increment the progress bar's total by ``n`` and redraw.

    :param n: amount to add to the total (default 1).
    """
    with self._lock:
        # Guarded by the lock so concurrent updates don't race.
        self._pbar.total = self._pbar.total + n
        self.refresh()
Increment total pbar value.
def find_and_refine_peaks(self, threshold, min_separation=1.0,
                          use_cumul=False):
    """Run a simple peak-finding algorithm, and fit the peaks to
    paraboloids to extract their positions and error ellipses.

    Parameters
    ----------
    threshold : float
        Peak threshold in TS.
    min_separation : float
        Radius of region size in degrees.  Sets the minimum allowable
        separation between peaks.
    use_cumul : bool
        If true, use the cumulative TS map (TS summed over the energy
        bins) instead of the TS map from the powerlaw fit.

    Returns
    -------
    peaks : list
        List of dictionaries containing the location and amplitude of
        each peak; each entry is augmented with 'fit_loc' and
        'fit_skydir' from the ellipse fit.
    """
    search_map = self._ts_cumul if use_cumul else self._tsmap
    peaks = find_peaks(search_map, threshold, min_separation)
    for peak in peaks:
        fit, fit_skydir = fit_error_ellipse(
            search_map, (peak['ix'], peak['iy']), dpix=2)
        peak['fit_loc'] = fit
        peak['fit_skydir'] = fit_skydir
        # The original also selected fit_skydir vs skydir based on
        # o['fit_success'] into a local that was never used — dead
        # store removed.
    return peaks
Run a simple peak-finding algorithm, and fit the peaks to paraboloids to extract their positions and error ellipses. Parameters ---------- threshold : float Peak threshold in TS. min_separation : float Radius of region size in degrees. Sets the minimum allowable separation between peaks. use_cumul : bool If true, used the cumulative TS map (i.e., the TS summed over the energy bins) instead of the TS Map from the fit to and index=2 powerlaw. Returns ------- peaks : list List of dictionaries containing the location and amplitude of each peak. Output of `~fermipy.sourcefind.find_peaks`
def style_print(*values, **kwargs):
    """A convenience function that applies style_format to text before
    printing.

    The optional ``style`` keyword is popped from ``kwargs`` and passed
    to style_format; remaining kwargs go to ``print``.
    """
    style = kwargs.pop("style", None)
    styled = [style_format(value, style) for value in values]
    print(*styled, **kwargs)
A convenience function that applies style_format to text before printing
def convertstatsmethod(method_str):
    """Convert statistics method name to 'ave', 'min', or 'max'.

    Unrecognized inputs fall back to 'ave'.
    """
    if StringClass.string_match(method_str, 'Average'):
        return 'ave'
    if StringClass.string_match(method_str, 'Maximum'):
        return 'max'
    if StringClass.string_match(method_str, 'Minimum'):
        return 'min'
    lowered = method_str.lower()
    if lowered in ('ave', 'max', 'min'):
        return lowered
    return 'ave'
Convert statistics method to ave, min, and max.
def template(self):
    """Returns the template in JSON form, loading and caching it on
    first access."""
    if not self._template:
        # Lazily load and parse the template, then memoize it.
        raw = self.read_template(self.args.tmplname)
        self._template = loads(raw)
    return self._template
Returns the template in JSON form
def enum_choice_list(data):
    """Creates the argparse choices and type kwargs for a supplied enum
    type or list of strings.

    :param data: an Enum type (values are used) or a list of strings.
    :return: dict with 'choices' and 'type' keys, or {} for empty input.
    """
    if not data:
        return {}
    try:
        choices = [item.value for item in data]
    except AttributeError:
        # Plain list of strings rather than an enum.
        choices = data

    def _type(value):
        # Map case-insensitively back to the canonical choice; leave
        # falsy or unmatched values untouched.
        if not value:
            return value
        return next((c for c in choices if c.lower() == value.lower()), value)

    return {
        'choices': CaseInsensitiveList(choices),
        'type': _type,
    }
Creates the argparse choices and type kwargs for a supplied enum type or list of strings
def _register_update(self, method='isel', replot=False, dims=None, fmt=None,
                     force=False, todefault=False):
    """Register new dimensions and formatoptions for updating

    Parameters
    ----------
    %(InteractiveArray._register_update.parameters)s
    """
    # None sentinels replace the previous mutable default dicts, which
    # were shared between calls. A falsy dims/fmt behaves exactly as
    # {} did below (bool(dims) is False either way).
    if dims is None:
        dims = {}
    if fmt is None:
        fmt = {}
    ArrayList._register_update(self, method=method, dims=dims)
    InteractiveBase._register_update(
        self, fmt=fmt, todefault=todefault, replot=bool(dims) or replot,
        force=force)
Register new dimensions and formatoptions for updating Parameters ---------- %(InteractiveArray._register_update.parameters)s
async def turn_on(self, switch=None):
    """Turn on relay.

    :param switch: optional relay identifier (hex digit string); when
        None a bare 0x0a command is sent instead — presumably a
        broadcast turn-on; confirm against the device protocol.
    :return: states returned by the device.
    """
    if switch is None:
        payload = b"\x0a"
    else:
        # Zero-pad to two hex digits, then decode to a raw byte.
        switch = codecs.decode(switch.rjust(2, '0'), 'hex')
        payload = b"\x10" + switch + b"\x01"
    packet = self.protocol.format_packet(payload)
    return await self._send(packet)
Turn on relay.
def valid_conkey(self, conkey):
    """Check that the conkey is a valid one. Return True if valid.

    A condition key is valid if it starts with one of the prefixes in
    the _COND_PREFIXES list and the remainder is either a number or the
    empty string.
    """
    for prefix in _COND_PREFIXES:
        # startswith + slicing instead of the previous
        # str.lstrip(prefix): lstrip strips a *character set*, not a
        # prefix, so it mangled keys whose tail shared characters with
        # the prefix and accepted malformed keys.
        if not conkey or not conkey.startswith(prefix):
            continue
        trailing = conkey[len(prefix):]
        if trailing == '':
            return True
        try:
            int(trailing)
            return True
        except ValueError:
            pass
    return False
Check that the conkey is a valid one. Return True if valid. A condition key is valid if it is one in the _COND_PREFIXES list. With the prefix removed, the remaining string must be either a number or the empty string.
def Expand(self, macro_ref_str):
    """Expands the macro reference.

    Args:
      macro_ref_str: String of a macro reference (i.e. foo(a, b)).

    Returns:
      The text from the expansion.

    Raises:
      PDDMError if there are any issues.
    """
    match = _MACRO_RE.match(macro_ref_str)
    # The match must cover the entire string, not just a prefix.
    if match is None or match.group(0) != macro_ref_str:
        raise PDDMError('Failed to parse macro reference: "%s"' % macro_ref_str)
    name = match.group('name')
    if name not in self._macros:
        raise PDDMError('No macro named "%s".' % name)
    return self._Expand(match, [], macro_ref_str)
Expands the macro reference. Args: macro_ref_str: String of a macro reference (i.e. foo(a, b)). Returns: The text from the expansion. Raises: PDDMError if there are any issues.
def _print_registers(self, registers):
    """Print registers, one per line, as `name : hex (decimal)`.

    :param registers: mapping of register name -> integer value.
    """
    for name, value in registers.items():
        print(" %s : 0x%08x (%d)" % (name, value, value))
Print registers.
def ctime(self, timezone=None):
    """Returns a ctime string.

    :param timezone: seconds west of UTC in which to render the value;
        defaults to the timezone the instance was constructed with.
        Use 0 for UTC.
    """
    tz = self.timezone if timezone is None else timezone
    return time.ctime(self.__timestamp__ - tz)
Returns a ctime string. :param timezone = self.timezone The timezone (in seconds west of UTC) to return the value in. By default, the timezone used when constructing the class is used (local one by default). To use UTC, use timezone = 0. To use the local tz, use timezone = chronyk.LOCALTZ.
def param(self, name, default=None):
    """Convenient function for returning an arbitrary MAVLink parameter
    with a default.

    :param name: parameter name to look up in self.params.
    :param default: value returned when the parameter is absent.
    """
    if name in self.params:
        return self.params[name]
    return default
convenient function for returning an arbitrary MAVLink parameter with a default
async def add(self, setname, ip, timeout=0):
    """Adds the given IP address to the given ipset.

    If a timeout is given, the IP will stay in the ipset for the given
    duration. Else it's added forever. The resulting command looks like
    this:

    ``ipset add -exist ellis_blacklist4 192.0.2.10 timeout 14400``
    """
    command_args = ['add', '-exist', setname, ip, 'timeout', timeout]
    return await self.start(__class__.CMD, *command_args)
Adds the given IP address to the given ipset. If a timeout is given, the IP will stay in the ipset for the given duration. Else it's added forever. The resulting command looks like this: ``ipset add -exist ellis_blacklist4 192.0.2.10 timeout 14400``
def _pre_train(self, stop_param_updates, num_epochs, updates_epoch): updates = {k: stop_param_updates.get(k, num_epochs) * updates_epoch for k, v in self.params.items()} single_steps = {k: np.exp(-((1.0 - (1.0 / v))) * self.params[k]['factor']) for k, v in updates.items()} constants = {k: np.exp(-self.params[k]['factor']) / v for k, v in single_steps.items()} return constants
Set parameters and constants before training.
def index_all_layers(self):
    """Index all layers in search engine.

    With celery enabled, cache the valid/invalid layer id sets for the
    async indexers; otherwise index every layer synchronously.
    """
    from hypermap.aggregator.models import Layer

    if settings.REGISTRY_SKIP_CELERY:
        for layer in Layer.objects.all():
            index_layer(layer.id)
        return
    valid_ids = set(
        Layer.objects.filter(is_valid=True).values_list('id', flat=True))
    invalid_ids = set(
        Layer.objects.filter(is_valid=False).values_list('id', flat=True))
    cache.set('layers', valid_ids)
    cache.set('deleted_layers', invalid_ids)
Index all layers in search engine.
def load_from_file(cls, file_path):
    """Load the meta data given a file_path or empty meta data.

    :param file_path: path of the JSON metadata file.
    :return: instance built from the parsed data, or from ``None`` when
        the file does not exist.
    """
    data = None
    if os.path.exists(file_path):
        # Context manager closes the handle even if parsing raises;
        # the previous version leaked the open file object.
        with open(file_path) as metadata_file:
            data = json.loads(metadata_file.read())
    return cls(initial=data)
Load the meta data given a file_path or empty meta data
def data_to_unicode(self, data):
    """Recursively convert a list or dictionary to unicode.

    Args:
        data: The data to be converted.

    Returns:
        The same structure with every key and value passed through
        ``self.to_unicode``.
    """
    if isinstance(data, dict):
        # .items() works on both Python 2 and 3; the previous
        # .iteritems() call is Python-2-only and breaks on Python 3.
        return {self.to_unicode(k): self.to_unicode(v)
                for k, v in data.items()}
    if isinstance(data, list):
        return [self.to_unicode(item) for item in data]
    return self.to_unicode(data)
Recursively convert a list or dictionary to unicode. Args: data: The data to be converted. Returns: The converted data.
def get_fields(self):
    """Resolve self.columns into property (field) instances.

    Each column may be:
    - a dotted string 'model.prop' -> looked up via get_model;
    - a bare string -> looked up in this model's properties;
    - a Column object -> resolved through its table's model;
    - anything else -> used as-is.
    """
    columns = self.columns
    model = self.model
    fields = []
    for col in columns:
        # NOTE: `unicode` makes this Python-2-only code.
        if isinstance(col, (str, unicode)):
            v = col.split('.')
            if len(v) > 1:
                # NOTE(review): `.properties(v[1])` *calls* properties
                # here, while the Column branch below *subscripts*
                # `.properties[col.name]` — one of the two is likely
                # wrong; confirm against the model API.
                field = get_model(v[0], engine_name=self.model.get_engine_name(), signal=False).properties(v[1])
            else:
                field = model.properties[col]
        elif isinstance(col, Column):
            field = get_model(col.table.name, engine_name=self.model.get_engine_name(), signal=False).properties[col.name]
        else:
            # Presumably already a property instance.
            field = col
        fields.append(field)
    return fields
get property instance according self.columns
def _convert_args(handler, args): args = list(args) params = inspect.signature(handler).parameters for i, (arg, name) in enumerate(zip(args, params)): default = params[name].default annotation = params[name].annotation if annotation != inspect.Parameter.empty: if isinstance(annotation, type) and annotation != str: args[i] = annotation(arg) elif default != inspect.Parameter.empty: if default is not None and not isinstance(default, str): args[i] = type(default)(arg) return args
Convert a list of command arguments to types specified by the handler. Args: handler: a command handler function. args: the list of string arguments to pass to handler. Returns: A new list containing `args` that have been converted to the expected type for `handler`. For each function parameter of `handler` that has either an explicit type annotation or a non-None default value, the corresponding element in `args` is converted to that type.