code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def round(self):
    """Round coordinates.

    Rounds the anchor, bcpIn and bcpOut coordinate pairs in place.
    """
    for attr in ('anchor', 'bcpIn', 'bcpOut'):
        x, y = getattr(self, attr)
        setattr(self, attr, (normalizers.normalizeRounding(x),
                             normalizers.normalizeRounding(y)))
def structure_transform(self, original_structure, new_structure, refine_rotation=True):
    """Transforms a tensor from one basis for an original structure into a
    new basis defined by a new structure.

    Args:
        original_structure (Structure): structure corresponding to the
            basis of the current tensor
        new_structure (Structure): structure corresponding to the
            desired basis
        refine_rotation (bool): whether to refine the rotations
            generated in get_ieee_rotation

    Returns:
        Tensor that has been transformed such that its basis corresponds
        to the new_structure's basis
    """
    sm = StructureMatcher()
    # A mismatch only warns; the transform is still attempted.
    if not sm.fit(original_structure, new_structure):
        warnings.warn("original and new structures do not match!")
    # Rotate into the IEEE frame of the original structure, then out of
    # the IEEE frame of the new one (transpose == inverse of a rotation).
    trans_1 = self.get_ieee_rotation(original_structure, refine_rotation)
    trans_2 = self.get_ieee_rotation(new_structure, refine_rotation)
    new = self.rotate(trans_1)
    new = new.rotate(np.transpose(trans_2))
    return new
def allDecisions(self, result, **values):
    """Like ``decision`` but for multiple found values.

    Returns:
        A list of lists of found elements, or — when only one match key
        was requested — a single list of strings.
    """
    found = self.__getDecision(result, multiple=True, **values)
    extracted = [found[key] for key in result]
    return extracted[0] if len(extracted) == 1 else extracted
def _push_property_schema(self, prop):
    """Construct a sub-schema from a property of the current schema."""
    sub_schema = Schema(self._schema.properties[prop])
    self._push_schema(sub_schema, ".properties." + prop)
def get_teams(self):
    """Return the JSON current roster of NBA teams."""
    return self.make_request(
        host="erikberg.com",
        sport='nba',
        method="teams",
        id=None,
        format="json",
        parameters={},
    )
def canonical(self):
    """Return the canonical Cursor corresponding to this Cursor.

    The canonical cursor is the cursor which is representative for the
    underlying entity. For example, if you have multiple forward
    declarations for the same class, the canonical cursor for the
    forward declarations will be identical.
    """
    # Lazily compute and cache the result on first access.
    if not hasattr(self, '_canonical'):
        self._canonical = conf.lib.clang_getCanonicalCursor(self)

    return self._canonical
def json_success(self, json):
    """Check the JSON response object for the success flag.

    Parameters
    ----------
    json : dict
        A dictionary representing a JSON object from lendingclub.com

    Returns
    -------
    bool
        True when the object has ``'result' == 'success'``.
    """
    # isinstance is preferred over a type() comparison (accepts dict
    # subclasses too), and .get avoids the double key lookup.
    return isinstance(json, dict) and json.get('result') == 'success'
def to_pfull_from_phalf(arr, pfull_coord):
    """Compute data at full pressure levels from values at half levels.

    Averages each adjacent pair of half-level values, relabelling both
    shifted arrays onto the full-level coordinate first so they align.
    """
    # Half levels 1..N (drop the first level).
    phalf_top = arr.isel(**{internal_names.PHALF_STR: slice(1, None)})
    phalf_top = replace_coord(phalf_top, internal_names.PHALF_STR, internal_names.PFULL_STR, pfull_coord)
    # Half levels 0..N-1 (drop the last level).
    phalf_bot = arr.isel(**{internal_names.PHALF_STR: slice(None, -1)})
    phalf_bot = replace_coord(phalf_bot, internal_names.PHALF_STR, internal_names.PFULL_STR, pfull_coord)
    # Midpoint average of the bracketing half levels.
    return 0.5*(phalf_bot + phalf_top)
def recursive_setattr(obj: Any, attr: str, val: Any) -> Any:
    """Recursive ``setattr`` that understands dotted attribute paths.

    This can be used as a drop-in for the standard ``setattr(...)``.
    Credit to: https://stackoverflow.com/a/31174427

    Args:
        obj: Object to set the attribute on.
        attr: Name of the attribute, with each successive attribute
            separated by a ".".
        val: Value to set the attribute to.

    Returns:
        ``None`` (the return value of the underlying ``setattr``).

    Raises:
        AttributeError: If an intermediate attribute in the dotted path
            was not found (raised by ``recursive_getattr``).
    """
    pre, _, post = attr.rpartition('.')
    # Walk down to the object owning the final attribute, then set it there.
    return setattr(recursive_getattr(obj, pre) if pre else obj, post, val)
def set_progress_message(self, message, line_break=False):
    """Set a progress message.

    The string needs to contain `{progress}`.

    :param message: format string handed to the default progress printer.
    :param line_break: if True, every update ends with a newline instead
        of rewriting the same terminal line with a carriage return.
    """
    # '\r' rewrites the current line; None lets print() use its default '\n'.
    end = '\r' if not line_break else None

    @self.connect
    def on_progress(value, value_max, **kwargs):
        # Always finish with a newline on the final update.
        kwargs['end'] = None if value == value_max else end
        _default_on_progress(message, value, value_max, **kwargs)
async def _on_trace_notification(self, trace_event): conn_string = trace_event.get('connection_string') payload = trace_event.get('payload') await self.notify_event(conn_string, 'trace', payload)
Callback function called when a trace chunk is received. Args: trace_chunk (dict): The received trace chunk information
def cookiestring(self, value):
    """Cookie string setter: parse *value* and store it as a dict."""
    parsed = Cookie.SimpleCookie(value)
    self.cookies = {morsel.key: morsel.value for morsel in parsed.values()}
def trigger_streamer(*inputs, **kwargs):
    """Trigger a streamer based on the index read from input b.

    Reads one value from the second input stream and passes it to the
    'mark_streamer' callable supplied in kwargs; all inputs are always
    drained via skip_all.

    Returns:
        list(IOTileReading): a single dummy reading on success, an empty
        list if the input was empty or the marker rejected the value.
    """
    streamer_marker = kwargs['mark_streamer']

    try:
        reading = inputs[1].pop()
    except StreamEmptyError:
        return []
    finally:
        # Drain every input regardless of success — runs before either
        # return path above.
        for input_x in inputs:
            input_x.skip_all()

    try:
        streamer_marker(reading.value)
    except ArgumentError:
        return []

    return [IOTileReading(0, 0, 0)]
def _is_domain_match(domain: str, hostname: str) -> bool:
    """Implements domain matching adhering to RFC 6265."""
    if hostname == domain:
        return True

    if not hostname.endswith(domain):
        return False

    # The part before the matched suffix must end at a label boundary.
    prefix = hostname[:-len(domain)]
    if not prefix.endswith("."):
        return False

    # IP addresses never domain-match.
    return not is_ip_address(hostname)
def _save_fastq_space(items):
    """Potentially save fastq space prior to merging, since alignments done.

    Collects every input file that lives under the work directory and
    hands it to utils.save_diskspace for cleanup.
    """
    to_cleanup = {}
    for data in (utils.to_single_data(x) for x in items):
        for fname in data.get("files", []):
            # Only touch files inside the pipeline's own work directory.
            if os.path.realpath(fname).startswith(dd.get_work_dir(data)):
                to_cleanup[fname] = data["config"]
    for fname, config in to_cleanup.items():
        utils.save_diskspace(fname, "Cleanup prep files after alignment finished", config)
def dumps(obj, indent=None, default=None, sort_keys=False, **kw):
    """Serialize *obj* to a YAML string."""
    encoder = YAMLEncoder(indent=indent, default=default,
                          sort_keys=sort_keys, **kw)
    return encoder.encode(obj)
def _check_required_fields(self, fields=None, either_fields=None): for (key, value) in fields.items(): if not value: raise HSException("Field '%s' is required." % key) if either_fields is not None: for field in either_fields: if not any(field.values()): raise HSException("One of the following fields is required: %s" % ", ".join(field.keys()))
Check the values of the fields If no value found in `fields`, an exception will be raised. `either_fields` are the fields that one of them must have a value Raises: HSException: If no value found in at least one item of`fields`, or no value found in one of the items of `either_fields` Returns: None
def _create_field(self, field_id): config = self._field_configs[field_id] adapter = self._adapters[config['type']] if 'name' in config: name = config['name'] else: name = None if 'size' in config: columns = config['size'] else: columns = None if 'values' in config: values = config['values'] else: values = None field = adapter.get_field(name, columns, values) if 'results_name' in config: field = field.setResultsName(config['results_name']) else: field = field.setResultsName(field_id) return field
Creates the field with the specified parameters. :param field_id: identifier for the field :return: the basic rule for the field
def count(args):
    """%prog count t.coveragePerBase fastafile

    Serialize the genomeCoverage results. The coordinate system of the
    count array will be based on the fastafile.
    """
    p = OptionParser(count.__doc__)
    opts, args = p.parse_args(args)

    if len(args) != 2:
        sys.exit(not p.print_help())

    coveragefile, fastafile = args

    # Output filename derived from the coverage file's first stem segment.
    countsfile = coveragefile.split(".")[0] + ".bin"
    if op.exists(countsfile):
        # Refuse to clobber an existing serialization.
        logging.error("`{0}` file exists. Remove before proceed."\
                      .format(countsfile))
        return

    fastasize, sizes, offsets = get_offsets(fastafile)
    logging.debug("Initialize array of uint8 with size {0}".format(fastasize))
    # One byte per genome base; counts saturate at 255 (uint8).
    ar = np.zeros(fastasize, dtype=np.uint8)

    update_array(ar, coveragefile, sizes, offsets)

    ar.tofile(countsfile)
    logging.debug("Array written to `{0}`".format(countsfile))
def search_users(self, user):
    """Search for LDAP users.

    Args:
        user: User to search for. It is not entirely clear how the JSS
            determines the results - are regexes allowed, or globbing?

    Returns:
        LDAPUsersResults object.

    Raises:
        Will raise a JSSGetError if no results are found.
    """
    endpoint = "%s/%s/%s" % (self.url, "user", user)
    return LDAPUsersResults(self.jss, self.jss.get(endpoint))
def save(self):
    """Save the Animal, forcing Alive=False when a Death date is set.

    The save method for the Animal class is over-ridden to set
    Alive=False when a Death date is entered. This is not the case for a
    cause of death.
    """
    if self.Death:
        self.Alive = False
    super(Animal, self).save()
def _extract_intensities(image, mask = slice(None)): return numpy.array(image, copy=True)[mask].ravel()
Internal, single-image version of `intensities`.
def inception_v3_arg_scope(weight_decay=0.00004, stddev=0.1, batch_norm_var_collection='moving_vars'):
    """Defines the default InceptionV3 arg scope.

    Args:
        weight_decay: The weight decay to use for regularizing the model.
        stddev: The standard deviation of the truncated normal weight
            initializer.
        batch_norm_var_collection: The name of the collection for the
            batch norm variables.

    Returns:
        An `arg_scope` to use for the inception v3 model.
    """
    batch_norm_params = {
        # Decay for the moving averages.
        'decay': 0.9997,
        # epsilon to prevent 0s in variance.
        'epsilon': 0.001,
        # Collection containing update_ops.
        'updates_collections': tf.GraphKeys.UPDATE_OPS,
        # Collections for the batch norm moving statistics.
        'variables_collections': {
            'beta': None,
            'gamma': None,
            'moving_mean': [batch_norm_var_collection],
            'moving_variance': [batch_norm_var_collection],
        }
    }
    # Apply weight decay to all conv2d and fully_connected layers.
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        weights_regularizer=slim.l2_regularizer(weight_decay)):
        with slim.arg_scope([slim.conv2d],
                            weights_initializer=tf.truncated_normal_initializer(stddev=stddev),
                            activation_fn=tf.nn.relu,
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params) as sc:
            return sc
def _extract_field_with_regex(self, field):
    """extract field from response content with regex.
    requests.Response body could be json or html text.

    Args:
        field (str): regex string that contains one capturing group

    Returns:
        str: matched content (group 1).

    Raises:
        exceptions.ExtractFailure: If no content matched with regex.

    Examples:
        >>> # self.text: "LB123abcRB789"
        >>> field = "LB[\\d]*(.*)RB[\\d]*"
        >>> _extract_field_with_regex(field)
        abc
    """
    matched = re.search(field, self.text)
    if not matched:
        err_msg = u"Failed to extract data with regex! => {}\n".format(field)
        err_msg += u"response body: {}\n".format(self.text)
        logger.log_error(err_msg)
        raise exceptions.ExtractFailure(err_msg)
    # Only the first capturing group is returned.
    return matched.group(1)
def choose_branch(exclude=None):
    """Show the user a menu to pick a branch from the existing ones.

    Args:
        exclude (list[str]): Branch names to exclude from the menu. By
            default it will exclude master and develop branches. To show
            all branches pass an empty collection here.

    Returns:
        str: The name of the branch chosen by the user. If the user
        inputs an invalid choice, he will be asked again (and again)
        until he picks a valid branch.
    """
    if exclude is None:
        master = conf.get('git.master_branch', 'master')
        develop = conf.get('git.devel_branch', 'develop')
        exclude = {master, develop}

    # Accept any iterable for `exclude` — the docstring promises list[str],
    # but the previous code required a set for the set-difference below.
    branches = list(set(git.branches()) - set(exclude))

    for i, branch_name in enumerate(branches):
        shell.cprint('<90>[{}] <33>{}'.format(i + 1, branch_name))

    choice = 0
    while choice < 1 or choice > len(branches):
        prompt = "Pick a base branch from the above [1-{}]".format(
            len(branches)
        )
        choice = click.prompt(prompt, value_proc=int)
        if not (1 <= choice <= len(branches)):
            fmt = "Invalid choice {}, you must pick a number between {} and {}"
            log.err(fmt.format(choice, 1, len(branches)))
    return branches[choice - 1]
def shutdown(self):
    """Shuts down connection to the device safely.

    :raise can.CanError: if closing the connection did not work
    """
    result = self.can.close(self.handle)
    if result != CANAL_ERROR_SUCCESS:
        raise CanError("could not shut down bus: status == {}".format(result))
def get_response_for_url(self, url):
    """Accepts a fully-qualified url.

    Returns an HttpResponse, passing through all headers and the status
    code, as rendered by the prerender service.

    Raises:
        ValueError: if the url is empty or not fully qualified.
    """
    if not url or "//" not in url:
        raise ValueError("Missing or invalid url: %s" % url)

    render_url = self.BASE_URL + url
    headers = {
        'X-Prerender-Token': self.token,
    }
    r = self.session.get(render_url, headers=headers, allow_redirects=False)
    # NOTE(review): `assert` is stripped under `python -O`, so a 5xx from
    # the render service would then pass through silently — consider
    # raising an explicit exception instead.
    assert r.status_code < 500
    return self.build_django_response_from_requests_response(r)
def set_xml_output(self, xml_file):
    """Tell ligolw_sqlite to dump the contents of the database to a file.

    :param xml_file: path the database contents will be extracted to.

    Raises:
        ValueError: if no database has been specified yet.
    """
    if self.get_database() is None:
        # The old `raise ValueError, "..."` form is Python-2-only syntax;
        # the call form below is valid on both Python 2 and 3.
        raise ValueError("no database specified")
    self.add_file_opt('extract', xml_file)
    self.__xml_output = xml_file
def get(self):
    """This method computes and returns a hitting set. The hitting set is
    obtained using the underlying oracle operating the MaxSAT problem
    formulation. The computed solution is mapped back to objects of the
    problem domain.

    Returns None implicitly when the oracle finds no model.

    :rtype: list(obj)
    """
    model = self.oracle.compute()

    if model:
        if self.htype == 'rc2':
            # extract positive literals only; note that on Python 3 this
            # is a lazy filter object, consumed exactly once by map() below.
            self.hset = filter(lambda v: v > 0, model)
        else:
            self.hset = model

        # map variable ids back to problem-domain objects
        return list(map(lambda vid: self.idpool.id2obj[vid], self.hset))
def applications(self):
    """Returns all the group applications to join.

    Queries the `/applications` endpoint and wraps each entry in an
    Application object.

    :return: list of Application objects (empty if none found).
    """
    url = self._url + "/applications"
    params = {"f" : "json"}
    res = self._get(url=url,
                    param_dict=params,
                    proxy_url=self._proxy_url,
                    proxy_port=self._proxy_port)
    items = []
    if "applications" in res.keys():
        for apps in res['applications']:
            # Each application is addressed by the applicant's username.
            items.append(
                self.Application(url="%s/%s" % (self._url, apps['username']),
                                 securityHandler=self._securityHandler,
                                 proxy_url=self._proxy_url,
                                 proxy_port=self._proxy_port)
            )
    return items
def pad_vocabulary(self, vocab, pad):
    """Pads vocabulary to a multiple of 'pad' tokens (in place).

    :param vocab: list with vocabulary
    :param pad: integer
    """
    # Round the size up to the next multiple of `pad` and fill the gap
    # with synthetic placeholder tokens.
    target_size = ((len(vocab) + pad - 1) // pad) * pad
    vocab.extend(f'madeupword{idx:04d}'
                 for idx in range(target_size - len(vocab)))
    assert len(vocab) % pad == 0
def get_cert_builder(expires):
    """Get a basic X509 cert builder object.

    Parameters
    ----------
    expires : datetime
        When this certificate will expire.
    """
    # Truncate to whole minutes for stable validity timestamps.
    now = datetime.utcnow().replace(second=0, microsecond=0)
    if expires is None:
        # NOTE(review): passes the None through to get_expires, which
        # presumably supplies a default lifetime — confirm.
        expires = get_expires(expires, now=now)
    expires = expires.replace(second=0, microsecond=0)

    builder = x509.CertificateBuilder()
    builder = builder.not_valid_before(now)
    builder = builder.not_valid_after(expires)
    builder = builder.serial_number(x509.random_serial_number())
    return builder
def get_root_outcome_group(self):
    """Redirect to root outcome group for context.

    :calls: `GET /api/v1/global/root_outcome_group \
    <https://canvas.instructure.com/doc/api/outcome_groups.html#method.outcome_groups_api.redirect>`_

    :returns: The OutcomeGroup of the context.
    :rtype: :class:`canvasapi.outcome.OutcomeGroup`
    """
    # Imported here to avoid a circular import at module load time.
    from canvasapi.outcome import OutcomeGroup

    response = self.__requester.request(
        'GET',
        'global/root_outcome_group'
    )
    return OutcomeGroup(self.__requester, response.json())
def process_tag(node):
    """Recursively go through a tag's children, converting them, then
    convert the tag itself.

    Returns the converted text for *node*.
    """
    text = ''
    # Tags whose children are NOT recursed into.
    exceptions = ['table']
    for element in node.children:
        if isinstance(element, NavigableString):
            text += element
        elif not node.name in exceptions:
            text += process_tag(element)
    try:
        # Dispatch to a module-level convert_<tagname> function if defined.
        convert_fn = globals()["convert_%s" % node.name.lower()]
        text = convert_fn(node, text)
    except KeyError:
        # No converter for this tag — keep the accumulated child text.
        pass
    return text
def webcam_attach(self, path, settings):
    """Attaches the emulated USB webcam to the VM, which will use a host
    video capture device.

    in path of type str
        The host path of the capture device to use.

    in settings of type str
        Optional settings.
    """
    # NOTE: `basestring` makes this Python-2-only code.
    if not isinstance(path, basestring):
        raise TypeError("path can only be an instance of type basestring")
    if not isinstance(settings, basestring):
        raise TypeError("settings can only be an instance of type basestring")
    self._call("webcamAttach", in_p=[path, settings])
def _uniq(self): pd = [] for d in range(1, self.maxdepth): pd.extend(map(lambda x: int(4**(d+1) + x), self.pixeldict[d])) return sorted(pd)
Create a list of all the pixels that cover this region. This list contains overlapping pixels of different orders. Returns ------- pix : list A list of HEALPix pixel numbers.
def preview(self, argv):
    """Retrieve the preview for the specified search jobs."""
    opts = cmdline(argv, FLAGS_RESULTS)

    def show_preview(job):
        output(job.preview(**opts.kwargs))

    self.foreach(opts.args, show_preview)
def get_database_columns(self, tables=None, database=None):
    """Retrieve a dictionary mapping table name -> list of columns.

    :param tables: tables to inspect; defaults to all tables.
    :param database: database name used only for the progress label;
        defaults to the current database.
    """
    source = database if database else self.database
    tables = tables if tables else self.tables
    # tqdm only drives the progress bar; the work is get_columns per table.
    return {tbl: self.get_columns(tbl) for tbl in tqdm(tables, total=len(tables),
                                                       desc='Getting {0} columns'.format(source))}
def get_cluster_view(p):
    """Get an ipython cluster view via cluster_helper.

    :param p: dict with scheduler, queue, job-count, core and resource
        settings used to start the cluster.
    """
    # Imported lazily so cluster_helper is only required when used.
    from cluster_helper import cluster as ipc
    return ipc.cluster_view(p['scheduler'], p['queue'],
                            p['num_jobs'], p['cores_per_job'],
                            start_wait=p['timeout'],
                            extra_params={"resources": p['resources'],
                                          "mem": p['mem'],
                                          "tag": p['tag'],
                                          "run_local": False})
def _check_and_uninstall_ruby(ret, ruby, user=None):
    """Verify that ruby is uninstalled, removing it if present.

    :param ret: state return dict to be filled in and returned.
    :param ruby: the ruby version to remove.
    :param user: user to run rbenv commands as.
    """
    ret = _ruby_installed(ret, ruby, user=user)
    if ret['result']:
        if ret['default']:
            # Switch the default away before uninstalling it.
            __salt__['rbenv.default']('system', runas=user)

        if __salt__['rbenv.uninstall_ruby'](ruby, runas=user):
            ret['result'] = True
            ret['changes'][ruby] = 'Uninstalled'
            ret['comment'] = 'Successfully removed ruby'
            return ret
        else:
            ret['result'] = False
            ret['comment'] = 'Failed to uninstall ruby'
            return ret
    else:
        # Nothing to do — already absent counts as success.
        ret['result'] = True
        ret['comment'] = 'Ruby {0} is already absent'.format(ruby)

    return ret
def lower_bollinger_band(data, period, std=2.0):
    """Lower Bollinger Band.

    Formula:
    l_bb = SMA(t) - STD(t-n:t) * std_mult
    """
    catch_errors.check_for_period_error(data, period)
    period = int(period)
    # Simple moving average aligned so index 0 is the first full window.
    simple_ma = sma(data, period)[period-1:]

    lower_bb = []
    for idx in range(len(data) - period + 1):
        # Standard deviation of the raw data over the rolling window.
        std_dev = np.std(data[idx:idx + period])
        lower_bb.append(simple_ma[idx] - std_dev * std)
    # Pad the leading period-1 positions so output aligns with input.
    lower_bb = fill_for_noncomputable_vals(data, lower_bb)

    return np.array(lower_bb)
def fetch_cache_key(request):
    """Return an MD5 hex digest of the request body, used as a cache key."""
    return hashlib.md5(request.body).hexdigest()
def compare_files(path1, path2):
    """Returns the delta between two files using -, ?, + format excluding
    lines that are the same.

    Args:
        path1 (str): Path to first file
        path2 (str): Path to second file

    Returns:
        List[str]: Delta between the two files
    """
    # Use context managers so both file handles are closed deterministically
    # (the previous version leaked them).
    with open(path1) as file1, open(path2) as file2:
        diff = difflib.ndiff(file1.readlines(), file2.readlines())
    # Keep only changed/annotation lines; ' '-prefixed lines are identical.
    return [line for line in diff if line[0] in ('-', '+', '?')]
def _do_private_mode(FetcherClass, services, kwargs, random_wait_seconds, timeout, verbose):
    """Private mode is only applicable to address_balance, unspent_outputs,
    and historical_transactions. There will always be a list for the
    `addresses` argument. Each address goes to a random service. Also a
    random delay is performed before the external fetch for improved
    privacy.

    Returns a dict mapping address -> fetch result.
    """
    addresses = kwargs.pop('addresses')
    results = {}
    with futures.ThreadPoolExecutor(max_workers=len(addresses)) as executor:
        fetches = {}
        for address in addresses:
            # NOTE(review): k aliases kwargs (no copy). Each submit() call
            # snapshots the dict at call time so this works, but a
            # defensive dict(kwargs) copy would be safer.
            k = kwargs
            k['address'] = address
            # Shuffle so each address hits the services in a random order.
            random.shuffle(services)
            srv = FetcherClass(
                services=services,
                verbose=verbose,
                timeout=timeout or 5.0,
                random_wait_seconds=random_wait_seconds
            )
            fetches[executor.submit(srv.action, **k)] = (srv, address)

        to_iterate = futures.as_completed(fetches)
        for future in to_iterate:
            service, address = fetches[future]
            results[address] = future.result()
    return results
def calculate_hash_of_dir(directory, file_list=None):
    """Calculate an MD5-based hash over the files in *directory*.

    Each file is read in 4 KiB chunks; the hex digest of every chunk is
    folded into a running MD5, preserving the original algorithm.

    :param directory: directory tree to hash.
    :param file_list: optional whitelist of exact file paths to include.
    :return: hex digest string, or -1 when the directory does not exist
        or any file cannot be read.
    """
    md5_hash = md5()
    if not os.path.exists(directory):
        return -1
    try:
        for subdir, dirs, files in os.walk(directory):
            for _file in files:
                file_path = os.path.join(subdir, _file)
                if file_list is not None and file_path not in file_list:
                    continue
                # `with` closes the handle on every path; the previous
                # version referenced an unbound variable (NameError) when
                # open() itself failed, and leaked the handle on success.
                with open(file_path, 'rb') as file_object:
                    while 1:
                        buf = file_object.read(4096)
                        if not buf:
                            break
                        md5_hash.update(md5(buf).hexdigest().encode())
    except Exception:
        return -1
    return md5_hash.hexdigest()
def pool(self):
    """Get a gevent pool used to dispatch requests (created lazily)."""
    # Reuse an existing pool; only build one on first access.
    self._pool = self._pool or gevent.pool.Pool(size=self.pool_size)
    return self._pool
def _set_survey_scenario(self, survey_scenario):
    """Set survey scenario and derive weights/filters from it.

    :param survey_scenario: the survey scenario
    """
    self.survey_scenario = survey_scenario
    # Make sure the scenario has a simulation to compute variables from.
    if survey_scenario.simulation is None:
        survey_scenario.simulation = survey_scenario.new_simulation()
    period = self.period
    self.filter_by = filter_by = survey_scenario.calculate_variable(
        variable = self.filter_by_name, period = period)
    # Weights are taken from the 'menage' (household) entity.
    self.weight_name = weight_name = self.survey_scenario.weight_column_name_by_entity['menage']
    self.initial_weight_name = weight_name + "_ini"
    self.initial_weight = initial_weight = survey_scenario.calculate_variable(
        variable = weight_name, period = period)
    # Total population restricted to the filtered sub-population.
    self.initial_total_population = sum(initial_weight * filter_by)
    self.weight = survey_scenario.calculate_variable(variable = weight_name, period = period)
def chord_counts(im):
    r"""
    Finds the length of each chord in the supplied image and returns a
    list of their individual sizes.

    Parameters
    ----------
    im : ND-array
        An image containing chords drawn in the void space.

    Returns
    -------
    result : 1D-array
        A 1D array with one element for each chord, containing its length.

    Notes
    -----
    The returned array can be passed to ``plt.hist`` to plot the
    histogram, or to ``sp.histogram`` to get the histogram data directly.
    Another useful function is ``sp.bincount`` which gives the number of
    chords of each length in a format suitable for ``plt.plot``.
    """
    labels, N = spim.label(im > 0)
    props = regionprops(labels, coordinates='xy')
    # filled_area of each labelled region is the chord length in voxels.
    chord_lens = sp.array([i.filled_area for i in props])
    return chord_lens
def cycle_find_app(_parser, cmd, args):
    """Find the first position of a value in a de Bruijn sequence.

    Exits with status 1 when the value is not found.
    """
    parser = argparse.ArgumentParser(
        prog=_parser.prog,
        description=_parser.description,
    )
    parser.add_argument('-w', '--width', type=int, default=4,
                        help='the length of the cycled value')
    parser.add_argument('value',
                        help='the value to determine the position of, read from stdin if missing',
                        nargs='?')
    args = parser.parse_args(args)
    index = cycle_find(pwnypack.main.string_value_or_stdin(args.value), args.width)
    if index == -1:
        print('Not found.')
        sys.exit(1)
    else:
        print('Found at position: %d' % index)
def _OpenCollectionPath(coll_path):
    """Tries to open various types of collections at the given path.

    Returns the first non-empty candidate, or None (implicitly) when
    neither collection type contains data at the path.
    """
    hunt_collection = results.HuntResultCollection(coll_path)
    if hunt_collection and hunt_collection[0].payload:
        return hunt_collection

    indexed_collection = sequential_collection.GeneralIndexedCollection(coll_path)
    if indexed_collection:
        return indexed_collection
def interface_to_relations(interface_name):
    """Given an interface, return a list of relation names for the current
    charm that use that interface.

    :returns: A list of relation names.
    """
    relations = []
    for role in ('provides', 'requires', 'peers'):
        relations += role_and_interface_to_relations(role, interface_name)
    return relations
def make_quantile_df(data, draw_quantiles):
    """Return a dataframe with info needed to draw quantile segments.

    :param data: dataframe with 'density', 'y', 'xminv' and 'xmaxv'
        columns describing one violin.
    :param draw_quantiles: quantile fractions at which to draw segments.
    """
    # Empirical CDF of the density along y, used to invert quantiles.
    dens = data['density'].cumsum() / data['density'].sum()
    ecdf = interp1d(dens, data['y'], assume_sorted=True)
    ys = ecdf(draw_quantiles)

    # Violin edge x-positions at each quantile's y value.
    violin_xminvs = interp1d(data['y'], data['xminv'])(ys)
    violin_xmaxvs = interp1d(data['y'], data['xmaxv'])(ys)

    data = pd.DataFrame({
        'x': interleave(violin_xminvs, violin_xmaxvs),
        'y': np.repeat(ys, 2),
        # Each segment (pair of points) gets its own group id.
        'group': np.repeat(np.arange(1, len(ys)+1), 2)})

    return data
def construct_blastall_cmdline(
    fname1, fname2, outdir, blastall_exe=pyani_config.BLASTALL_DEFAULT
):
    """Returns a single blastall command line as a string.

    - blastall_exe - path to BLASTALL executable
    """
    def _stem(path):
        # filename without directory or extension
        return os.path.splitext(os.path.split(path)[-1])[0]

    fstem1 = _stem(fname1).replace("-fragments", "")
    fstem2 = _stem(fname2)
    prefix = os.path.join(outdir, "%s_vs_%s" % (fstem1, fstem2))
    cmd = (
        "{0} -p blastn -o {1}.blast_tab -i {2} -d {3} "
        + "-X 150 -q -1 -F F -e 1e-15 "
        + "-b 1 -v 1 -m 8"
    )
    return cmd.format(blastall_exe, prefix, fname1, fname2)
def shot_start_data(shot, role):
    """Return the data for startframe.

    :param shot: the shot that holds the data
    :type shot: :class:`jukeboxcore.djadapter.models.Shot`
    :param role: item data role
    :type role: QtCore.Qt.ItemDataRole
    :returns: the start frame as a string for the display role, else None
    :raises: None
    """
    if role != QtCore.Qt.DisplayRole:
        return None
    return str(shot.startframe)
def emit_toi_stats(toi_set, peripherals):
    """Calculates new TOI stats and emits them via statsd."""
    count_by_zoom = defaultdict(int)
    for coord_int in toi_set:
        zoom = coord_unmarshall_int(coord_int).zoom
        count_by_zoom[zoom] += 1

    total = sum(count_by_zoom.values())
    peripherals.stats.gauge('tiles-of-interest.count', total)
    for zoom, count in count_by_zoom.items():
        peripherals.stats.gauge(
            'tiles-of-interest.by-zoom.z{:02d}'.format(zoom), count
        )
def fd_taper(out, start, end, beta=8, side='left'):
    """Applies a taper to the given FrequencySeries.

    A half-kaiser window is used for the roll-off.

    Parameters
    ----------
    out : FrequencySeries
        The ``FrequencySeries`` to taper.
    start : float
        The frequency (in Hz) to start the taper window.
    end : float
        The frequency (in Hz) to end the taper window.
    beta : int, optional
        The beta parameter to use for the Kaiser window. See
        ``scipy.signal.kaiser`` for details. Default is 8.
    side : {'left', 'right'}
        The side to apply the taper to. If ``'left'`` (``'right'``), the
        taper will roll up (down) between ``start`` and ``end``, with all
        values before ``start`` (after ``end``) set to zero. Default is
        ``'left'``.

    Returns
    -------
    FrequencySeries
        The tapered frequency series.
    """
    out = out.copy()
    width = end - start
    # Full kaiser window; only half of it is applied (half-kaiser roll-off).
    winlen = 2 * int(width / out.delta_f)
    window = Array(signal.get_window(('kaiser', beta), winlen))
    kmin = int(start / out.delta_f)
    kmax = kmin + winlen//2
    if side == 'left':
        # Rising half-window, zeros below the taper start.
        out[kmin:kmax] *= window[:winlen//2]
        out[:kmin] *= 0.
    elif side == 'right':
        # Falling half-window, zeros above the taper end.
        out[kmin:kmax] *= window[winlen//2:]
        out[kmax:] *= 0.
    else:
        raise ValueError("unrecognized side argument {}".format(side))
    return out
def setNamedItem(self, item: Attr) -> None:
    """Set ``Attr`` object in this collection.

    When the owner is a WdomElement, the change is also mirrored into
    the browser-side DOM.

    :raises TypeError: if *item* is not an ``Attr`` instance.
    """
    # Imported here to avoid a circular import at module load time.
    from wdom.web_node import WdomElement
    if not isinstance(item, Attr):
        raise TypeError('item must be an instance of Attr')
    if isinstance(self._owner, WdomElement):
        # Push the attribute to the live browser DOM.
        self._owner.js_exec('setAttribute', item.name, item.value)
    self._dict[item.name] = item
    item._owner = self._owner
def from_T050017(cls, url, coltype = LIGOTimeGPS):
    """Parse a URL in the style of T050017-00 into a CacheEntry.

    The T050017-00 file name format is, essentially,

    observatory-description-start-duration.extension

    Example:

    >>> c = CacheEntry.from_T050017("file://localhost/data/node144/frames/S5/strain-L2/LLO/L-L1_RDS_C03_L2-8365/L-L1_RDS_C03_L2-836562330-83.gwf")
    >>> c.observatory
    'L'
    >>> c.host
    'localhost'
    >>> os.path.basename(c.path)
    'L-L1_RDS_C03_L2-836562330-83.gwf'
    """
    match = cls._url_regex.search(url)
    if not match:
        raise ValueError("could not convert %s to CacheEntry" % repr(url))
    observatory = match.group("obs")
    description = match.group("dsc")
    start = match.group("strt")
    duration = match.group("dur")
    # "-" placeholders mean the segment is unknown.
    if start == "-" and duration == "-":
        segment = None
    else:
        segment = segments.segment(coltype(start), coltype(start) + coltype(duration))
    return cls(observatory, description, segment, url)
def connection_sync(self, connection_id, connProps=None):
    """Synchronizes connection properties with the server.

    :param connection_id: ID of the current connection.
    :param connProps: Dictionary with the properties that should be changed.
    :returns: A ``common_pb2.ConnectionProperties`` object.
    """
    if connProps is None:
        connProps = {}
    request = requests_pb2.ConnectionSyncRequest()
    request.connection_id = connection_id
    request.conn_props.auto_commit = connProps.get('autoCommit', False)
    # has_* flags tell the server which fields are intentionally set.
    request.conn_props.has_auto_commit = True
    request.conn_props.read_only = connProps.get('readOnly', False)
    request.conn_props.has_read_only = True
    request.conn_props.transaction_isolation = connProps.get('transactionIsolation', 0)
    request.conn_props.catalog = connProps.get('catalog', '')
    request.conn_props.schema = connProps.get('schema', '')

    response_data = self._apply(request)
    response = responses_pb2.ConnectionSyncResponse()
    response.ParseFromString(response_data)
    return response.conn_props
def attach(domain, filename):
    """Attach existing datasets to their harvest remote id.

    Mapping between identifiers should be in FILENAME CSV file.
    """
    log.info('Attaching datasets for domain %s', domain)
    outcome = actions.attach(domain, filename)
    log.info('Attached %s datasets to %s', outcome.success, domain)
def create_md5(path):
    """Create the md5 hash of a file using the hashlib library."""
    digest = hashlib.md5()
    with open(path, "rb") as handle:
        # iter() with a b"" sentinel reads 8 KiB chunks until EOF.
        for chunk in iter(lambda: handle.read(8192), b""):
            digest.update(chunk)
    return digest.hexdigest()
def recover_all(lbn, profile='default'):
    """Set all the workers in lbn to recover and activate them if they are
    not.

    CLI Examples:

    .. code-block:: bash

        salt '*' modjk.recover_all loadbalancer1
        salt '*' modjk.recover_all loadbalancer1 other-profile

    :param lbn: load balancer name whose workers should be recovered.
    :param profile: modjk connection profile to use.
    :return: dict mapping worker name -> its status after recovery.
    """
    ret = {}
    config = get_running(profile)
    try:
        workers_ = config['worker.{0}.balance_workers'.format(lbn)].split(',')
    except KeyError:
        # Unknown load balancer — nothing to recover.
        return ret

    for worker in workers_:
        curr_state = worker_status(worker, profile)
        if curr_state['activation'] != 'ACT':
            worker_activate(worker, lbn, profile)
        if not curr_state['state'].startswith('OK'):
            worker_recover(worker, lbn, profile)
        ret[worker] = worker_status(worker, profile)

    return ret
def split_qname(self, cybox_id):
    """Separate the namespace from the identifier in a qualified name and
    look up the namespace URI associated with the given namespace.

    :param cybox_id: qualified identifier, e.g. ``"ns:uid"``.
    :return: tuple ``(namespace, namespace_uri, uid)``.
    """
    namespace, sep, uid = cybox_id.partition(':')
    if not sep:
        # No qualifying prefix present.
        namespace, uid = None, cybox_id

    if namespace and namespace in self.namespace_dict:
        namespace_uri = self.namespace_dict[namespace]
    else:
        logger.warning("Could not retrieve namespace for identifier %s" % (cybox_id))
        namespace_uri = None

    if not namespace_uri:
        if self.default_identifier_ns_uri:
            namespace_uri = self.default_identifier_ns_uri
        else:
            # Synthesize a placeholder URI so callers always get one.
            namespace_uri = "%s/%s" % (DINGOS_MISSING_ID_NAMESPACE_URI_PREFIX, namespace)

    return (namespace, namespace_uri, uid)
def _build_meta(meta: str, pipelines: Iterable['Pipeline']) -> 'Pipeline':
    """Build a pipeline with a given meta-argument.

    :param meta: either union or intersection
    :param pipelines: pipelines to combine under the meta operator
    """
    protocols = [pipeline.protocol for pipeline in pipelines]
    return Pipeline(protocol=[{'meta': meta, 'pipelines': protocols}])
def load(cls, path: str, password: str = None) -> 'Account':
    """Load an account from a keystore file.

    Args:
        path: full path to the keyfile
        password: the password to decrypt the key file or `None` to leave
            it encrypted

    Raises:
        ValueError: if the file is not a valid keystore JSON document.
    """
    with open(path) as f:
        keystore = json.load(f)
    if not check_keystore_json(keystore):
        raise ValueError('Invalid keystore file')
    return Account(keystore, password, path=path)
def initialize(self, calc_reg):
    """Initialize the simulation. Organize calculations by dependency.

    :param calc_reg: Calculation registry.
    :type calc_reg: :class:`~simkit.core.calculation.CalcRegistry`
    """
    self.calc_order = topological_sort(calc_reg.dependencies)
    self._isinitialized = True
def docs(root_url, path):
    """Generate URL for path in the Taskcluster docs."""
    base = root_url.rstrip('/')
    doc_path = path.lstrip('/')
    # The legacy root URL keeps its dedicated docs site.
    if base == OLD_ROOT_URL:
        return 'https://docs.taskcluster.net/{}'.format(doc_path)
    return '{}/docs/{}'.format(base, doc_path)
def logical_intf_helper(interface):
    """Logical Interface finder by name. Create if it doesn't exist.

    This is useful when adding logical interfaces to inline or capture
    interfaces.

    :param interface: logical interface name, LogicalInterface instance,
        or an href string.
    :return str href: href of logical interface
    """
    if interface is None:
        # Fall back to a default logical interface.
        return LogicalInterface.get_or_create(name='default_eth').href
    elif isinstance(interface, LogicalInterface):
        return interface.href
    elif interface.startswith('http'):
        # Already an href — pass it through unchanged.
        return interface
    return LogicalInterface.get_or_create(name=interface).href
def error_handler(task):
    """Decorator that runs an RPC *task*, logging failures instead of raising.

    On any exception the wrapped method marks the instance as disconnected
    and, unless ``self.testing`` is set, logs the task name, source location
    and exception text. Returns ``None`` when the task fails.
    """
    @wraps(task)
    def wrapper(self, *args, **kwargs):
        try:
            return task(self, *args, **kwargs)
        except Exception as e:
            self.connected = False
            if not self.testing:
                _, _, exc_tb = sys.exc_info()
                fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]
                error_message = (
                    "[" + str(datetime.now()) + "] Error in task \"" +
                    task.__name__ + "\" (" + fname + "/" +
                    # ``e.message`` was removed in Python 3; str(e) is portable.
                    str(exc_tb.tb_lineno) + ")" + str(e)
                )
                self.logger.error("%s: RPC instruction failed" % error_message)
    return wrapper
Handle and log RPC errors.
def GenerateFileData(self):
    """Generate the file data for a chunk-encoded file.

    Reads ``<hex-size>[;ext]\\r\\n<data>\\r\\n`` chunks from ``self.rfile``
    until a zero-sized chunk, yielding data via ``self._GenerateChunk``,
    then consumes any trailer headers up to the terminating blank line.
    """
    while 1:
        line = self.rfile.readline()
        # The size line may carry a ";extension" suffix after the hex size.
        chunk_size = int(line.split(";")[0], 16)
        if chunk_size == 0:
            break
        for chunk in self._GenerateChunk(chunk_size):
            yield chunk
        # Each chunk payload is terminated by CRLF.
        lf = self.rfile.read(2)
        if lf != "\r\n":
            raise IOError("Unable to parse chunk.")
    # Consume optional trailer headers until the blank line ending the body.
    # (The original iterated the characters of a single readline() result,
    # which never matched a header and consumed at most one line.)
    while True:
        header = self.rfile.readline()
        if not header or header in ("\r\n", "\n"):
            break
Generates the file data for a chunk encoded file.
def to_yaml(value, pretty=False):
    """Serialize *value* to YAML.

    :param value: the value to serialize
    :param pretty: if True, emit block style for readability (default False)
    :rtype: str
    :raises NotImplementedError: if no YAML library is available
    """
    if not yaml:
        raise NotImplementedError('No supported YAML library available')
    dump_kwargs = {
        'Dumper': BasicYamlDumper,
        'allow_unicode': True,
        'default_flow_style': not pretty,
    }
    return yaml.dump(value, **dump_kwargs).rstrip()
Serializes the given value to YAML. :param value: the value to serialize :param pretty: whether or not to format the output in a more human-readable way; if not specified, defaults to ``False`` :type pretty: bool :rtype: str
def set_backend(self, backend):
    """Select and initialize the simulation backend.

    :param backend: either a backend name (str), or a tuple/list whose first
        element is the name, followed by elements that are dicts (merged into
        keyword args) or iterables of positional args for ``init_backend``.
    :raises TypeError: if *backend* is not a string, tuple, or list.
    :raises Exception: if the name is not registered in ``available_backends``.
    """
    if isinstance(backend, str):
        name = backend
        args = []
        kwargs = {}
    elif isinstance(backend, (tuple, list)):
        name = ''
        args = []
        kwargs = {}
        for i in range(len(backend)):
            if i == 0:
                # First element is the backend name.
                name = backend[i]
            else:
                if isinstance(backend[i], dict):
                    # Dict elements are merged into keyword arguments.
                    kwargs.update(backend[i])
                else:
                    # NOTE(review): ``+=`` extends, so each non-dict element
                    # is expected to be an iterable of positional args —
                    # a bare scalar here would raise; confirm callers.
                    args += backend[i]
    else:
        raise TypeError("Backend must be string, tuple, or list")
    if name in available_backends:
        self.backend = name
        self._backend = available_backends[name]()
    elif name is None:
        # NOTE(review): likely unreachable — name is a str or '' by this
        # point unless a sequence explicitly contained None first.
        raise Exception(("A backend (e.g. 'jNeuroML' or 'NEURON') "
                         "must be selected"))
    else:
        raise Exception("Backend %s not found in backends.py" % name)
    # Give the backend a reference back to this model, then run its setup.
    self._backend.model = self
    self._backend.init_backend(*args, **kwargs)
Set the simulation backend.
def eigvalsh(a, eigvec=False):
    """Eigenvalues of Hermitian matrix ``a``.

    :param a: square Hermitian matrix of numbers and/or ``gvar.GVar``\\s.
    :param eigvec: if ``True``, also return the mean values of the
        eigenvectors as a second array.
    :return: ``val`` (ascending eigenvalues), or ``(val, vec)`` when
        ``eigvec`` is ``True``.
    """
    if eigvec == True:
        val, vec = eigh(a, eigvec=True)
        # Only the mean values of the eigenvectors are returned.
        return val, gvar.mean(vec)
    return eigh(a, eigvec=False)
Eigenvalues of Hermitian matrix ``a``. Args: a: Two-dimensional, square Hermitian matrix/array of numbers and/or :class:`gvar.GVar`\s. Array elements must be real-valued if `gvar.GVar`\s are involved (i.e., symmetric matrix). eigvec (bool): If ``True``, method returns a tuple of arrays ``(val, vec)`` where ``val[i]`` are the eigenvalues of ``a``, and ``vec[:, i]`` are the mean values of the corresponding eigenvectors. Only ``val`` is returned if ``eigvec=False`` (default). Returns: Array ``val`` of eigenvalues of matrix ``a`` if parameter ``eigvec==False`` (default); otherwise a tuple of arrays ``(val, vec)`` where ``val[i]`` are the eigenvalues (in ascending order) and ``vec[:, i]`` are the mean values of the corresponding eigenvectors. Raises: ValueError: If matrix is not square and two-dimensional.
def elevation_along_path(client, path, samples):
    """Return elevation data sampled along a path on the earth's surface.

    :param path: an encoded polyline string, or a list of lat/lng values
    :param samples: number of sample points along the path
    :rtype: list of elevation data responses
    """
    if type(path) is str:
        # Already an encoded polyline; just tag it.
        encoded = "enc:%s" % path
    else:
        encoded = convert.shortest_path(path)
    params = {"path": encoded, "samples": samples}
    return client._request("/maps/api/elevation/json", params).get("results", [])
Provides elevation data sampled along a path on the surface of the earth. :param path: An encoded polyline string, or a list of latitude/longitude values from which you wish to calculate elevation data. :type path: string, dict, list, or tuple :param samples: The number of sample points along a path for which to return elevation data. :type samples: int :rtype: list of elevation data responses
def JsonResponseModel(self):
    """Context manager: within the block, return raw JSON instead of proto.

    The previous response type is restored on exit even when the block
    raises (the original skipped restoration on error, leaving the client
    stuck in JSON mode).
    """
    old_model = self.response_type_model
    self.__response_type_model = 'json'
    try:
        yield self.__response_type_model
    finally:
        self.__response_type_model = old_model
In this context, return raw JSON instead of proto.
def get_knowledge_base(project_id, knowledge_base_id):
    """Fetch a specific Knowledge base and print its details.

    Args:
        project_id: the GCP project linked with the agent.
        knowledge_base_id: id of the Knowledge base.
    """
    import dialogflow_v2beta1 as dialogflow

    client = dialogflow.KnowledgeBasesClient()
    kb_path = client.knowledge_base_path(project_id, knowledge_base_id)
    kb = client.get_knowledge_base(kb_path)
    print('Got Knowledge Base:')
    print(' - Display Name: {}'.format(kb.display_name))
    print(' - Knowledge ID: {}'.format(kb.name))
Gets a specific Knowledge base. Args: project_id: The GCP project linked with the agent. knowledge_base_id: Id of the Knowledge base.
def on_leave(self):
    """Leave the current chat room (if any), notify peers, and reset state."""
    if self.roomId == '-1':
        # Not in a room; nothing to do.
        return
    logging.debug('chat: leave room (roomId: %s)' % self.roomId)
    self.publishToOther(self.roomId, 'leave', {'username': self.username})
    self.leave(self.roomId)
    self.initialize()
Quit chat room
def _display_choices(self, choices):
    """Print a numbered menu of *choices* and return the number→choice map.

    Keys are the stringified indices; values are the normalized choices.
    """
    print("Choose the number of the correct choice:")
    mapping = {}
    for index, choice in enumerate(choices):
        key = str(index)
        # Indent wrapped lines to align under the numbered prefix.
        indented = format.indent(choice, ' ' * (len(key) + 2)).strip()
        print('{}) {}'.format(key, indented))
        mapping[key] = format.normalize(choice)
    return mapping
Prints a mapping of numbers to choices and returns the mapping as a dictionary.
def get_all(self):
    """Yield every (name, value) pair, expanding multi-valued names."""
    for name, values in self._map.items():
        yield from ((name, value) for value in values)
Return an iterator of name-value pairs.
def _parse_body(self, body):
    """Deserialize a JSON request body, decoding bytes first on Python 3."""
    if is_python3():
        body = body.decode('UTF-8')
    return json.loads(body)
Deserialize the request body from JSON, decoding it from UTF-8 bytes first on Python 3.
def get_election_electoral_votes(self, election):
    """Return all electoral votes for this candidate in *election*."""
    candidate_election = CandidateElection.objects.get(
        election=election,
        candidate=self,
    )
    return candidate_election.electoral_votes.all()
Get all electoral votes for this candidate in an election.
def pad_block(block, block_size):
    """Pad *block* up to *block_size* (per axis) with its most frequent value."""
    values, counts = np.unique(block, return_counts=True)
    fill_value = values[counts.argmax()]
    # Pad only at the trailing end of each axis.
    widths = [(0, target - current)
              for target, current in zip(block_size, block.shape)]
    return np.pad(block, widths, mode="constant", constant_values=fill_value)
Pad a block to block_size with its most frequent value
def set_payload_format(self, payload_format):
    """Set the payload format for messages sent to and from the VI.

    Returns True if the command was successful.
    """
    status = self._check_command_response_status({
        "command": "payload_format",
        "format": payload_format,
    })
    # Record the new format locally so outgoing messages use it.
    self.format = payload_format
    return status
Set the payload format for messages sent to and from the VI. Returns True if the command was successful.
def validate_unique(self, exclude=None):
    """Validate uniqueness constraints on the shared model and every cached
    translation, merging all violations into a single ``ValidationError``.

    :param exclude: field names to skip, forwarded to each
        ``validate_unique`` call.
    :raises ValidationError: with the aggregated ``message_dict``.
    """
    errors = {}
    try:
        # Validate the untranslated (shared) fields first.
        super(TranslatableModelMixin, self).validate_unique(exclude=exclude)
    except ValidationError as e:
        errors = e.message_dict
    # Walk every per-language cache of translation instances.
    for local_cache in six.itervalues(self._translations_cache):
        for translation in six.itervalues(local_cache):
            if is_missing(translation):
                # Placeholder for a translation that does not exist; skip.
                continue
            try:
                translation.validate_unique(exclude=exclude)
            except ValidationError as e:
                # NOTE(review): later translations overwrite same-named keys
                # from earlier ones in the merged dict — confirm acceptable.
                errors.update(e.message_dict)
    if errors:
        raise ValidationError(errors)
Also validate the unique_together of the translated model.
def vertex_normals(self):
    """The vertex normals of the mesh, computed as the mean of the normals
    of the faces each vertex participates in.

    Returns
    ----------
    vertex_normals : (n, 3) float
        Surface normal at each vertex, n == len(self.vertices).
    """
    # mean_vertex_normals needs a sparse matrix supporting dot products.
    assert hasattr(self.faces_sparse, 'dot')
    return geometry.mean_vertex_normals(
        vertex_count=len(self.vertices),
        faces=self.faces,
        face_normals=self.face_normals,
        sparse=self.faces_sparse)
The vertex normals of the mesh. If the normals were loaded we check to make sure we have the same number of vertex normals and vertices before returning them. If there are no vertex normals defined or a shape mismatch we calculate the vertex normals from the mean normals of the faces the vertex is used in. Returns ---------- vertex_normals : (n,3) float Represents the surface normal at each vertex. Where n == len(self.vertices)
def contribute_to_related_class(self, cls, related):
    """Django hook run when this FK's relation is set up on the related model.

    Lets the parent install the default reverse accessor, then replaces it
    with a version-aware descriptor.

    :param cls: the related model class receiving the reverse accessor.
    :param related: the relation object describing this foreign key.
    """
    super(VersionedForeignKey, self).contribute_to_related_class(cls, related)
    accessor_name = related.get_accessor_name()
    # Only swap the descriptor if the parent actually installed one
    # (hidden relations may not create an accessor).
    if hasattr(cls, accessor_name):
        setattr(cls, accessor_name, VersionedReverseManyToOneDescriptor(related))
Override ForeignKey's methods, and replace the descriptor, if set by the parent's methods
def raise_(tp, value=None, tb=None):
    """Re-raise helper matching the Python 2 ``raise`` statement on Python 3.

    Args:
        tp: an exception class or instance.
        value: optional argument used to instantiate *tp* when it is a
            class; must be None when *tp* is already an instance.
        tb: optional traceback to attach to the raised exception.

    Raises:
        TypeError: if *tp* is an exception instance and *value* is given.
    """
    if isinstance(tp, BaseException):
        # An already-instantiated exception may not carry a separate value.
        if value is not None:
            raise TypeError("instance exception may not have a separate value")
        exc = tp
    elif isinstance(tp, type) and issubclass(tp, BaseException):
        # Instantiate classes explicitly: the original left ``exc = tp``,
        # making ``exc.__traceback__`` a class-level descriptor and
        # ``exc.with_traceback(tb)`` an unbound-method call that blew up.
        exc = tp(value) if value is not None else tp()
    else:
        exc = tp
    if exc.__traceback__ is not tb:
        raise exc.with_traceback(tb)
    raise exc
A function that matches the Python 2.x ``raise`` statement. This allows re-raising exceptions with the cls value and traceback on Python 2 and 3.
def load_tensor(f, format=None):
    """Load a serialized TensorProto into memory.

    :param f: a file-like object (with a ``read`` method) or a file name
    :param format: reserved for future use
    :return: the loaded in-memory TensorProto
    """
    raw = _load_bytes(f)
    return load_tensor_from_string(raw, format=format)
Loads a serialized TensorProto into memory @params f can be a file-like object (has "read" function) or a string containing a file name format is for future use @return Loaded in-memory TensorProto
def traverse_dependency_graph(self, ref, collector, memo=None):
    """Traverse the ivy dependency graph starting at *ref*.

    :param ref: an IvyModuleRef to start from; resolved to its versioned
        form when known.
    :param collector: function mapping a ref to a fresh set of values; each
        node's set is augmented with its dependencies' accumulated values.
    :param memo: optional ref -> set cache retained across calls.
    :returns: the accumulated set for *ref*.
    """
    # Prefer the canonical versioned ref when we know it.
    resolved = self.refs_by_unversioned_refs.get(ref.unversioned)
    ref = resolved or ref
    if memo is None:
        memo = {}
    return self._do_traverse_dependency_graph(ref, collector, memo, set())
Traverses module graph, starting with ref, collecting values for each ref into the sets created by the collector function. :param ref an IvyModuleRef to start traversing the ivy dependency graph :param collector a function that takes a ref and returns a new set of values to collect for that ref, which will also be updated with all the dependencies accumulated values :param memo is a dict of ref -> set that memoizes the results of each node in the graph. If provided, allows for retaining cache across calls. :returns the accumulated set for ref
def full_info(**kwargs):
    """Return node_info, vm_info and free cpu/memory for the hypervisor.

    :param kwargs: connection overrides (connection URI, username, password).

    CLI Example:

    .. code-block:: bash

        salt '*' virt.full_info
    """
    conn = __get_conn(**kwargs)
    result = dict(
        freecpu=_freecpu(conn),
        freemem=_freemem(conn),
        node_info=_node_info(conn),
        vm_info=vm_info(),
    )
    conn.close()
    return result
Return the node_info, vm_info and freemem :param connection: libvirt connection URI, overriding defaults .. versionadded:: 2019.2.0 :param username: username to connect with, overriding defaults .. versionadded:: 2019.2.0 :param password: password to connect with, overriding defaults .. versionadded:: 2019.2.0 CLI Example: .. code-block:: bash salt '*' virt.full_info
def qc_data(self, tests, alias=None):
    """Run quality tests against every curve and merge in group results.

    Args:
        tests (list): test functions to run.
        alias (dict): optional mnemonic alias mapping.

    Returns:
        dict: mnemonic -> test results (booleans or ints).
    """
    results = {mnemonic: curve.quality(tests, alias)
               for mnemonic, curve in self.data.items()}
    group_results = self.qc_curve_group(tests, alias=alias)
    for mnemonic, per_curve in results.items():
        # Merge group-level outcomes into each curve's results when present.
        per_curve.update(group_results.get(mnemonic, {}))
    return results
Run a series of tests against the data and return the corresponding results. Args: tests (list): a list of functions. Returns: list. The results. Stick to booleans (True = pass) or ints.
def serialize(obj, **options):
    """Serialize Python data to TOML.

    :param obj: the data structure to serialize.
    :param options: options forwarded to the toml module; the special
        ``file_out`` option is a writable file object to dump into.
    :raises SerializationError: on any underlying toml failure.
    """
    try:
        # ``file_out`` is our own option, not toml's: pop it so it is not
        # also forwarded as an unexpected keyword argument to toml.dump.
        file_out = options.pop('file_out', None)
        if file_out is not None:
            return toml.dump(obj, file_out, **options)
        return toml.dumps(obj, **options)
    except Exception as error:
        raise SerializationError(error)
Serialize Python data to TOML. :param obj: the data structure to serialize. :param options: options given to lower pytoml module.
def is_ancestor(self, ancestor_rev, rev):
    """Check whether *ancestor_rev* is an ancestor of *rev*.

    :param ancestor_rev: rev which should be an ancestor
    :param rev: rev to test against ancestor_rev
    :return: ``True`` if ancestor_rev is an ancestor of rev.
    """
    try:
        # ``git merge-base --is-ancestor`` exits 1 when not an ancestor.
        self.git.merge_base(ancestor_rev, rev, is_ancestor=True)
        return True
    except GitCommandError as err:
        if err.status != 1:
            raise
        return False
Check if a commit is an ancestor of another :param ancestor_rev: Rev which should be an ancestor :param rev: Rev to test against ancestor_rev :return: ``True`` if ancestor_rev is an ancestor of rev, ``False`` otherwise.
def append_sfixed64(self, value):
    """Append a signed 64-bit integer to the buffer in little-endian order.

    :raises errors.EncodeError: if *value* does not fit in a signed 64-bit int.
    """
    if not -(1 << 63) <= value < (1 << 63):
        raise errors.EncodeError('SFixed64 out of range: %d' % value)
    # Store the two's-complement bit pattern of the value.
    self._stream.append_little_endian64(value & 0xffffffffffffffff)
Appends a signed 64-bit integer to our buffer, in little-endian byte-order.
def d8distdowntostream(np, p, fel, src, dist, distancemethod, thresh, workingdir=None, mpiexedir=None, exedir=None, log_file=None, runtime_file=None, hostfile=None):
    """Run D8 distance down to stream using the selected distance method.

    Extended from d8hdisttostrm; requires a compiled TauDEM installation.
    """
    fname = TauDEM.func_name('d8distdowntostream')
    executable = FileClass.get_executable_fullpath(fname, exedir)
    in_params = {'-fel': fel, '-p': p, '-src': src}
    opt_params = {'-thresh': thresh,
                  '-m': TauDEM.convertdistmethod(distancemethod)}
    out_params = {'-dist': dist}
    mpi_params = {'mpipath': mpiexedir, 'hostfile': hostfile, 'n': np}
    log_params = {'logfile': log_file, 'runtimefile': runtime_file}
    return TauDEM.run(executable, in_params, workingdir, opt_params,
                      out_params, mpi_params, log_params)
Run D8 distance down to stream by different method for distance. This function is extended from d8hdisttostrm by Liangjun. Please clone `TauDEM by lreis2415`_ and compile for this program. .. _TauDEM by lreis2415: https://github.com/lreis2415/TauDEM
def on_master_missing(self):
    """Attempt to spawn a master agency after the slave repeatedly failed to
    reach one; a file lock prevents several slaves spawning it at once."""
    self.info("We could not contact the master agency, starting a new one")
    if self._starting_master:
        # A previous callback already kicked off master startup.
        self.info("Master already starting, waiting for it")
        return
    if self._shutdown_task is not None:
        # We are terminating; spawning a master would be pointless.
        self.info("Not spwaning master because we are about to terminate "
                  "ourselves")
        return
    if self._startup_task is not None:
        # Still starting up: a standalone must have had a running master at
        # startup, so reaching this point now is a programming error.
        raise error.FeatError("Standalone started without a previous "
                              "master agency already running, terminating "
                              "it")
    if self._acquire_lock():
        self._starting_master = True
        # Release the lock after 10s so a failed spawn cannot lock out
        # the other slave agencies from retrying.
        self._release_lock_cl = time.callLater(10, self._release_lock)
        return self._spawn_agency('master')
Tries to spawn a master agency if the slave agency has failed to connect several times. A file lock is used to prevent several slave agencies from spawning the master agency at the same time.
def add_synonym(self, syn):
    """Add a synonym entry to the node identified by ``syn.class_id``."""
    node = self.node(syn.class_id)
    # Create the meta container and synonym list on first use.
    meta = node.setdefault('meta', {})
    meta.setdefault('synonyms', []).append(syn.as_dict())
Adds a synonym for a node
def parse_lines(self, lines: Iterable[str]) -> List[ParseResults]:
    """Parse each line of *lines* in order, using zero-based line numbers."""
    parsed = []
    for line_number, text in enumerate(lines):
        parsed.append(self.parseString(text, line_number))
    return parsed
Parse multiple lines in succession.
def format_diff_xml(a_xml, b_xml):
    """Create a `Differ`-style delta between two XML documents.

    Args:
        a_xml: str
        b_xml: str

    Returns:
        str: line-by-line ndiff of the pretty-printed documents.
    """
    a_lines = reformat_to_pretty_xml(a_xml).splitlines()
    b_lines = reformat_to_pretty_xml(b_xml).splitlines()
    return '\n'.join(difflib.ndiff(a_lines, b_lines))
Create a diff between two XML documents. Args: a_xml: str b_xml: str Returns: str : `Differ`-style delta
def _colorize(self, msg, color=None, encode=False): colors = { 'red': '31', 'green': '32', 'yellow': '33' } if not color or not color in colors: return msg if encode: return u'\x1b[1;{}m{}\x1b[0m'.format(colors[color], msg) return '\x1b[1;{}m{}\x1b[0m'.format(colors[color], msg)
Colorize a string.