code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def value_at(self, x):
    """Return the value of this `Series` at the given `xindex` value.

    Parameters
    ----------
    x : `float`, `~astropy.units.Quantity`
        the `xindex` value at which to search

    Returns
    -------
    y : `~astropy.units.Quantity`
        the value of this Series at the given `xindex` value
    """
    target = Quantity(x, self.xindex.unit).value
    matches = (self.xindex.value == target).nonzero()[0]
    try:
        idx = matches[0]
    except IndexError as exc:
        # rewrite the message so the caller sees which value was missing
        exc.args = ("Value %r not found in array index" % target,)
        raise
    return self[idx]
def chain_functions(functions):
    """Chain a list of single-argument functions together and return.

    The functions are applied in list order, and the output of the
    previous function is passed to the next one.

    Parameters
    ----------
    functions : list
        A list of single-argument functions to chain together.

    Returns
    -------
    func : callable
        A single-argument function.

    Examples
    --------
    >>> funcs = [lambda x: x * 4, len, lambda x: x + 5]
    >>> func = chain_functions(funcs)
    >>> func('hey')
    17
    """
    pipeline = list(functions)
    if not pipeline:
        return _no_op
    if len(pipeline) == 1:
        return pipeline[0]
    # reduce threads the running value through each function in order
    return partial(reduce, lambda acc, step: step(acc), pipeline)
def update(self, item, id_expression=None, upsert=False, update_ops=None,
           safe=None, **kwargs):
    """Update an item in the database.

    Uses the ``on_update`` keyword to each field to decide which
    operations to do.

    :param item: An instance of a :class:`~ommongo.document.Document` \
        subclass
    :param id_expression: A query expression that uniquely picks out \
        the item which should be updated.  If id_expression is not \
        passed, update uses item.mongo_id.
    :param upsert: Whether the update operation should be an upsert. \
        If the item may not be in the database yet this should be True
    :param update_ops: By default the operation used to update a field \
        is specified with the on_update argument to its constructor. \
        To override that value, use this dictionary, with \
        :class:`~ommongo.document.QueryField` objects as the keys \
        and the mongo operation to use as the values.
    :param kwargs: The kwargs are merged into update_ops dict to \
        decide which fields to update the operation for.  These can \
        only be for the top-level document since the keys \
        are just strings.

    .. warning:: This operation is **experimental** and **not fully
        tested**, although it does have code coverage.
    """
    # was a mutable default argument ({}); use None so the dict is not
    # shared between calls
    if update_ops is None:
        update_ops = {}
    if safe is None:
        safe = self.safe
    self.queue.append(UpdateDocumentOp(
        self.transaction_id, self, item, safe,
        id_expression=id_expression, upsert=upsert,
        update_ops=update_ops, **kwargs))
    if self.autoflush:
        return self.flush()
def list_flavors(self, limit=None, marker=None):
    """Return a list of all available Flavors.

    :param limit: maximum number of results to return (forwarded to
        the flavor manager)
    :param marker: pagination marker (forwarded to the flavor manager)
    """
    return self._flavor_manager.list(limit=limit, marker=marker)
def estimate(self, observations, weights):
    """Maximum likelihood estimation of the output model given the
    observations and weights.

    Parameters
    ----------
    observations : [ ndarray(T_k) ] with K elements
        A list of K observation trajectories, each having length T_k
    weights : [ ndarray(T_k, N) ] with K elements
        A list of K weight matrices, each having length T_k and
        containing the probability of any of the states in the given
        time step

    Notes
    -----
    Overwrites ``self._output_probabilities`` in place with the
    re-estimated, row-normalized output probability matrix.
    """
    N, M = self._output_probabilities.shape
    K = len(observations)
    # reset the accumulator before summing expected counts
    self._output_probabilities = np.zeros((N, M))
    if self.__impl__ == self.__IMPL_C__:
        for k in range(K):
            # C-accelerated accumulation of expected output counts
            dc.update_pout(observations[k], weights[k],
                           self._output_probabilities, dtype=config.dtype)
    elif self.__impl__ == self.__IMPL_PYTHON__:
        for k in range(K):
            for o in range(M):
                # time steps at which symbol o was observed
                times = np.where(observations[k] == o)[0]
                self._output_probabilities[:, o] += np.sum(weights[k][times, :], axis=0)
    else:
        raise RuntimeError('Implementation '+str(self.__impl__)+' not available')
    # normalize each row so it is a probability distribution over symbols
    self._output_probabilities /= np.sum(self._output_probabilities, axis=1)[:, None]
def _validate_geometry(self, geometry): if geometry is not None and geometry not in self.valid_geometries: raise InvalidParameterError("{} is not a valid geometry".format(geometry)) return geometry
Validates geometry, raising error if invalid.
def get_all_subscriptions(self, next_token=None):
    """Get list of all subscriptions.

    :type next_token: string
    :param next_token: Token returned by the previous call to this
        method (used for pagination).
    """
    params = {'ContentType' : 'JSON'}
    if next_token:
        params['NextToken'] = next_token
    response = self.make_request('ListSubscriptions', params, '/', 'GET')
    body = response.read()
    if response.status == 200:
        return json.loads(body)
    else:
        # log both the HTTP status and the response body before raising
        boto.log.error('%s %s' % (response.status, response.reason))
        boto.log.error('%s' % body)
        raise self.ResponseError(response.status, response.reason, body)
def _make_style_str(styledict): s = '' for key in list(styledict.keys()): s += "%s:%s;" % (key, styledict[key]) return s
Make an SVG style string from the dictionary. See also _parse_style_str also.
def clean_path(path):
    """Normalize a path to forward slashes and strip a leading './'."""
    normalized = path.replace(os.sep, '/')
    return normalized[2:] if normalized.startswith('./') else normalized
def linear(self, x):
    """Computes logits by running x through a linear layer.

    Args:
        x: A float32 tensor with shape [batch_size, length, hidden_size]

    Returns:
        float32 tensor with shape [batch_size, length, vocab_size].
    """
    with tf.name_scope("presoftmax_linear"):
        batch_size = tf.shape(x)[0]
        length = tf.shape(x)[1]
        # flatten to 2-D so one matmul covers every position in the batch
        x = tf.reshape(x, [-1, self.hidden_size])
        # NOTE(review): self.shared_weights presumably ties the embedding
        # matrix to the output projection — confirm against the class
        logits = tf.matmul(x, self.shared_weights, transpose_b=True)
        return tf.reshape(logits, [batch_size, length, self.vocab_size])
def import_checks(path):
    """Import checks module given relative path.

    :param path: relative path from which to import checks module
    :type path: str
    :returns: the imported module
    :raises FileNotFoundError: if ``path / .check50.yaml`` does not exist
    :raises yaml.YAMLError: if ``path / .check50.yaml`` is not valid YAML

    This function is particularly useful when a set of checks logically
    extends another, as is often the case in CS50's own problems that
    have a "less comfy" and "more comfy" version.  The "more comfy"
    version can include all of the "less comfy" checks like so::

        less = check50.import_checks("../less")
        from less import *

    .. note::
        the ``__name__`` of the imported module is given by the basename
        of the specified path (``less`` in the above example).
    """
    dir = internal.check_dir / path
    file = internal.load_config(dir)["checks"]
    mod = internal.import_file(dir.name, (dir / file).resolve())
    # register under the basename so `from <name> import *` works later
    sys.modules[dir.name] = mod
    return mod
def to_unix(cls, timestamp):
    """Wrapper over the time module to produce Unix epoch time as a float.

    :param timestamp: a `datetime.datetime` instance (interpreted in
        local time, sub-second precision is dropped by ``timetuple()``)
    :returns: seconds since the epoch as a float
    :raises TypeError: if ``timestamp`` is not a datetime object
    """
    if not isinstance(timestamp, datetime.datetime):
        # error message previously referenced 'Time.milliseconds'
        raise TypeError('Time.to_unix expects a datetime object')
    return time.mktime(timestamp.timetuple())
def _init_entri(self, laman):
    """Build entry objects from the fetched page.

    :param laman: response page returned by the online KBBI
    :type laman: Response
    """
    sup = BeautifulSoup(laman.text, 'html.parser')
    estr = ''
    # entries are delimited by <h2> headers between two <hr> tags;
    # accumulate markup until the next delimiter, then flush
    for label in sup.find('hr').next_siblings:
        if label.name == 'hr':
            # closing <hr>: flush the final entry and stop
            self.entri.append(Entri(estr))
            break
        if label.name == 'h2':
            # a new header starts a new entry; flush the previous one
            if estr:
                self.entri.append(Entri(estr))
            estr = ''
        estr += str(label).strip()
def authorization_url(self, **kwargs):
    """Generates an authorization URL.

    This is the first step in the OAuth 2.0 Authorization Flow.  The
    user's browser should be redirected to the returned URL.

    This method calls
    :meth:`requests_oauthlib.OAuth2Session.authorization_url` and
    specifies the client configuration's authorization URI (usually
    Google's authorization server) and specifies that "offline" access
    is desired.  This is required in order to obtain a refresh token.

    Args:
        kwargs: Additional arguments passed through to
            :meth:`requests_oauthlib.OAuth2Session.authorization_url`

    Returns:
        Tuple[str, str]: The generated authorization URL and state.  The
            user must visit the URL to complete the flow.  The state is
            used when completing the flow to verify that the request
            originated from your application.  If your application is
            using a different :class:`Flow` instance to obtain the
            token, you will need to specify the ``state`` when
            constructing the :class:`Flow`.
    """
    # request offline access by default so a refresh token is issued
    kwargs.setdefault('access_type', 'offline')
    url, state = self.oauth2session.authorization_url(
        self.client_config['auth_uri'], **kwargs)
    return url, state
def set_expiration(self, key, ignore_missing=False, additional_seconds=None,
                   seconds=None):
    """Alter the expiration time for a key.

    If the key is not present, raise ``KeyError`` unless
    ``ignore_missing`` is set to ``True``.

    Args:
        key: The key whose expiration we are changing.
        ignore_missing (bool): If set, return silently when the key
            does not exist.  Default is ``False``.
        additional_seconds (int): Add this many seconds to the current
            expiration time.
        seconds (int): Expire the key this many seconds from now.

    Raises:
        KeyError: if the key is missing and ``ignore_missing`` is False.
    """
    # single membership test instead of the original duplicated checks;
    # raise KeyError (a subclass of Exception) rather than bare Exception
    if key not in self.time_dict:
        if ignore_missing:
            return
        raise KeyError('Key missing from `TimedDict` and '
                       '`ignore_missing` is False.')
    if additional_seconds is not None:
        self.time_dict[key] += additional_seconds
    elif seconds is not None:
        self.time_dict[key] = time.time() + seconds
def login(config, api_key=""):
    """Store your Bugzilla API Key.

    Prompts for the key if one was not supplied, verifies it against the
    Bugzilla ``whoami`` endpoint, and persists it to the config file on
    success.
    """
    if not api_key:
        info_out(
            "If you don't have an API Key, go to:\n"
            "https://bugzilla.mozilla.org/userprefs.cgi?tab=apikey\n"
        )
        api_key = getpass.getpass("API Key: ")
    url = urllib.parse.urljoin(config.bugzilla_url, "/rest/whoami")
    # never send the API key over plain HTTP
    assert url.startswith("https://"), url
    response = requests.get(url, params={"api_key": api_key})
    if response.status_code == 200:
        # a 200 can still carry an application-level error payload
        if response.json().get("error"):
            error_out("Failed - {}".format(response.json()))
        else:
            update(
                config.configfile,
                {
                    "BUGZILLA": {
                        "bugzilla_url": config.bugzilla_url,
                        "api_key": api_key,
                    }
                },
            )
            success_out("Yay! It worked!")
    else:
        error_out("Failed - {} ({})".format(response.status_code, response.json()))
def tokens(cls, tokens):
    """Create a Lnk object for a token range.

    Args:
        tokens: a list of token identifiers
    """
    token_ids = tuple(int(token) for token in tokens)
    return cls(Lnk.TOKENS, token_ids)
def on_post(resc, req, resp):
    """Deserialize the payload & create the new single item."""
    signals.pre_req.send(resc.model)
    signals.pre_req_create.send(resc.model)
    # build a fresh model instance from the deserialized payload
    props = req.deserialize()
    model = resc.model()
    from_rest(model, props)
    goldman.sess.store.create(model)
    # re-serialize the stored model for the response body
    props = to_rest_model(model, includes=req.includes)
    resp.last_modified = model.updated
    resp.location = '%s/%s' % (req.path, model.rid_value)
    resp.status = falcon.HTTP_201
    resp.serialize(props)
    signals.post_req.send(resc.model)
    signals.post_req_create.send(resc.model)
def kill(self, name):
    """Kills the given component.

    :param name: Name of the component to kill
    :raise ValueError: Invalid component name
    """
    if not name:
        raise ValueError("Name can't be None or empty")
    with self.__instances_lock:
        try:
            stored_instance = self.__instances.pop(name)
            factory_context = stored_instance.context.factory_context
            stored_instance.kill()
            factory_context.is_singleton_active = False
        except KeyError:
            # not fully instantiated: it may still be waiting for handlers
            try:
                context, _ = self.__waiting_handlers.pop(name)
                context.factory_context.is_singleton_active = False
            except KeyError:
                raise ValueError(
                    "Unknown component instance '{0}'".format(name)
                )
def growthfromrange(rangegrowth, startdate, enddate):
    """Annual growth given growth from start date to end date."""
    span_seconds = (pd.Timestamp(enddate) - pd.Timestamp(startdate)).total_seconds()
    year_seconds = dt.timedelta(365.25).total_seconds()
    return yrlygrowth(rangegrowth, span_seconds / year_seconds)
def winnow_by_keys(dct, keys=None, filter_func=None):
    """Separate a dict into has-keys and not-has-keys pairs, using either
    a list of keys or a filtering function.
    """
    if keys is not None:
        passes = lambda key: key in keys
    elif filter_func is not None:
        passes = filter_func
    else:
        # no criterion given: nothing passes
        passes = lambda key: False
    has = {}
    has_not = {}
    for key, value in dct.items():
        (has if passes(key) else has_not)[key] = value
    return WinnowedResult(has, has_not)
def determine_repo_dir(template, abbreviations, clone_to_dir, checkout,
                       no_input, password=None):
    """Locate the repository directory from a template reference.

    Applies repository abbreviations to the template reference.  If the
    template refers to a repository URL, clone it.  If the template is a
    path to a local repository, use it.

    :param template: A directory containing a project template directory,
        or a URL to a git repository.
    :param abbreviations: A dictionary of repository abbreviation
        definitions.
    :param clone_to_dir: The directory to clone the repository into.
    :param checkout: The branch, tag or commit ID to checkout after clone.
    :param no_input: Prompt the user at command line for manual
        configuration?
    :param password: The password to use when extracting the repository.
    :return: A tuple containing the cookiecutter template directory, and
        a boolean describing whether that directory should be cleaned up
        after the template has been instantiated.
    :raises: `RepositoryNotFound` if a repository directory could not be
        found.
    """
    template = expand_abbreviations(template, abbreviations)
    if is_zip_file(template):
        # zip archives are extracted to a temp dir that must be cleaned up
        unzipped_dir = unzip(
            zip_uri=template,
            is_url=is_repo_url(template),
            clone_to_dir=clone_to_dir,
            no_input=no_input,
            password=password
        )
        repository_candidates = [unzipped_dir]
        cleanup = True
    elif is_repo_url(template):
        cloned_repo = clone(
            repo_url=template,
            checkout=checkout,
            clone_to_dir=clone_to_dir,
            no_input=no_input,
        )
        repository_candidates = [cloned_repo]
        cleanup = False
    else:
        # local reference: try it as-is and relative to the clone dir
        repository_candidates = [
            template,
            os.path.join(clone_to_dir, template)
        ]
        cleanup = False
    for repo_candidate in repository_candidates:
        if repository_has_cookiecutter_json(repo_candidate):
            return repo_candidate, cleanup
    raise RepositoryNotFound(
        'A valid repository for "{}" could not be found in the following '
        'locations:\n{}'.format(
            template,
            '\n'.join(repository_candidates)
        )
    )
def pretty_dump(fn):
    """Decorator used to output prettified JSON.

    ``response.content_type`` is set to ``application/json; charset=utf-8``.

    Args:
        fn (fn pointer): Function returning any basic python data structure.

    Returns:
        str: Data converted to prettified JSON.
    """
    @wraps(fn)
    def pretty_dump_wrapper(*args, **kwargs):
        response.content_type = "application/json; charset=utf-8"
        data = fn(*args, **kwargs)
        return json.dumps(data, indent=4, separators=(',', ': '))
    return pretty_dump_wrapper
def _maybe_normalize(self, var): if self.normalize: try: return self._norm.normalize(var) except HGVSUnsupportedOperationError as e: _logger.warning(str(e) + "; returning unnormalized variant") return var
normalize variant if requested, and ignore HGVSUnsupportedOperationError This is better than checking whether the variant is intronic because future UTAs will support LRG, which will enable checking intronic variants.
def addFailure(self, test, err, capt=None, tbinfo=None):
    """Record testcase run information after a test failure.

    ``capt`` and ``tbinfo`` are accepted for plugin-API compatibility
    but are not used here.
    """
    self.__insert_test_result(constants.State.FAILURE, test, err)
def add_code_mapping(self, from_pdb_code, to_pdb_code):
    """Add a code mapping without a given instance.

    Idempotent for identical mappings; conflicting remappings are
    rejected.

    :raises ValueError: if ``from_pdb_code`` is already mapped to a
        different code (previously a bare ``assert``, which is stripped
        under ``python -O``).
    """
    if from_pdb_code in self.code_map:
        if self.code_map[from_pdb_code] != to_pdb_code:
            raise ValueError(
                "Conflicting code mapping for %r: %r vs %r" %
                (from_pdb_code, self.code_map[from_pdb_code], to_pdb_code))
    else:
        self.code_map[from_pdb_code] = to_pdb_code
def create_query(self, fields=None):
    """Convenience method to create a Query with the Index's fields.

    Args:
        fields (iterable, optional): The fields to include in the Query,
            defaults to the Index's `all_fields`.

    Returns:
        Query: With the specified fields or all the fields in the Index.
    """
    if fields is None:
        return Query(self.fields)
    unknown_fields = set(fields) - set(self.fields)
    if unknown_fields:
        raise BaseLunrException(
            "Fields {} are not part of the index", unknown_fields
        )
    return Query(fields)
def tx_for_tx_hash(self, tx_hash):
    """Return the pycoin.tx object for tx_hash.

    Fetches the raw transaction hex from the block explorer API and
    parses it.  Failures propagate to the caller unchanged — the
    previous ``except Exception: raise Exception`` discarded the
    original error and traceback detail.
    """
    url_append = "?token=%s&includeHex=true" % self.api_key
    url = self.base_url("txs/%s%s" % (b2h_rev(tx_hash), url_append))
    result = json.loads(urlopen(url).read().decode("utf8"))
    return Tx.parse(io.BytesIO(h2b(result.get("hex"))))
def reset(self):
    """Remove all annotations from window."""
    self.idx_annotations.setText('Load Annotation File...')
    self.idx_rater.setText('')
    # drop annotation state
    self.annot = None
    self.dataset_markers = None
    # clear the marker table
    self.idx_marker.clearContents()
    self.idx_marker.setRowCount(0)
    # replace the two summary group boxes with fresh empty ones
    w1 = self.idx_summary.takeAt(1).widget()
    w2 = self.idx_summary.takeAt(1).widget()
    self.idx_summary.removeWidget(w1)
    self.idx_summary.removeWidget(w2)
    w1.deleteLater()
    w2.deleteLater()
    b1 = QGroupBox('Staging')
    b2 = QGroupBox('Signal quality')
    self.idx_summary.addWidget(b1)
    self.idx_summary.addWidget(b2)
    # refresh dependent UI elements
    self.display_eventtype()
    self.update_annotations()
    self.parent.create_menubar()
def eventsource_connect(url, io_loop=None, callback=None, connect_timeout=None):
    """Client-side eventsource support.

    Takes a url (or a prepared ``HTTPRequest``) and returns a Future
    whose result is a `EventSourceClient`.
    """
    if io_loop is None:
        io_loop = IOLoop.current()
    if isinstance(url, httpclient.HTTPRequest):
        # a full request was supplied; a separate timeout is not allowed
        assert connect_timeout is None
        request = url
        # copy headers into a case-insensitive HTTPHeaders instance
        request.headers = httputil.HTTPHeaders(request.headers)
    else:
        request = httpclient.HTTPRequest(
            url, connect_timeout=connect_timeout,
            headers=httputil.HTTPHeaders({
                "Accept-Encoding": "identity"
            })
        )
    # fill in tornado's default request options
    request = httpclient._RequestProxy(
        request, httpclient.HTTPRequest._DEFAULTS)
    conn = EventSourceClient(io_loop, request)
    if callback is not None:
        io_loop.add_future(conn.connect_future, callback)
    return conn.connect_future
def frommembers(cls, members=()):
    """Create a set from an iterable of members."""
    combined = sum(cls._map[member] for member in set(members))
    return cls.fromint(combined)
def sort_dicoms(dicoms):
    """Sort the dicoms based on the image position patient.

    The slices are ordered along whichever of the three axes has the
    largest positional spread (ties prefer x, then y, then z).

    :param dicoms: list of dicoms
    """
    best_order = None
    best_spread = None
    for axis in range(3):
        ordered = sorted(dicoms, key=lambda d, a=axis: d.ImagePositionPatient[a])
        spread = abs(ordered[-1].ImagePositionPatient[axis] -
                     ordered[0].ImagePositionPatient[axis])
        # strict '>' keeps the earlier axis on ties, matching x >= y >= z
        if best_spread is None or spread > best_spread:
            best_spread = spread
            best_order = ordered
    return best_order
def rebin_scale(a, scale=1):
    """Scale an array to a new shape (each dimension multiplied by scale)."""
    scaled_shape = tuple(dim * scale for dim in a.shape)
    return rebin(a, scaled_shape)
def active_days(records):
    """The number of days during which the user was active.

    A user is considered active if he sends a text, receives a text,
    initiates a call, receives a call, or has a mobility point.
    """
    return len({record.datetime.date() for record in records})
def mergeStyles(self, styles):
    """Merge the given style dicts into self (XXX bugfix for use in PISA).

    Existing entries are shallow-copied before being updated so shared
    style objects are not mutated in place.
    """
    for k, v in six.iteritems(styles):
        if k in self and self[k]:
            self[k] = copy.copy(self[k])
            self[k].update(v)
        else:
            self[k] = v
def get_token(self, code=None):
    """Use a SteemConnect refresh token to retrieve an access token.

    Returns the access token string on success, or ``False`` when
    SteemConnect reports an error.
    """
    tokenobj = self.steemconnect().get_access_token(code)
    # NOTE(review): iterates the response dict and returns on whichever
    # of 'error'/'access_token' is encountered first — confirm ordering
    # assumptions against the SteemConnect client
    for t in tokenobj:
        if t == 'error':
            self.msg.error_message(str(tokenobj[t]))
            return False
        elif t == 'access_token':
            # remember identity and refresh token for later calls
            self.username = tokenobj['username']
            self.refresh_token = tokenobj['refresh_token']
            return tokenobj[t]
def _get_ruuvitag_datas(macs=[], search_duratio_sec=None, run_flag=RunFlag(), bt_device=''):
    """Get data from BluetoothCommunication and handle data encoding.

    Args:
        macs (list): MAC addresses. Default empty list
        search_duratio_sec (int): Search duration in seconds. Default None
        run_flag (object): RunFlag object. Function executes while
            run_flag.running. Default new RunFlag
        bt_device (string): Bluetooth device id

    Yields:
        tuple: MAC and State of RuuviTag sensor data
    """
    # MACs whose payloads failed to decode; skipped by the BLE layer
    mac_blacklist = []
    start_time = time.time()
    data_iter = ble.get_datas(mac_blacklist, bt_device)
    for ble_data in data_iter:
        # stop conditions are signalled back into the generator
        if search_duratio_sec and time.time() - start_time > search_duratio_sec:
            data_iter.send(StopIteration)
            break
        if not run_flag.running:
            data_iter.send(StopIteration)
            break
        # when a MAC filter is given, ignore other devices
        if macs and not ble_data[0] in macs:
            continue
        (data_format, data) = RuuviTagSensor.convert_data(ble_data[1])
        if data is not None:
            state = get_decoder(data_format).decode_data(data)
            if state is not None:
                yield (ble_data[0], state)
            else:
                # undecodable payload: blacklist the device
                mac_blacklist.append(ble_data[0])
def delete(self, exchange, if_unused=False, nowait=True, ticket=None, cb=None):
    """Delete an exchange.

    :param exchange: name of the exchange to delete
    :param if_unused: only delete if the exchange has no bindings
    :param nowait: do not wait for a broker reply (forced off when a
        callback is supplied or the broker disallows it)
    :param ticket: access ticket; defaults to the channel default
    :param cb: callback invoked when delete-ok is received
    """
    # nowait only if the broker allows it and no callback expects a reply
    nowait = nowait and self.allow_nowait() and not cb
    args = Writer()
    args.write_short(ticket or self.default_ticket).\
        write_shortstr(exchange).\
        write_bits(if_unused, nowait)
    # class 40 (exchange), method 20 (delete)
    self.send_frame(MethodFrame(self.channel_id, 40, 20, args))
    if not nowait:
        self._delete_cb.append(cb)
        self.channel.add_synchronous_cb(self._recv_delete_ok)
def createStopOrder(self, quantity, parentId=0, stop=0., trail=None,
                    transmit=True, group=None, stop_limit=False,
                    rth=False, tif="DAY", account=None):
    """Creates a STOP order.

    :param quantity: order quantity (sign determines side)
    :param parentId: id of the parent order this stop is attached to
    :param stop: stop price (or trailing amount/percent when ``trail``)
    :param trail: ``"percent"`` for a trailing-percent stop, any other
        truthy value for a trailing-amount stop, falsy for a plain stop
    :param transmit: transmit the order immediately
    :param group: OCA group name
    :param stop_limit: when True (and not trailing), create a stop-limit
        order using ``stop`` as the limit price
    :param rth: regular trading hours only
    :param tif: time in force
    :param account: account to place the order under
    """
    if trail:
        if trail == "percent":
            order = self.createOrder(quantity,
                trailingPercent=stop,
                transmit=transmit,
                orderType=dataTypes["ORDER_TYPE_TRAIL_STOP"],
                ocaGroup=group,
                parentId=parentId,
                rth=rth,
                tif=tif,
                account=account
            )
        else:
            order = self.createOrder(quantity,
                trailStopPrice=stop,
                stop=stop,
                transmit=transmit,
                orderType=dataTypes["ORDER_TYPE_TRAIL_STOP"],
                ocaGroup=group,
                parentId=parentId,
                rth=rth,
                tif=tif,
                account=account
            )
    else:
        # plain stop or stop-limit; price is only used for stop-limit
        order = self.createOrder(quantity,
            stop=stop,
            price=stop if stop_limit else 0.,
            transmit=transmit,
            orderType=dataTypes["ORDER_TYPE_STOP_LIMIT"] if stop_limit else dataTypes["ORDER_TYPE_STOP"],
            ocaGroup=group,
            parentId=parentId,
            rth=rth,
            tif=tif,
            account=account
        )
    return order
def to_bqm(self, model):
    """Given a pysmt model, return a bqm.

    Adds the values of the biases as determined by the SMT solver to a
    bqm.

    Args:
        model: A pysmt model.

    Returns:
        :obj:`dimod.BinaryQuadraticModel`
    """
    # resolve each symbolic bias to a concrete python float via the model
    linear = ((v, float(model.get_py_value(bias)))
              for v, bias in self.linear.items())
    quadratic = ((u, v, float(model.get_py_value(bias)))
                 for (u, v), bias in self.quadratic.items())
    offset = float(model.get_py_value(self.offset))
    return dimod.BinaryQuadraticModel(linear, quadratic, offset, dimod.SPIN)
def get_grid_mapping_variables(ds):
    """Returns a list of grid mapping variable names referenced by the
    dataset's variables and actually present in the dataset.

    :param netCDF4.Dataset ds: An open netCDF4 Dataset
    """
    referencing = ds.get_variables_by_attributes(grid_mapping=lambda x: x is not None)
    return [ncvar.grid_mapping
            for ncvar in referencing
            if ncvar.grid_mapping in ds.variables]
def absent(name, **connection_args):
    """Ensure that the named database is absent.

    name
        The name of the database to remove
    """
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    if __salt__['mysql.db_exists'](name, **connection_args):
        if __opts__['test']:
            # test mode: report what would happen without removing
            ret['result'] = None
            ret['comment'] = \
                'Database {0} is present and needs to be removed'.format(name)
            return ret
        if __salt__['mysql.db_remove'](name, **connection_args):
            ret['comment'] = 'Database {0} has been removed'.format(name)
            ret['changes'][name] = 'Absent'
            return ret
        else:
            err = _get_mysql_error()
            if err is not None:
                ret['comment'] = 'Unable to remove database {0} ' \
                                 '({1})'.format(name, err)
                ret['result'] = False
                return ret
            # NOTE(review): removal failed with no MySQL error — control
            # falls off the end and returns None; verify intended
    else:
        err = _get_mysql_error()
        if err is not None:
            # could not even check existence (e.g. connection error)
            ret['comment'] = err
            ret['result'] = False
            return ret
        ret['comment'] = ('Database {0} is not present, so it cannot be removed'
                          ).format(name)
        return ret
def _remove_untraceable(self):
    """Remove from the tracer those wires that CompiledSimulation cannot
    track.

    Create _probe_mapping for wires only traceable via probes.
    """
    self._probe_mapping = {}
    # NOTE(review): _traceable appears to populate _probe_mapping as a
    # side effect while filtering — confirm against its definition
    wvs = {wv for wv in self.tracer.wires_to_track if self._traceable(wv)}
    self.tracer.wires_to_track = wvs
    self.tracer._wires = {wv.name: wv for wv in wvs}
    # re-initialize the trace storage for the reduced wire set
    self.tracer.trace.__init__(wvs)
def _raw_input_contains_national_prefix(raw_input, national_prefix, region_code):
    """Check if raw_input, which is assumed to be in the national format,
    has a national prefix.

    The national prefix is assumed to be in digits-only form.
    """
    nnn = normalize_digits_only(raw_input)
    if nnn.startswith(national_prefix):
        try:
            # the leading digits might be part of the number itself;
            # only treat them as a prefix if stripping them still
            # leaves a valid number for the region
            return is_valid_number(parse(nnn[len(national_prefix):], region_code))
        except NumberParseException:
            return False
    return False
def obj_deref(ref):
    """Returns the object identified by `ref`."""
    from indico_livesync.models.queue import EntryType
    dispatch = {
        EntryType.category: (Category, 'category_id'),
        EntryType.event: (Event, 'event_id'),
        EntryType.session: (Session, 'session_id'),
        EntryType.contribution: (Contribution, 'contrib_id'),
        EntryType.subcontribution: (SubContribution, 'subcontrib_id'),
    }
    entry = dispatch.get(ref['type'])
    if entry is None:
        raise ValueError('Unexpected object type: {}'.format(ref['type']))
    model, id_key = entry
    return model.get_one(ref[id_key])
def transform_attrs(obj, keys, container, func, extras=None):
    """Apply one function to a set of keys, checking whether each css key
    is in container, and translating css keys to reportlab attributes.

    For each ``(reportlab, css)`` pair in ``keys``: if ``css`` is present
    in ``container``, its value is prepended to ``extras`` and
    ``func(*extras)`` is assigned to ``obj.<reportlab>``.

    extras: extra params for func; it is called like func(*[param1, param2])
    obj = frag
    keys = [(reportlab, css), ...]
    container = cssAttr
    """
    cpextras = extras
    for reportlab, css in keys:
        # restart from the caller's extras for each key pair
        extras = cpextras
        if extras is None:
            extras = []
        elif not isinstance(extras, list):
            extras = [extras]
        # NOTE(review): when the caller passes a list, insert() mutates it
        # and values accumulate across iterations — verify intended
        if css in container:
            extras.insert(0, container[css])
        setattr(obj, reportlab, func(*extras))
def send(self, to, from_, body):
    """Send BODY to TO from FROM_ as an SMS via the Twilio REST client.

    Any :class:`twilio.TwilioRestException` propagates to the caller
    unchanged (the previous ``except ... : raise`` wrapper was a no-op).
    """
    msg = self.client.sms.messages.create(
        body=body,
        to=to,
        from_=from_
    )
    # print() call replaces the Python-2-only `print msg.sid` statement
    print(msg.sid)
def _enable_logpersist(self):
    """Attempts to enable logpersist daemon to persist logs."""
    if not self._ad.is_rootable:
        # logpersist needs a rootable build; nothing to do otherwise
        return
    logpersist_warning = ('%s encountered an error enabling persistent'
                         ' logs, logs may not get saved.')
    if not self._ad.adb.has_shell_command('logpersist.start'):
        logging.warning(logpersist_warning, self)
        return
    try:
        # restart cleanly: stop and clear any previous logpersist state
        self._ad.adb.shell('logpersist.stop --clear')
        self._ad.adb.shell('logpersist.start')
    except adb.AdbError:
        # best-effort: warn but do not fail device setup
        logging.warning(logpersist_warning, self)
def to_rest_models(models, includes=None):
    """Convert the models into a dict for serialization.

    models should be an array of single model objects that will each be
    serialized.

    :return: dict
    """
    return {
        'data': [_to_rest(model, includes=includes) for model in models],
        'included': _to_rest_includes(models, includes=includes),
    }
def _build_time(time, kwargs): tz = kwargs.pop('tz', 'UTC') if time: if kwargs: raise ValueError('Cannot pass kwargs and a time') else: return ensure_utc(time, tz) elif not kwargs: raise ValueError('Must pass a time or kwargs') else: return datetime.time(**kwargs)
Builds the time argument for event rules.
def analyze(output_dir, dataset, cloud=False, project_id=None):
    """Blocking version of analyze_async.

    See documentation of analyze_async.
    """
    job = analyze_async(
        output_dir=output_dir,
        dataset=dataset,
        cloud=cloud,
        project_id=project_id)
    # block until the async job completes, then report its final state
    job.wait()
    print('Analyze: ' + str(job.state))
def revoke_token(token_id, user):
    """Revokes the given token.

    Raises a TokenNotFound error if the token does not exist in the
    database.
    """
    try:
        # .one() raises NoResultFound unless exactly one row matches
        token = TokenBlacklist.query.filter_by(id=token_id, user_identity=user).one()
        token.revoked = True
        db.session.commit()
    except NoResultFound:
        raise TokenNotFound("Could not find the token {}".format(token_id))
def convert_timestamp(timestamp):
    """Converts a bokehJS (millisecond) timestamp to datetime64."""
    seconds = timestamp / 1000.
    as_datetime = dt.datetime.utcfromtimestamp(seconds)
    return np.datetime64(as_datetime.replace(tzinfo=None))
def filter(objects, Type=None, min=-1, max=-1):
    """Filter objects by type, minimum size, and/or maximum size.

    Keyword arguments:
    Type -- object type to filter by (None keeps all objects)
    min -- minimum object size (inclusive); -1 disables the bound
    max -- maximum object size (inclusive); -1 disables the bound

    Raises ValueError when both bounds are given and min > max.

    Note: the parameter names shadow builtins (`filter`, `min`, `max`,
    `type`); they are kept for interface compatibility.
    """
    # only an error when BOTH bounds are active; previously a lone `min`
    # spuriously raised because the default max (-1) is always smaller
    if -1 < max < min:
        raise ValueError("minimum must be smaller than maximum")
    # with no Type filter, start from all objects (previously everything
    # was dropped because res stayed [])
    if Type is None:
        res = list(objects)
    else:
        res = [o for o in objects if isinstance(o, Type)]
    # size bounds were inverted relative to the documented contract
    if min > -1:
        res = [o for o in res if _getsizeof(o) >= min]
    if max > -1:
        res = [o for o in res if _getsizeof(o) <= max]
    return res
def add_nodes_from(self, nodes, weights=None):
    """Add multiple nodes to the Graph.

    **The behaviour of adding weights is different than in networkx.

    Parameters
    ----------
    nodes: iterable container
        A container of nodes (list, dict, set, or any hashable python
        object).
    weights: list, tuple (default=None)
        A container of weights (int, float).  The weight value at index
        i is associated with the variable at index i.

    Raises
    ------
    ValueError
        If ``weights`` is given and its length differs from ``nodes``.
    """
    nodes = list(nodes)
    if weights:
        if len(nodes) != len(weights):
            # message previously read "...weightsshould" (missing space)
            raise ValueError("The number of elements in nodes and weights "
                             "should be equal.")
        # zip instead of index-based iteration
        for node, weight in zip(nodes, weights):
            self.add_node(node=node, weight=weight)
    else:
        for node in nodes:
            self.add_node(node=node)
def add_crosshair_to_image(fname, opFilename):
    """Convert an image by adding a cross hair.

    Reads the image at ``fname``, draws a white corner-to-corner X over
    it, and saves the result to ``opFilename``.
    """
    im = Image.open(fname)
    draw = ImageDraw.Draw(im)
    # diagonal from top-left to bottom-right
    draw.line((0, 0) + im.size, fill=(255, 255, 255))
    # diagonal from bottom-left to top-right
    draw.line((0, im.size[1], im.size[0], 0), fill=(255, 255, 255))
    del draw
    im.save(opFilename)
def supernodes(par, post, colcount):
    """Find supernodes.

    ARGUMENTS
    par       parent array

    post      array with post ordering

    colcount  array with column counts

    RETURNS
    snode     array with supernodes; snode[snptr[k]:snptr[k+1]] contains
              the indices of supernode k

    snptr     pointer array; snptr[k] is the index of the representative
              vertex of supernode k in the snode array

    snpar     supernodal parent structure
    """
    snpar, flag = pothen_sun(par, post, colcount)
    n = len(par)
    N = len(snpar)
    snode = matrix(0, (n,1))
    snptr = matrix(0, (N+1,1))
    # group vertices by their supernode representative: flag[i] < 0 means
    # i is itself a representative, otherwise flag[i] points to one
    slist = [[] for i in range(n)]
    for i in range(n):
        f = flag[i]
        if f < 0:
            slist[i].append(i)
        else:
            slist[f].append(i)
    # pack the non-empty groups contiguously into snode / snptr
    k = 0; j = 0
    for i,sl in enumerate(slist):
        nsl = len(sl)
        if nsl > 0:
            snode[k:k+nsl] = matrix(sl)
            snptr[j+1] = snptr[j] + nsl
            k += nsl
            j += 1
    return snode, snptr, snpar
def rerun(version="3.7.0"):
    """Rerun last example code block with specified version of python."""
    from commandlib import Command
    # run the saved example with the requested interpreter, from the
    # state directory so relative paths resolve as in the original run
    Command(DIR.gen.joinpath("py{0}".format(version), "bin", "python"))(
        DIR.gen.joinpath("state", "examplepythoncode.py")
    ).in_dir(DIR.gen.joinpath("state")).run()
def rebuild(self, image):
    """Rebuild the droplet with the specified image.

    A rebuild action functions just like a new create. [APIDocs]_

    :param image: an image ID, an image slug, or an `Image` object
        representing the image the droplet should use as a base
    :type image: integer, string, or `Image`
    :return: an `Action` representing the in-progress operation on the
        droplet
    :rtype: Action
    :raises DOAPIError: if the API endpoint replies with an error
    """
    if isinstance(image, Image):
        # the API takes the numeric id, not the Image object
        image = image.id
    return self.act(type='rebuild', image=image)
def stream_fastq_full(fastq, threads):
    """Generator yielding per-read metrics from a plain fastq file.

    Reads are parsed with Bio.SeqIO and metric extraction is fanned out
    over a process pool of ``threads`` workers.
    """
    logging.info("Nanoget: Starting to collect full metrics from plain fastq file.")
    inputfastq = handle_compressed_input(fastq)
    with cfutures.ProcessPoolExecutor(max_workers=threads) as executor:
        yield from executor.map(extract_all_from_fastq,
                                SeqIO.parse(inputfastq, "fastq"))
    logging.info("Nanoget: Finished collecting statistics from plain fastq file.")
Generator for returning metrics extracted from fastq. Extract from a fastq file: -readname -average and median quality -read_length
def check_cond_latents(cond_latents, hparams):
    """Shape checking for cond_latents.

    Raises
    ------
    ValueError
        If the number of conditional latent sets, or the number of
        per-level latents in any set, disagrees with ``hparams``.
    """
    if cond_latents is None:
        return
    # Promote a single latent set to a one-element list of sets.
    if not isinstance(cond_latents[0], list):
        cond_latents = [cond_latents]
    expected = hparams.num_cond_latents
    # The conv_net encoder may additionally condition on the first frame.
    if hparams.latent_dist_encoder == "conv_net":
        expected += int(hparams.cond_first_frame)
    actual = len(cond_latents)
    if actual != expected:
        raise ValueError("Expected number of cond_latents: %d, got %d" %
                         (expected, actual))
    per_level = hparams.n_levels - 1
    for latents in cond_latents:
        if len(latents) != per_level:
            raise ValueError("Expected level_latents to be %d, got %d" %
                             (per_level, len(latents)))
Shape checking for cond_latents.
def _sample_groups(problem, N, num_levels=4):
    """Generate trajectories for groups.

    Returns an N(g+1)-by-k array of N trajectories, where g is the number
    of groups and k is the number of factors.

    Arguments
    ---------
    problem : dict
        The problem definition
    N : int
        The number of trajectories to generate
    num_levels : int, default=4
        The number of grid levels

    Returns
    -------
    numpy.ndarray
    """
    if len(problem['groups']) != problem['num_vars']:
        raise ValueError("Groups do not match to number of variables")
    group_membership, _ = compute_groups_matrix(problem['groups'])
    if group_membership is None:
        raise ValueError("Please define the 'group_membership' matrix")
    if not isinstance(group_membership, np.ndarray):
        # Previously this message carried a backslash-continuation's run
        # of indentation spaces; emit a clean single-line message.
        raise TypeError("Argument 'group_membership' should be formatted "
                        "as a numpy ndarray")
    num_params = group_membership.shape[0]
    num_groups = group_membership.shape[1]
    # NOTE: the original pre-allocated a zeros array here and immediately
    # overwrote it; the dead allocation has been removed.
    sample = np.array([generate_trajectory(group_membership, num_levels)
                       for _ in range(N)])
    # Each trajectory has (num_groups + 1) points of num_params values.
    return sample.reshape((N * (num_groups + 1), num_params))
Generate trajectories for groups Returns an :math:`N(g+1)`-by-:math:`k` array of `N` trajectories, where :math:`g` is the number of groups and :math:`k` is the number of factors Arguments --------- problem : dict The problem definition N : int The number of trajectories to generate num_levels : int, default=4 The number of grid levels Returns ------- numpy.ndarray
def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
    """Serialize this object to ``path_or_buf`` using msgpack format.

    THIS IS AN EXPERIMENTAL LIBRARY and the storage format may not be
    stable until a future release.

    Parameters
    ----------
    path_or_buf : string, buffer-like, or None
        If None, the serialized bytes are returned as a string.
    encoding : str
        Encoding used for string data.
    **kwargs
        Forwarded to the packer (e.g. ``append``, ``compress``).
    """
    from pandas.io import packers
    return packers.to_msgpack(path_or_buf, self, encoding=encoding, **kwargs)
Serialize object to input file path using msgpack format. THIS IS AN EXPERIMENTAL LIBRARY and the storage format may not be stable until a future release. Parameters ---------- path : string File path, buffer-like, or None if None, return generated string append : bool whether to append to an existing msgpack (default is False) compress : type of compressor (zlib or blosc), default to None (no compression)
def fuller_scaling(target, DABo, To, Po, temperature='pore.temperature',
                   pressure='pore.pressure'):
    r"""Adjust a gas diffusion coefficient from reference conditions to the
    target's conditions using Fuller scaling:
    :math:`D_{AB} = D_{AB,o} (T/T_o)^{1.75} (P_o/P)`.

    Parameters
    ----------
    target : OpenPNM Object
        Provides the temperature and pressure arrays and controls the
        length of the calculated array.
    DABo : float, array_like
        Diffusion coefficient at reference conditions.
    To, Po : float, array_like
        Reference temperature (K) and pressure (Pa).
    temperature : string
        Dictionary key holding temperature values in Kelvin (K).
    pressure : string
        Dictionary key holding pressure values in Pascals (Pa).

    Returns
    -------
    The diffusion coefficient scaled to the target conditions.
    """
    # BUG FIX: a stray bare `r` statement (remnant of a stripped raw
    # docstring) used to raise NameError whenever this ran.
    Ti = target[temperature]
    Pi = target[pressure]
    # Fuller correlation: D scales as T**1.75 and inversely with P.
    return DABo * (Ti / To) ** 1.75 * (Po / Pi)
r""" Uses Fuller model to adjust a diffusion coefficient for gases from reference conditions to conditions of interest Parameters ---------- target : OpenPNM Object The object for which these values are being calculated. This controls the length of the calculated array, and also provides access to other necessary thermofluid properties. DABo : float, array_like Diffusion coefficient at reference conditions Po, To : float, array_like Pressure & temperature at reference conditions, respectively pressure : string The dictionary key containing the pressure values in Pascals (Pa) temperature : string The dictionary key containing the temperature values in Kelvin (K)
def pad(self, *args, **kwargs):
    """Apply a padding to each segment in this `DataQualityFlag`.

    With no positional arguments the flag's own ``padding`` attribute is
    used; otherwise two values give the start and end padding. Both the
    ``known`` and ``active`` segment lists are padded; the result is not
    coalesced.

    Parameters
    ----------
    start : `float`
        padding applied to the start of each segment
    end : `float`
        padding applied to the end of each segment
    inplace : `bool`, optional, default: `False`
        modify this object in-place instead of returning a padded copy

    Returns
    -------
    paddedflag : `DataQualityFlag`
        the modified flag (``self`` when ``inplace=True``)
    """
    # Fall back on this flag's own padding when no values are given.
    if args:
        start, end = args
    else:
        start, end = self.padding
    inplace = kwargs.pop('inplace', False)
    target = self if inplace else self.copy()
    # Anything left in kwargs was not understood.
    if kwargs:
        raise TypeError("unexpected keyword argument %r" %
                        list(kwargs.keys())[0])
    target.known = [(seg[0] + start, seg[1] + end) for seg in self.known]
    target.active = [(seg[0] + start, seg[1] + end) for seg in self.active]
    return target
Apply a padding to each segment in this `DataQualityFlag` This method either takes no arguments, in which case the value of the :attr:`~DataQualityFlag.padding` attribute will be used, or two values representing the padding for the start and end of each segment. For both the `start` and `end` paddings, a positive value means pad forward in time, so that a positive `start` pad or negative `end` padding will contract a segment at one or both ends, and vice-versa. This method will apply the same padding to both the `~DataQualityFlag.known` and `~DataQualityFlag.active` lists, but will not :meth:`~DataQualityFlag.coalesce` the result. Parameters ---------- start : `float` padding to apply to the start of the each segment end : `float` padding to apply to the end of each segment inplace : `bool`, optional, default: `False` modify this object in-place, default is `False`, i.e. return a copy of the original object with padded segments Returns ------- paddedflag : `DataQualityFlag` a view of the modified flag
def report_error_event(self, error_report):
    """Report an error payload to the "errors" structured log.

    :type error_report: dict
    :param error_report: error report payload formatted according to
        https://cloud.google.com/error-reporting/docs/formatting-error-messages
    """
    errors_logger = self.logging_client.logger("errors")
    errors_logger.log_struct(error_report)
Report error payload. :type error_report: dict :param: error_report: dict payload of the error report formatted according to https://cloud.google.com/error-reporting/docs/formatting-error-messages This object should be built using :meth:~`google.cloud.error_reporting.client._build_error_report`
def import_module(modulename):
    """Import and return module ``modulename``; handles relative names.

    If importing a dotted name fails, the leading package component is
    stripped and the remainder is imported instead.

    :param modulename: Name of module to import. Can be relative.
    :return: imported module instance.
    :raises ImportError: if neither the full nor stripped name imports.
    """
    try:
        return importlib.import_module(modulename)
    except ImportError:
        if "." not in modulename:
            raise
        # Drop the first path component and retry as an absolute import.
        _, _, remainder = modulename.partition(".")
        return importlib.import_module(remainder)
Static method for importing module modulename. Can handle relative imports as well. :param modulename: Name of module to import. Can be relative :return: imported module instance.
def mechanism_indices(self, direction):
    """The indices of nodes in the mechanism system.

    NOTE(review): the mapping is deliberately crossed — CAUSE maps to
    ``effect_indices`` and EFFECT to ``cause_indices``. An unknown
    direction raises ``KeyError``, as before.
    """
    indices_by_direction = {
        Direction.CAUSE: self.effect_indices,
        Direction.EFFECT: self.cause_indices,
    }
    return indices_by_direction[direction]
The indices of nodes in the mechanism system.
def num_fmt(num, max_digits=None):
    r"""Format a number for display.

    Floats are rendered with ``max_digits`` decimals (chosen from the
    magnitude when None) and stripped of redundant zeros; ints get comma
    separators; anything else is repr-formatted.

    Args:
        num (int or float or None):
        max_digits (int): decimals for floats; auto-selected when None.

    Returns:
        str:
    """
    if num is None:
        return 'None'

    def num_in_mag(num, mag):
        # True when num lies strictly inside (-mag, mag).
        return mag > num and num > (-1 * mag)

    if max_digits is None:
        # Smaller magnitudes get more precision.
        if num_in_mag(num, 1):
            if num_in_mag(num, .1):
                max_digits = 4
            else:
                max_digits = 3
        else:
            max_digits = 1
    if util_type.is_float(num):
        num_str = ('%.' + str(max_digits) + 'f') % num
        # Strip superfluous zeros, then restore a digit on each side of '.'.
        num_str = num_str.rstrip('0').lstrip('0')
        if num_str.startswith('.'):
            num_str = '0' + num_str
        if num_str.endswith('.'):
            num_str = num_str + '0'
        return num_str
    elif util_type.is_int(num):
        return int_comma_str(num)
    else:
        # BUG FIX: previously returned the literal string '%r' instead of
        # formatting num through it.
        return '%r' % (num,)
r""" Weird function. Not very well written. Very special case-y Args: num (int or float): max_digits (int): Returns: str: CommandLine: python -m utool.util_num --test-num_fmt Example: >>> # DISABLE_DOCTEST >>> from utool.util_num import * # NOQA >>> # build test data >>> num_list = [0, 0.0, 1.2, 1003232, 41431232., .0000000343, -.443243] >>> max_digits = None >>> # execute function >>> result = [num_fmt(num, max_digits) for num in num_list] >>> # verify results >>> print(result) ['0', '0.0', '1.2', '1,003,232', '41431232.0', '0.0', '-0.443']
def _point_plot_defaults(self, args, kwargs): if args: return args, kwargs if 'ls' not in kwargs and 'linestyle' not in kwargs: kwargs['linestyle'] = 'none' if 'marker' not in kwargs: kwargs['marker'] = 'o' return args, kwargs
To avoid confusion for new users, this ensures that "scattered" points are plotted by by `plot` instead of points joined by a line. Parameters ---------- args : tuple Arguments representing additional parameters to be passed to `self.plot`. kwargs : dict Keyword arguments representing additional parameters to be passed to `self.plot`. Returns ------- Modified versions of `args` and `kwargs`.
def extract_fields(self):
    """Extract the non-ignored schema fields from the context object.

    Fields that are restricted (Unauthorized) or invalid (ValueError)
    are skipped with a debug log entry.

    :returns: Schema name/value mapping
    :rtype: dict
    """
    dm = IDataManager(self.context)
    out = {}
    for name in self.keys:
        if name in self.ignore:
            continue
        try:
            raw_value = dm.json_data(name)
        except Unauthorized:
            logger.debug("Skipping restricted field '%s'" % name)
            continue
        except ValueError:
            logger.debug("Skipping invalid field '%s'" % name)
            continue
        out[name] = api.to_json_value(self.context, name, raw_value)
    return out
Extract the given fieldnames from the object :returns: Schema name/value mapping :rtype: dict
def Get(self, flags, off):
    """Get retrieves a value of the type specified by `flags` at the
    given offset.
    """
    # `off` must be a valid UOffsetT-compatible number.
    N.enforce_number(off, N.UOffsetTFlags)
    # Decode the raw bytes at `off` and coerce to the python type of `flags`.
    return flags.py_type(encode.Get(flags.packer_type, self.Bytes, off))
Get retrieves a value of the type specified by `flags` at the given offset.
def assign_unassigned_members(self, group_category_id, sync=None):
    """Assign unassigned members.

    Assigns all unassigned members as evenly as possible among the
    existing student groups.
    """
    path = {"group_category_id": group_category_id}
    params = {}
    data = {}
    if sync is not None:
        data["sync"] = sync
    self.logger.debug("POST /api/v1/group_categories/{group_category_id}/assign_unassigned_members with query params: {params} and form data: {data}".format(params=params, data=data, **path))
    return self.generic_request("POST", "/api/v1/group_categories/{group_category_id}/assign_unassigned_members".format(**path), data=data, params=params, single_item=True)
Assign unassigned members. Assign all unassigned members as evenly as possible among the existing student groups.
def reject(self, func):
    """Return all the elements for which the truth test ``func`` fails."""
    failing = [value for value in self.obj if not func(value)]
    return self._wrap(failing)
Return all the elements for which a truth test fails.
def open_handle(self, dwDesiredAccess = win32.PROCESS_ALL_ACCESS):
    """Open a new handle to the process and store it in L{hProcess}.

    Normally you should call L{get_handle} instead, since it tries to
    reuse handles and merge access rights.

    @type dwDesiredAccess: int
    @param dwDesiredAccess: Desired access rights.
        Defaults to L{win32.PROCESS_ALL_ACCESS}.

    @raise WindowsError: It's not possible to open a handle to the
        process with the requested access rights.
    """
    new_handle = win32.OpenProcess(dwDesiredAccess, win32.FALSE,
                                   self.dwProcessId)
    # Drop any previously held handle; failure to close is non-fatal.
    try:
        self.close_handle()
    except Exception:
        warnings.warn(
            "Failed to close process handle: %s" % traceback.format_exc())
    self.hProcess = new_handle
Opens a new handle to the process. The new handle is stored in the L{hProcess} property. @warn: Normally you should call L{get_handle} instead, since it's much "smarter" and tries to reuse handles and merge access rights. @type dwDesiredAccess: int @param dwDesiredAccess: Desired access rights. Defaults to L{win32.PROCESS_ALL_ACCESS}. See: U{http://msdn.microsoft.com/en-us/library/windows/desktop/ms684880(v=vs.85).aspx} @raise WindowsError: It's not possible to open a handle to the process with the requested access rights. This typically happens because the target process is a system process and the debugger is not running with administrative rights.
def ekopr(fname):
    """Open an existing E-kernel file for reading.

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekopr_c.html

    :param fname: Name of EK file.
    :type fname: str
    :return: Handle attached to EK file.
    :rtype: int
    """
    fname_p = stypes.stringToCharP(fname)
    handle = ctypes.c_int()
    # CSPICE writes the handle through the pointer argument.
    libspice.ekopr_c(fname_p, ctypes.byref(handle))
    return handle.value
Open an existing E-kernel file for reading. http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/ekopr_c.html :param fname: Name of EK file. :type fname: str :return: Handle attached to EK file. :rtype: int
def dumps(self, fd, **kwargs):
    """Return the concrete content for a file descriptor.

    BACKWARD COMPATIBILITY: file descriptors 0, 1 and 2 return the data
    from stdin, stdout or stderr as a flat bytestring.

    :param fd: A file descriptor.
    :return: The concrete content.
    """
    if 0 <= fd <= 2:
        stream = (self.stdin, self.stdout, self.stderr)[fd]
        data = stream.concretize(**kwargs)
        # Some stream types concretize to a list of chunks; flatten them.
        if type(data) is list:
            data = b''.join(data)
        return data
    return self.get_fd(fd).concretize(**kwargs)
Returns the concrete content for a file descriptor. BACKWARD COMPATIBILITY: if you ask for file descriptors 0 1 or 2, it will return the data from stdin, stdout, or stderr as a flat string. :param fd: A file descriptor. :return: The concrete content. :rtype: str
def _SerializeEntries(entries):
    """Serialize triplets of python and wire values and a descriptor.

    An entry is re-encoded when its wire form is missing or its python
    form has been modified since it was last encoded.
    """
    chunks = []
    for python_format, wire_format, type_descriptor in entries:
        needs_encoding = wire_format is None or (
            python_format and type_descriptor.IsDirty(python_format))
        if needs_encoding:
            wire_format = type_descriptor.ConvertToWireFormat(python_format)
        precondition.AssertIterableType(wire_format, bytes)
        chunks.extend(wire_format)
    return b"".join(chunks)
Serializes given triplets of python and wire values and a descriptor.
def add(self, template, resource, name=None):
    """Add a route to a resource.

    The optional `name` assigns a name to this route that can be used
    when building URLs. The name must be unique within this Mapper
    object.
    """
    # Objects carrying rhino metadata are wrapped in a Resource adapter.
    target = Resource(resource) if hasattr(resource, '_rhino_meta') else resource
    route = Route(template, target, name=name, ranges=self.ranges)
    # Remember only the first route registered for a given resource object.
    obj_id = id(resource)
    if obj_id not in self._lookup:
        self._lookup[obj_id] = route
    if name is not None:
        if name in self.named_routes:
            raise InvalidArgumentError("A route named '%s' already exists in this %s object." % (name, self.__class__.__name__))
        self.named_routes[name] = route
    self.routes.append(route)
Add a route to a resource. The optional `name` assigns a name to this route that can be used when building URLs. The name must be unique within this Mapper object.
def discovery_mdns(self):
    """Installs the mDNS discovery bundles and instantiates components."""
    # zeroconf is noisy at INFO level; quiet it down.
    logging.getLogger("zeroconf").setLevel(logging.WARNING)
    bundle = self.context.install_bundle("pelix.remote.discovery.mdns")
    bundle.start()
    with use_waiting_list(self.context) as ipopo:
        ipopo.add(rs.FACTORY_DISCOVERY_ZEROCONF, "pelix-discovery-zeroconf")
Installs the mDNS discovery bundles and instantiates components
def associate_dhcp_options(self, dhcp_options_id, vpc_id):
    """Associate a set of Dhcp Options with a VPC.

    :type dhcp_options_id: str
    :param dhcp_options_id: The ID of the Dhcp Options

    :type vpc_id: str
    :param vpc_id: The ID of the VPC.

    :rtype: bool
    :return: True if successful
    """
    params = {
        'DhcpOptionsId': dhcp_options_id,
        'VpcId': vpc_id,
    }
    return self.get_status('AssociateDhcpOptions', params)
Associate a set of Dhcp Options with a VPC. :type dhcp_options_id: str :param dhcp_options_id: The ID of the Dhcp Options :type vpc_id: str :param vpc_id: The ID of the VPC. :rtype: bool :return: True if successful
def _outfp_write_with_check(self, outfp, data, enable_overwrite_check=True): start = outfp.tell() outfp.write(data) if self._track_writes: end = outfp.tell() if end > self.pvd.space_size * self.pvd.logical_block_size(): raise pycdlibexception.PyCdlibInternalError('Wrote past the end of the ISO! (%d > %d)' % (end, self.pvd.space_size * self.pvd.logical_block_size())) if enable_overwrite_check: bisect.insort_left(self._write_check_list, self._WriteRange(start, end - 1))
Internal method to write data out to the output file descriptor, ensuring that it doesn't go beyond the bounds of the ISO. Parameters: outfp - The file object to write to. data - The actual data to write. enable_overwrite_check - Whether to do overwrite checking if it is enabled. Some pieces of code explicitly want to overwrite data, so this allows them to disable the checking. Returns: Nothing.
def get_job_log_url(self, project, **params):
    """Gets job log url, filtered by parameters.

    :param project: project (repository name) to query data for
    :param params: keyword arguments to filter results
    """
    endpoint = self.JOB_LOG_URL_ENDPOINT
    return self._get_json(endpoint, project, **params)
Gets job log url, filtered by parameters :param project: project (repository name) to query data for :param params: keyword arguments to filter results
def wol(mac, bcast='255.255.255.255', destport=9):
    """Send a "Magic Packet" to wake up a Minion.

    CLI Example:

    .. code-block:: bash

        salt-run network.wol 08-00-27-13-69-77
        salt-run network.wol 080027136977 255.255.255.255 7
        salt-run network.wol 08:00:27:13:69:77 255.255.255.255 7
    """
    dest = salt.utils.network.mac_str_to_bytes(mac)
    # Magic packet: 6 x 0xFF followed by the MAC repeated 16 times.
    payload = b'\xff' * 6 + dest * 16
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
    sock.sendto(payload, (bcast, int(destport)))
    return True
Send a "Magic Packet" to wake up a Minion CLI Example: .. code-block:: bash salt-run network.wol 08-00-27-13-69-77 salt-run network.wol 080027136977 255.255.255.255 7 salt-run network.wol 08:00:27:13:69:77 255.255.255.255 7
def diff_levenshtein(self, diffs):
    """Compute the Levenshtein distance; the number of inserted, deleted
    or substituted characters.

    Args:
        diffs: Array of diff tuples.

    Returns:
        Number of changes.
    """
    distance = 0
    pending_insertions = 0
    pending_deletions = 0
    for op, data in diffs:
        if op == self.DIFF_INSERT:
            pending_insertions += len(data)
        elif op == self.DIFF_DELETE:
            pending_deletions += len(data)
        elif op == self.DIFF_EQUAL:
            # A run of equalities closes out the current edit block; a
            # matched insert+delete pair counts as one substitution.
            distance += max(pending_insertions, pending_deletions)
            pending_insertions = 0
            pending_deletions = 0
    distance += max(pending_insertions, pending_deletions)
    return distance
Compute the Levenshtein distance; the number of inserted, deleted or substituted characters. Args: diffs: Array of diff tuples. Returns: Number of changes.
def process_streamers(self):
    """Check if any streamers should be handed to the stream manager."""
    # Streamers still being processed must not be re-triggered.
    active = self._stream_manager.in_progress()
    for streamer in self.graph.check_streamers(blacklist=active):
        self._stream_manager.process_streamer(
            streamer, callback=self._handle_streamer_finished)
Check if any streamers should be handed to the stream manager.
def _commit(self):
    """Transactionally commit the changes accumulated.

    Returns:
        list: the write results corresponding to the committed changes,
        in the same order the changes were applied to this transaction.

    Raises:
        ValueError: If no transaction is in progress.
    """
    if not self.in_progress:
        raise ValueError(_CANT_COMMIT)
    response = _commit_with_retry(self._client, self._write_pbs, self._id)
    self._clean_up()
    return list(response.write_results)
Transactionally commit the changes accumulated. Returns: List[google.cloud.proto.firestore.v1beta1.\ write_pb2.WriteResult, ...]: The write results corresponding to the changes committed, returned in the same order as the changes were applied to this transaction. A write result contains an ``update_time`` field. Raises: ValueError: If no transaction is in progress.
def geo_search(user_id, search_location):
    """Search for a location - free form."""
    url = "https://api.twitter.com/1.1/geo/search.json"
    params = {"query": search_location}
    response = make_twitter_request(url, user_id, params)
    return response.json()
Search for a location - free form
def get_properties(attributes):
    """Return the names of entries whose value is a ``property`` object.

    :type attributes: dict
    :rtype: list
    """
    names = []
    for name, value in attributes.items():
        if isinstance(value, property):
            names.append(name)
    return names
Return tuple of names of defined properties. :type attributes: dict :rtype: list
def set_orient(self):
    """Set ``self.orient`` to the orientation computed from the CD matrix."""
    # Orientation angle in degrees from atan2(CD12, CD22).
    self.orient = RADTODEG(N.arctan2(self.cd12,self.cd22))
Return the computed orientation based on CD matrix.
def run(self):
    """The main method of the worker.

    Will ask redis for list items via blocking calls, get jobs from
    them, try to execute these jobs, and end when needed.
    """
    # A worker run is single-shot: refuse to start a second time.
    if self.status:
        self.set_status('aborted')
        raise LimpydJobsException('This worker run is already terminated')
    self.set_status('starting')
    self.start_date = datetime.utcnow()
    if self.max_duration:
        # Deadline after which the worker should wind down.
        self.wanted_end_date = self.start_date + self.max_duration
    must_stop = self.must_stop()
    if not must_stop:
        # Wait until at least one queue key exists (or we are asked to stop).
        while not self.keys and not must_stop:
            self.update_keys()
            if not self.keys:
                sleep(self.fetch_priorities_delay)
                must_stop = self.must_stop()
        if not must_stop:
            self.requeue_delayed_jobs()
            self.run_started()
            self._main_loop()
    self.set_status('terminated')
    self.end_date = datetime.utcnow()
    self.run_ended()
    if self.terminate_gracefuly:
        self.stop_handling_end_signal()
The main method of the worker. Will ask redis for list items via blocking calls, get jobs from them, try to execute these jobs, and end when needed.
def _unpack(c, tmp, package, version, git_url=None):
    """Download + unpack given package into temp dir ``tmp``.

    Return ``(real_version, source)`` where ``real_version`` is the
    "actual" version downloaded and ``source`` is the source directory
    (relative to unpacked source) to import into ``<project>/vendor``.
    """
    real_version = version[:]
    source = None
    if git_url:
        # NOTE(review): git URL handling looks unimplemented — confirm.
        pass
    else:
        cwd = os.getcwd()
        print("Moving into temp dir %s" % tmp)
        os.chdir(tmp)
        try:
            # Download (not install) the pinned sdist into the temp dir.
            flags = "--download=. --build=build --no-use-wheel"
            cmd = "pip install %s %s==%s" % (flags, package, version)
            c.run(cmd)
            # Locate the downloaded archive, trying known extensions in order.
            globs = []
            globexpr = ""
            for extension, opener in (
                ("zip", "unzip"),
                ("tgz", "tar xzvf"),
                ("tar.gz", "tar xzvf"),
            ):
                globexpr = "*.{0}".format(extension)
                globs = glob(globexpr)
                if globs:
                    break
            # NOTE(review): raises IndexError if no archive matched.
            archive = os.path.basename(globs[0])
            # Strip ".<extension>" to get the unpacked directory name.
            source, _, _ = archive.rpartition(".{0}".format(extension))
            c.run("{0} {1}".format(opener, globexpr))
        finally:
            os.chdir(cwd)
    return real_version, source
Download + unpack given package into temp dir ``tmp``. Return ``(real_version, source)`` where ``real_version`` is the "actual" version downloaded (e.g. if a Git master was indicated, it will be the SHA of master HEAD) and ``source`` is the source directory (relative to unpacked source) to import into ``<project>/vendor``.
def setup_user_manager(app):
    """Setup flask-user manager."""
    from flask_user import SQLAlchemyAdapter
    from rio.models import User

    adapter = SQLAlchemyAdapter(db, User)
    user_manager.init_app(app, db_adapter=adapter)
Setup flask-user manager.
def make_scratch_dirs(file_mapping, dry_run=True):
    """Make any directories needed in the scratch area.

    Parameters
    ----------
    file_mapping : dict
        Mapping whose values are scratch-area file paths; the parent
        directory of each path is created.
    dry_run : bool
        When True, only print the equivalent shell commands.
    """
    # Collect unique parent directories, preserving first-seen order.
    scratch_dirs = {os.path.dirname(value): True
                    for value in file_mapping.values()}
    for scratch_dirname in scratch_dirs:
        if dry_run:
            # BUG FIX: previously printed "mkdir -f"; mkdir has no -f
            # flag, "-p" is the create-parents equivalent of the real run.
            print("mkdir -p %s" % (scratch_dirname))
        else:
            try:
                os.makedirs(scratch_dirname)
            except OSError:
                # Directory may already exist; creation is best-effort.
                pass
Make any directories need in the scratch area
def refresh(self):
    """Refresh the dev_info data used by get_value.

    Only needed if you're not using subscriptions.
    """
    payload = self.vera_request(id='sdata', output_format='json').json()
    for device_data in payload.get('devices'):
        # Only apply the entry that matches this device.
        if device_data.get('id') == self.device_id:
            self.update(device_data)
Refresh the dev_info data used by get_value. Only needed if you're not using subscriptions.
def _rescale(self, points): return [( x, self._scale_diff + (y - self._scale_min_2nd) * self._scale if y is not None else None ) for x, y in points]
Scale for secondary
def get_health_events(self, recipient):
    """Return the HealthEvents for ``recipient``, lazily starting a
    healthcheck task for it if none exists yet.
    """
    try:
        return self.addresses_events[recipient]
    except KeyError:
        # First time we see this address: begin monitoring it.
        self.start_health_check(recipient)
        return self.addresses_events[recipient]
Starts a healthcheck task for `recipient` and returns a HealthEvents with locks to react on its current state.
def preprocess(net, image):
    """Convert an image to Caffe input layout.

    Moves the channel axis to the front, reverses the channel order and
    subtracts the network's "data" mean.
    """
    channels_first = np.rollaxis(image, 2)
    # Reverse the channel axis before subtracting the mean.
    reordered = channels_first[::-1]
    return np.float32(reordered) - net.transformer.mean["data"]
convert to Caffe input image layout
def sort(self):
    """Return beam-labelings, sorted by descending probability."""
    ranked = sorted(self.entries.values(), reverse=True,
                    key=lambda entry: entry.prTotal * entry.prText)
    return [entry.labeling for entry in ranked]
return beam-labelings, sorted by probability
def principal_inertia_components(self):
    """Return the principal components of inertia.

    Ordering corresponds to mesh.principal_inertia_vectors.

    Returns
    ----------
    components : (3,) float
        Principal components of inertia
    """
    components, vectors = inertia.principal_axis(self.moment_inertia)
    # Cache the axes so principal_inertia_vectors can reuse them without
    # recomputing the eigendecomposition.
    self._cache['principal_inertia_vectors'] = vectors
    return components
Return the principal components of inertia Ordering corresponds to mesh.principal_inertia_vectors Returns ---------- components : (3,) float Principal components of inertia