code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def get_cell_content(self):
    """Return the cell's code content, or None for button cells and
    out-of-range keys."""
    key = self.key
    array = self.code_array
    # Button cells carry no editable content; a bad key also yields None.
    try:
        is_button = bool(array.cell_attributes[key]["button_cell"])
    except IndexError:
        return None
    if is_button:
        return None
    try:
        return array[key]
    except IndexError:
        return None
Returns cell content
def draw_graph(G: nx.DiGraph, filename: str):
    """Render *G* to *filename* with pygraphviz, laid out left-to-right
    using the dot engine."""
    agraph = to_agraph(G)
    agraph.graph_attr["rankdir"] = "LR"
    agraph.draw(filename, prog="dot")
Draw a networkx graph with Pygraphviz.
def merge_and_fit(self, track, pairings):
    """Merge *track* into this one segment-by-segment, ordering points
    with a distance heuristic.

    For each pairing, the incoming segment is reversed (on a copy) when
    its far end is closer to our segment's first point than its near end.

    Args:
        track (:obj:`Track`): Track to merge with.
        pairings: iterable of ``(self_seg_index, track_seg_index, _)``.
    Returns:
        self
    """
    for own_index, other_index, _ in pairings:
        own_segment = self.segments[own_index]
        other_segment = track.segments[other_index]
        start = own_segment.points[0]
        # Reverse the incoming segment when its tail is nearer our head.
        if start.distance(other_segment.points[0]) > start.distance(other_segment.points[-1]):
            other_segment = other_segment.copy()
            other_segment.points = list(reversed(other_segment.points))
        own_segment.merge_and_fit(other_segment)
    return self
Merges another track with this one, ordering the points based on a distance heuristic. Args: track (:obj:`Track`): Track to merge with. pairings: iterable of (self_seg_index, track_seg_index, _) segment index pairs. Returns: :obj:`Track`: self
def get_completions(self, candidates):
    """Map each candidate block key to its completion value.

    Keys missing from the completion records default to 0.0; no check is
    made that the blocks exist in the course. Candidates have the course
    run filled in from ``self._course_key`` (old mongo keys may lack it).

    Parameters:
        candidates: collection of BlockKeys within the current course.
    Return value:
        dict[BlockKey] -> float completion value.
    """
    queryset = BlockCompletion.user_course_completion_queryset(
        self._user, self._course_key
    ).filter(block_key__in=candidates)
    completions = BlockCompletion.completion_by_block_key(queryset)
    # Normalize candidate keys to run-qualified form before the lookup.
    for candidate in (c.replace(course_key=self._course_key) for c in candidates):
        completions.setdefault(candidate, 0.0)
    return completions
Given an iterable collection of block_keys in the course, returns a mapping of the block_keys to the present completion values of their associated blocks. If a completion is not found for a given block in the current course, 0.0 is returned. The service does not attempt to verify that the block exists within the course. Parameters: candidates: collection of BlockKeys within the current course. Note: Usage keys may not have the course run filled in for old mongo courses. This method checks for completion records against a set of BlockKey candidates with the course run filled in from self._course_key. Return value: dict[BlockKey] -> float: Mapping blocks to their completion value.
def filter_by_gene_expression(self, gene_expression_dict, min_expression_value=0.0):
    """Keep only variants overlapping a gene whose expression value in
    *gene_expression_dict* exceeds *min_expression_value*.

    Parameters
    ----------
    gene_expression_dict : dict
        Maps Ensembl gene IDs to expression estimates (FPKM or TPM).
    min_expression_value : float
        Threshold above which an effect is kept.
    """
    def gene_ids_of(effect):
        return effect.gene_ids

    return self.filter_any_above_threshold(
        multi_key_fn=gene_ids_of,
        value_dict=gene_expression_dict,
        threshold=min_expression_value,
    )
Filters variants down to those which overlap a gene whose expression value in the gene_expression_dict argument is greater than min_expression_value. Parameters ---------- gene_expression_dict : dict Dictionary mapping Ensembl gene IDs to expression estimates (either FPKM or TPM) min_expression_value : float Threshold above which we'll keep an effect in the result collection
def transform_feature_names(transformer, in_names=None):
    """Get feature names for transformer output as a function of input names.

    Falls back to ``transformer.get_feature_names()``; raises
    ``NotImplementedError`` when the transformer has no such method and no
    singledispatch handler is registered.

    Parameters
    ----------
    transformer : scikit-learn-compatible transformer
    in_names : list of str, optional
        Names of the features input to ``transformer.transform()``.

    Returns
    -------
    feature_names : list of str
    """
    get_names = getattr(transformer, 'get_feature_names', None)
    if get_names is not None:
        return get_names()
    raise NotImplementedError('transform_feature_names not available for '
                              '{}'.format(transformer))
Get feature names for transformer output as a function of input names. Used by :func:`explain_weights` when applied to a scikit-learn Pipeline, this ``singledispatch`` should be registered with custom name transformations for each class of transformer. If there is no ``singledispatch`` handler registered for a transformer class, ``transformer.get_feature_names()`` method is called; if there is no such method then feature names are not supported and this function raises an exception. Parameters ---------- transformer : scikit-learn-compatible transformer in_names : list of str, optional Names for features input to transformer.transform(). If not provided, the implementation may generate default feature names if the number of input features is known. Returns ------- feature_names : list of str
def findSequenceOnDisk(cls, pattern, strictPadding=False):
    """Search for a specific sequence on disk.

    The padding characters in *pattern* filter the frame values of files
    on disk when *strictPadding* is True.

    Args:
        pattern (str): the sequence pattern being searched for
        strictPadding (bool): if True, ignore files whose padding length
            differs from *pattern*
    Returns:
        the matching sequence object
    Raises:
        FileSeqException: if no sequence is found on disk
    """
    seq = cls(pattern)
    # No frame range and no padding: the pattern is just a literal path.
    if seq.frameRange() == '' and seq.padding() == '':
        if os.path.isfile(pattern):
            return seq
    # Glob for any frame value between basename and extension.
    patt = seq.format('{dirname}{basename}*{extension}')
    ext = seq.extension()
    basename = seq.basename()
    pad = seq.padding()
    globbed = iglob(patt)
    if pad and strictPadding:
        # Drop files whose frame width doesn't match the pattern's padding.
        globbed = cls._filterByPaddingNum(globbed, seq.zfill())
        pad = cls.conformPadding(pad)
    matches = cls.yield_sequences_in_list(globbed)
    for match in matches:
        if match.basename() == basename and match.extension() == ext:
            if pad and strictPadding:
                match.setPadding(pad)
            return match
    msg = 'no sequence found on disk matching {0}'
    raise FileSeqException(msg.format(pattern))
Search for a specific sequence on disk. The padding characters used in the `pattern` are used to filter the frame values of the files on disk (if `strictPadding` is True). Examples: Find sequence matching basename and extension, and a wildcard for any frame. returns bar.1.exr bar.10.exr, bar.100.exr, bar.1000.exr, inclusive >>> findSequenceOnDisk("seq/bar@@@@.exr") Find exactly 4-padded sequence, i.e. seq/bar1-100#.exr returns only frames bar1000.exr through bar9999.exr >>> findSequenceOnDisk("seq/bar#.exr", strictPadding=True) Args: pattern (str): the sequence pattern being searched for strictPadding (bool): if True, ignore files with padding length different from `pattern` Returns: str: Raises: :class:`.FileSeqException`: if no sequence is found on disk
def mag_calibration(self):
    """Perform magnetometer calibration for the currently selected IMU."""
    self.calibration_state = self.CAL_MAG
    # Modal dialog collects magnetometer samples from the selected IMU.
    self.mag_dialog = SK8MagDialog(self.sk8.get_imu(self.spinIMU.value()), self)
    if self.mag_dialog.exec_() == QDialog.Rejected:
        return
    self.calculate_mag_calibration(self.mag_dialog.samples)
Perform magnetometer calibration for current IMU.
def printable(sequence):
    """Return *sequence* with every character not in ``PRINTABLE``
    replaced by ``'.'``.

    :param sequence: byte or string sequence
    :returns: printable string
    """
    # Feed the generator expression straight to str.join; the original
    # list(map(lambda ...)) built a redundant intermediate list.
    return ''.join(c if c in PRINTABLE else '.' for c in sequence)
Return a printable string from the input ``sequence`` :param sequence: byte or string sequence >>> print(printable('\\x1b[1;34mtest\\x1b[0m')) .[1;34mtest.[0m >>> printable('\\x00\\x01\\x02\\x03\\x04\\x05\\x06\\x06') == '........' True >>> print(printable('12345678')) 12345678 >>> print(printable('testing\\n')) testing.
def get_user_vault_instance_or_none(self, user):
    """Return the user's single vault instance, or None when absent.

    Raises a generic Exception when the user has more than one vault,
    which this app does not support.
    """
    matches = self.filter(user=user)
    if not matches:
        return None
    if matches.count() > 1:
        raise Exception(
            'This app does not currently support multiple vault ids')
    return matches.get()
Returns a vault_id string or None
def sample_statements(stmts, seed=None):
    """Return statements sampled independently according to belief.

    A Statement with belief 0.7 appears in the result with probability 0.7.

    Parameters
    ----------
    stmts : list[indra.statements.Statement]
        Statements to sample.
    seed : Optional[int]
        Seed for the random number generator used for sampling.

    Returns
    -------
    new_stmts : list[indra.statements.Statement]
        Statements chosen by random sampling according to their beliefs.
    """
    # `if seed:` would silently ignore a legitimate seed of 0.
    if seed is not None:
        numpy.random.seed(seed)
    draws = numpy.random.rand(len(stmts))
    return [stmt for draw, stmt in zip(draws, stmts) if draw < stmt.belief]
Return statements sampled according to belief. Statements are sampled independently according to their belief scores. For instance, a Statement with a belief score of 0.7 will end up in the returned Statement list with probability 0.7. Parameters ---------- stmts : list[indra.statements.Statement] A list of INDRA Statements to sample. seed : Optional[int] A seed for the random number generator used for sampling. Returns ------- new_stmts : list[indra.statements.Statement] A list of INDRA Statements that were chosen by random sampling according to their respective belief scores.
def move(self, point: Point) -> 'Location':
    """Return a new Location translated by *point*, preserving the labware.

    The current Location is not mutated (NamedTuple ``_replace`` builds a
    fresh instance).
    """
    return self._replace(point=self.point + point)
Alter the point stored in the location while preserving the labware. This returns a new Location and does not alter the current one. It should be used like .. code-block:: python >>> loc = Location(Point(1, 1, 1), 'Hi') >>> new_loc = loc.move(Point(1, 1, 1)) >>> assert new_loc.point == Point(2, 2, 2) # True >>> assert loc.point == Point(1, 1, 1) # True
def send_verification_email(self):
    """Send a verification email to this user's address.

    Note: the message may end up in the user's spam folder.
    """
    # Adjacent string literals are joined before .format is applied, so
    # the placeholder lives in the combined path template.
    url = (self._imgur._base_url + "/3/account/{0}"
           "/verifyemail".format(self.name))
    self._imgur._send_request(url, needs_auth=True, method='POST')
Send a verification email to this user's email address. Remember that the verification email may end up in the user's spam folder.
def _did_count(self, connection): self.current_connection = connection response = connection.response count = 0 callback = None if 'X-Nuage-Count' in response.headers: count = int(response.headers['X-Nuage-Count']) if 'remote' in connection.callbacks: callback = connection.callbacks['remote'] if connection.async: if callback: callback(self, self.parent_object, count) self.current_connection.reset() self.current_connection = None else: if connection.response.status_code >= 400 and BambouConfig._should_raise_bambou_http_error: raise BambouHTTPError(connection=connection) return (self, self.parent_object, count)
Called when the count is finished
def array2tree(arr, name='tree', tree=None):
    """Convert a numpy structured array into a ROOT TTree.

    Parameters
    ----------
    arr : numpy structured array
        The data to convert.
    name : str, optional
        Name of the created TTree when *tree* is None.
    tree : ROOT.TTree, optional
        Existing tree to extend; a non-TTree value raises TypeError.

    Returns
    -------
    root_tree : a ROOT TTree
    """
    import ROOT
    if tree is None:
        incobj = None
    else:
        if not isinstance(tree, ROOT.TTree):
            raise TypeError("tree must be a ROOT.TTree")
        incobj = ROOT.AsCObject(tree)
    cobj = _librootnumpy.array2tree_toCObj(arr, name=name, tree=incobj)
    return ROOT.BindObject(cobj, 'TTree')
Convert a numpy structured array into a ROOT TTree. Fields of basic types, strings, and fixed-size subarrays of basic types are supported. ``np.object`` and ``np.float16`` are currently not supported. Parameters ---------- arr : array A numpy structured array name : str (optional, default='tree') Name of the created ROOT TTree if ``tree`` is None. tree : ROOT TTree (optional, default=None) An existing ROOT TTree to be extended by the numpy array. Any branch with the same name as a field in the numpy array will be extended as long as the types are compatible, otherwise a TypeError is raised. New branches will be created and filled for all new fields. Returns ------- root_tree : a ROOT TTree Notes ----- When using the ``tree`` argument to extend and/or add new branches to an existing tree, note that it is possible to create branches of different lengths. This will result in a warning from ROOT when root_numpy calls the tree's ``SetEntries()`` method. Beyond that, the tree should still be usable. While it might not be generally recommended to create branches with differing lengths, this behaviour could be required in certain situations. root_numpy makes no attempt to prevent such behaviour as this would be more strict than ROOT itself. Also see the note about converting trees that have branches of different lengths into numpy arrays in the documentation of :func:`tree2array`. See Also -------- array2root root2array tree2array Examples -------- Convert a numpy array into a tree: >>> from root_numpy import array2tree >>> import numpy as np >>> >>> a = np.array([(1, 2.5, 3.4), ... (4, 5, 6.8)], ... dtype=[('a', np.int32), ... ('b', np.float32), ... 
('c', np.float64)]) >>> tree = array2tree(a) >>> tree.Scan() ************************************************ * Row * a * b * c * ************************************************ * 0 * 1 * 2.5 * 3.4 * * 1 * 4 * 5 * 6.8 * ************************************************ Add new branches to an existing tree (continuing from the example above): >>> b = np.array([(4, 10), ... (3, 5)], ... dtype=[('d', np.int32), ... ('e', np.int32)]) >>> array2tree(b, tree=tree) <ROOT.TTree object ("tree") at 0x1449970> >>> tree.Scan() ************************************************************************ * Row * a * b * c * d * e * ************************************************************************ * 0 * 1 * 2.5 * 3.4 * 4 * 10 * * 1 * 4 * 5 * 6.8 * 3 * 5 * ************************************************************************
def list(gandi, fqdn, name, sort, type, rrset_type, text):
    """Display records for a domain."""
    known = [domain['fqdn'] for domain in gandi.dns.list()]
    if fqdn not in known:
        gandi.echo('Sorry domain %s does not exist' % fqdn)
        gandi.echo('Please use one of the following: %s' % ', '.join(known))
        return
    output_keys = ['name', 'ttl', 'type', 'values']
    result = gandi.dns.records(fqdn, sort_by=sort, text=text)
    if text:
        gandi.echo(result)
        return result
    for index, record in enumerate(result):
        if type and record['rrset_type'] != type:
            continue
        if rrset_type and record['rrset_type'] != rrset_type:
            continue
        if name and record['rrset_name'] != name:
            continue
        # NOTE(review): the separator keys off the raw enumerate index, so
        # a filtered-out first record still yields a leading separator —
        # behavior preserved as-is.
        if index:
            gandi.separator_line()
        output_dns_records(gandi, record, output_keys)
    return result
Display records for a domain.
def tilt_model(params, shape):
    """lmfit tilt model: the plane ``off + mx*x + my*y``.

    Coordinates are pixel offsets from the array centre
    (``shape[i] // 2``) along each axis.
    """
    slope_x = params["mx"].value
    slope_y = params["my"].value
    offset = params["off"].value
    rows, cols = shape[0], shape[1]
    x = (np.arange(rows) - rows // 2).reshape(-1, 1)
    y = (np.arange(cols) - cols // 2).reshape(1, -1)
    return np.full((rows, cols), offset, dtype=float) + slope_x * x + slope_y * y
lmfit tilt model
def _collapse_attributes(self, line, header, indexes): names = [] vals = [] pat = re.compile("[\W]+") for i in indexes: names.append(pat.sub("_", self._clean_header(header[i]))) vals.append(line[i]) Attrs = collections.namedtuple('Attrs', names) return Attrs(*vals)
Combine attributes in multiple columns into single named tuple.
def load_objects(self, addr, num_bytes, ret_on_segv=False):
    """
    Load memory objects from paged memory.

    :param addr: Address to start loading.
    :param num_bytes: Number of bytes to load.
    :param bool ret_on_segv: True to return what was loaded so far when a
                             segfault is hit, otherwise SimSegfaultError
                             is raised.
    :return: list of tuples of (addr, memory_object)
    """
    result = []
    end = addr + num_bytes
    for page_addr in self._containing_pages(addr, end):
        try:
            page = self._get_page(page_addr // self._page_size)
        except KeyError:
            # Missing page: a read miss only matters when segfault
            # emulation is enabled; otherwise the page is skipped.
            if self.allow_segv:
                if ret_on_segv:
                    break
                raise SimSegfaultError(addr, 'read-miss')
            else:
                continue
        # Enforce read permission on the page when segfaults are emulated.
        if self.allow_segv and not page.concrete_permissions & DbgPage.PROT_READ:
            if ret_on_segv:
                break
            raise SimSegfaultError(addr, 'non-readable')
        result.extend(page.load_slice(self.state, addr, end))
    return result
Load memory objects from paged memory. :param addr: Address to start loading. :param num_bytes: Number of bytes to load. :param bool ret_on_segv: True if you want load_bytes to return directly when a SIGSEV is triggered, otherwise a SimSegfaultError will be raised. :return: list of tuples of (addr, memory_object) :rtype: tuple
def to_det_oid(self, det_id_or_det_oid):
    """Normalise a detector identifier: numeric IDs are resolved to OIDs;
    anything non-numeric is assumed to already be an OID."""
    try:
        int(det_id_or_det_oid)
    except ValueError:
        # Not numeric, so it is already an OID.
        return det_id_or_det_oid
    return self.get_det_oid(det_id_or_det_oid)
Convert det OID or ID to det OID
def build_param_doc(arg_names, arg_types, arg_descs, remove_dup=True):
    """Build argument docs in python style.

    arg_names : list of str
        Argument names.
    arg_types : list of str
        Argument type information.
    arg_descs : list of str
        Argument description information.
    remove_dup : boolean, optional
        Whether to remove duplicated argument names.

    Returns
    -------
    docstr : str
        Python docstring of the parameter section.
    """
    seen = set()
    entries = []
    for arg_name, arg_type, desc in zip(arg_names, arg_types, arg_descs):
        # Skip duplicates (when requested) and the internal num_args slot.
        if (arg_name in seen and remove_dup) or arg_name == 'num_args':
            continue
        seen.add(arg_name)
        entry = '%s : %s' % (arg_name, arg_type)
        if desc:
            entry += '\n ' + desc
        entries.append(entry)
    return ('Parameters\n' + '----------\n' + '%s\n') % '\n'.join(entries)
Build argument docs in python style. arg_names : list of str Argument names. arg_types : list of str Argument type information. arg_descs : list of str Argument description information. remove_dup : boolean, optional Whether remove duplication or not. Returns ------- docstr : str Python docstring of parameter sections.
def Run(self):
    """Run the executable with its help flag and collect the output.

    Returns:
      1 (true)  If everything went well.
      0 (false) If there were problems.
    """
    if not self.executable:
        logging.error('Could not locate "%s"' % self.long_name)
        return 0
    # Record the binary's modification time alongside its output.
    finfo = os.stat(self.executable)
    self.date = time.localtime(finfo[stat.ST_MTIME])
    logging.info('Running: %s %s </dev/null 2>&1' % (self.executable, FLAGS.help_flag))
    # NOTE(review): os.popen4 exists only on Python 2 (removed in 3);
    # porting would require subprocess.Popen.
    (child_stdin, child_stdout_and_stderr) = os.popen4(
        [self.executable, FLAGS.help_flag])
    child_stdin.close()
    self.output = child_stdout_and_stderr.readlines()
    child_stdout_and_stderr.close()
    # Too-short output means the help flag produced no usage message.
    if len(self.output) < _MIN_VALID_USAGE_MSG:
        logging.error('Error: "%s %s" returned only %d lines: %s' % (self.name, FLAGS.help_flag, len(self.output), self.output))
        return 0
    return 1
Run it and collect output. Returns: 1 (true) If everything went well. 0 (false) If there were problems.
def getOverlayTransformAbsolute(self, ulOverlayHandle):
    """Get the overlay transform if it is absolute.

    Returns an error result if the transform is some other type, together
    with the tracking-universe origin and the origin-to-overlay matrix.
    """
    fn = self.function_table.getOverlayTransformAbsolute
    # Out-parameters filled in by the C API call below.
    peTrackingOrigin = ETrackingUniverseOrigin()
    pmatTrackingOriginToOverlayTransform = HmdMatrix34_t()
    result = fn(ulOverlayHandle, byref(peTrackingOrigin), byref(pmatTrackingOriginToOverlayTransform))
    return result, peTrackingOrigin, pmatTrackingOriginToOverlayTransform
Gets the transform if it is absolute. Returns an error if the transform is some other type.
def main():
    """Command-line entry point.

    Features come from ``PRODUCT_EQUATION`` (inline) or, failing that,
    from the file named by ``PRODUCT_EQUATION_FILENAME``; any
    ``APE_PREPEND_FEATURES`` are prepended. Raises EnvironmentIncomplete
    when no source of features is available.
    """
    features = os.environ.get('APE_PREPEND_FEATURES', '').split()
    inline_features = os.environ.get('PRODUCT_EQUATION', '').split()
    if inline_features:
        features.extend(inline_features)
    else:
        feature_file = os.environ.get('PRODUCT_EQUATION_FILENAME', '')
        if feature_file:
            features.extend(get_features_from_equation_file(feature_file))
        elif not features:
            raise EnvironmentIncomplete(
                'Error running ape:\n'
                'Either the PRODUCT_EQUATION or '
                'PRODUCT_EQUATION_FILENAME environment '
                'variable needs to be set!'
            )
    run(sys.argv, features=features)
Entry point when used via command line. Features are given using the environment variable ``PRODUCT_EQUATION``. If it is not set, ``PRODUCT_EQUATION_FILENAME`` is tried: if it points to an existing equation file that selection is used. (if ``APE_PREPEND_FEATURES`` is given, those features are prepended) If the list of features is empty, ``ape.EnvironmentIncomplete`` is raised.
def write_index(self):
    """Serialize the entry index, write it at ``last_offset`` in the MAR
    file, and record the resulting file size."""
    self.fileobj.seek(self.last_offset)
    payload = index_header.build(dict(entries=self.entries))
    self.fileobj.write(payload)
    self.filesize = self.fileobj.tell()
Write the index of all our files to the MAR file.
def _pys2code(self, line):
    """Update one cell of the pys code_array from a serialized line.

    The line presumably holds tab-separated row, col, tab and code fields
    (maxsplit=3 keeps tabs inside the code itself) — confirm against
    _split_tidy. NOTE(review): ``unicode`` makes this Python 2 only.
    """
    row, col, tab, code = self._split_tidy(line, maxsplit=3)
    key = self._get_key(row, col, tab)
    self.code_array.dict_grid[key] = unicode(code, encoding='utf-8')
Updates code in pys code_array
def setup_session(endpoint_context, areq, uid, client_id='', acr='', salt='salt', authn_event=None):
    """Set up a user session and return its session ID.

    :param endpoint_context: context holding the session database
    :param areq: the authorization request
    :param uid: user identifier
    :param client_id: defaults to ``areq['client_id']`` when empty
    :param acr: authentication context reference used to build an
        AuthnEvent when none is supplied
    :param salt: salt for the AuthnEvent
    :param authn_event: an already-made AuthnEvent
    :return: session id
    """
    if authn_event is None and acr:
        authn_event = AuthnEvent(uid=uid, salt=salt,
                                 authn_info=acr,
                                 authn_time=time.time())
    client_id = client_id or areq['client_id']
    sid = endpoint_context.sdb.create_authz_session(
        authn_event, areq, client_id=client_id, uid=uid)
    endpoint_context.sdb.do_sub(sid, uid, '')
    return sid
Setting up a user session :param endpoint_context: :param areq: :param uid: :param acr: :param client_id: :param salt: :param authn_event: A already made AuthnEvent :return:
def set_metadata(self, metadata, utf8):
    """Set document metadata on the PDF surface.

    ``PDF_METADATA_CREATE_DATE`` and ``PDF_METADATA_MOD_DATE`` values
    must be ISO-8601 (YYYY-MM-DDThh:mm:ss with an optional "[+/-]hh:mm"
    or "Z" timezone); other metadata values may be any UTF-8 string.

    :param metadata: the metadata item to set.
    :param utf8: metadata value.

    *New in cairo 1.16.* *New in cairocffi 0.9.*
    """
    cairo.cairo_pdf_surface_set_metadata(
        self._pointer, metadata, _encode_string(utf8))
    self._check_status()
Sets document metadata. The ``PDF_METADATA_CREATE_DATE`` and ``PDF_METADATA_MOD_DATE`` values must be in ISO-8601 format: YYYY-MM-DDThh:mm:ss. An optional timezone of the form "[+/-]hh:mm" or "Z" for UTC time can be appended. All other metadata values can be any UTF-8 string. :param metadata: the metadata item to set. :param utf8: metadata value. *New in cairo 1.16.* *New in cairocffi 0.9.*
def request_set_status(self, text: str) -> dict:
    """Set the user's status via the ``status.set`` API method and return
    the raw response."""
    params = {'text': text}
    reply = self.session.send_method_request('status.set', params)
    self.check_for_errors('status.set', params, reply)
    return reply
Method to set user status
def to_json(self):
    """Serialize the complete Morse-Smale merge hierarchy to a JSON string.

    @ Out, a string holding the merge hierarchy of all minima and maxima
    plus the per-point (min, max) partition assignments.
    """
    hierarchy = [
        {
            "Dying": dying,
            "Persistence": persistence,
            "Surviving": surviving,
            "Saddle": saddle,
        }
        for dying, (persistence, surviving, saddle) in self.merge_sequence.items()
    ]
    # One (min_index, max_index) row per point; unassigned rows stay None.
    partitions = np.array([None, None] * len(self.Y)).reshape(-1, 2)
    for (min_index, max_index), items in self.base_partitions.items():
        partitions[items, :] = [min_index, max_index]
    return json.dumps({
        "Hierarchy": hierarchy,
        "Partitions": partitions.tolist(),
    })
Writes the complete Morse-Smale merge hierarchy to a string object. @ Out, a string object storing the entire merge hierarchy of all minima and maxima.
def set_default(self, val, force=False):
    """Set a default on an Option that doesn't yet have one.

    Lets a derived class supply a sensible default for an Option that a
    base class declared with ``default=None``.

    parameters:
        val - the value for the default
        force - allow overriding an existing (non-None) default
    """
    # Guard clause: an existing default may only be replaced with force.
    if self.default is not None and not force:
        raise OptionError(
            "cannot override existing default without using the 'force' "
            "option"
        )
    self.default = val
    self.set_value(val)
    self.has_changed = True
this function allows a default to be set on an option that doesn't have one. It is used when a base class defines an Option for use in derived classes but cannot predict what value would be useful to the derived classes. This gives the derived classes the opportunity to set a logical default appropriate for the derived class' context. For example: class A(RequiredConfig): required_config = Namespace() required_config.add_option( 'x', default=None ) class B(A): A.required_config.x.set_default(68) parameters: val - the value for the default force - normally this function only works on Options that have not had a default set (default is None). This boolean allows you to override an existing default.
def unique(self, e, **kwargs):
    """
    Returns True if the expression `e` has only one solution by querying
    the constraint solver. It also adds that unique solution to the
    solver's constraints.
    """
    # Non-AST (concrete) values are trivially unique.
    if not isinstance(e, claripy.ast.Base):
        return True
    # Without symbolic execution enabled, a symbolic expression cannot be
    # considered unique.
    if o.SYMBOLIC not in self.state.options and self.symbolic(e):
        return False
    # Asking for up to two solutions is enough to decide uniqueness.
    r = self.eval_upto(e, 2, **kwargs)
    if len(r) == 1:
        # Pin the single solution as a constraint.
        self.add(e == r[0])
        return True
    elif len(r) == 0:
        raise SimValueError("unsatness during uniqueness check(ness)")
    else:
        return False
Returns True if the expression `e` has only one solution by querying the constraint solver. It also adds that unique solution to the solver's constraints.
def instance(self, skip_exist_test=False):
    """Return the instance of the related object linked by the field.

    With ``skip_exist_test`` the instance is built via the model's
    ``lazy_connect`` (no existence check) instead of the constructor.
    """
    model = self.database._models[self.related_to]
    if skip_exist_test:
        factory = model.lazy_connect
    else:
        factory = model
    return factory(self.proxy_get())
Returns the instance of the related object linked by the field.
def validate_value_string (f, value_string):
    """Checks that value-string is a valid value-string for the given
    feature, raising InvalidValue otherwise.

    NOTE(review): ``basestring`` makes this Python 2 only.
    """
    assert isinstance(f, Feature)
    assert isinstance(value_string, basestring)
    # Free features accept anything; a directly known value is fine too.
    if f.free or value_string in f.values:
        return
    values = [value_string]
    if f.subfeatures:
        # A dash-joined string may combine a value with subfeature values.
        if not value_string in f.values and \
           not value_string in f.subfeatures:
            values = value_string.split('-')
    # The leading component must be a known value (empty is allowed for
    # optional features).
    if not values[0] in f.values and \
       (values[0] or not f.optional):
        raise InvalidValue ("'%s' is not a known value of feature '%s'\nlegal values: '%s'" % (values [0], f.name, f.values))
    # Remaining components are implied subfeature values.
    for v in values [1:]:
        implied_subfeature(f, v, values[0])
Checks that value-string is a valid value-string for the given feature.
def add_device_override(self, addr, cat, subcat, firmware=None):
    """Register a device override (category, subcategory and optionally
    firmware) with the PLM."""
    overrides = [('cat', cat), ('subcat', subcat)]
    if firmware:
        overrides.append(('firmware', firmware))
    for token, value in overrides:
        self.plm.devices.add_override(addr, token, value)
Add a device override to the PLM.
def directory(name, profile=None, **kwargs):
    """Create a directory in etcd.

    name
        The etcd directory name, for example ``/foo/bar/baz``.
    profile
        Optional etcd profile defined in the Salt Master config.
    """
    rtn = {
        'name': name,
        'comment': 'Directory exists',
        'result': True,
        'changes': {}
    }
    current = __salt__['etcd.get'](name, profile=profile, recurse=True, **kwargs)
    created = not current
    result = __salt__['etcd.set'](name, None, directory=True, profile=profile, **kwargs)
    # Only report a change when the set call actually created the dir.
    if result and result != current and created:
        rtn['comment'] = 'New directory created'
        rtn['changes'] = {name: 'Created'}
    return rtn
Create a directory in etcd. name The etcd directory name, for example: ``/foo/bar/baz``. profile Optional, defaults to ``None``. Sets the etcd profile to use which has been defined in the Salt Master config. .. code-block:: yaml my_etd_config: etcd.host: 127.0.0.1 etcd.port: 4001
def pseudolocalize(self, s):
    """Performs pseudo-localization on a string.

    The transforms applied come from the object's ``transforms`` field;
    format-spec placeholders are protected from transliteration.

    :param s: String to pseudo-localize.
    :returns: Copy of *s* with the transforms applied; an empty string
        when the input is empty or None.
    """
    if not s:
        return u""
    if not isinstance(s, six.text_type):
        raise TypeError("String to pseudo-localize must be of type '{0}'.".format(six.text_type.__name__))
    if not self.transforms:
        return s
    # NOTE(review): the verbose regex literal matching format-spec
    # placeholders appears to have been lost here (a bare `r` remains,
    # which is a NameError at runtime) — restore the original pattern.
    fmt_spec = re.compile( r , re.VERBOSE)
    if not fmt_spec.search(s):
        # No placeholders: apply every transform to the whole string.
        result = s
        for munge in self.transforms:
            result = munge(result)
    else:
        # Split around placeholders so transliterations skip them.
        substrings = fmt_spec.split(s)
        for munge in self.transforms:
            if munge in transforms._transliterations:
                for idx in range(len(substrings)):
                    if not fmt_spec.match(substrings[idx]):
                        substrings[idx] = munge(substrings[idx])
                else:
                    continue
            else:
                continue
        result = u"".join(substrings)
        # Non-transliteration transforms run on the reassembled string.
        for munge in self.transforms:
            if munge not in transforms._transliterations:
                result = munge(result)
    return result
Performs pseudo-localization on a string. The specific transforms to be applied to the string is defined in the transforms field of the object. :param s: String to pseudo-localize. :returns: Copy of the string s with the transforms applied. If the input string is an empty string or None, an empty string is returned.
def __highlight_occurence(self, file, occurence):
    """Highlights given file occurence.

    :param file: File containing the occurence.
    :type file: unicode
    :param occurence: Occurence to highlight.
    :type occurence: Occurence or SearchOccurenceNode
    """
    # Make sure the file is open in an editor, preferring cached content.
    if not self.__container.get_editor(file):
        cache_data = self.__files_cache.get_content(file)
        if cache_data:
            document = cache_data.document or self.__get_document(cache_data.content)
            self.__container.load_document(document, file)
            self.__uncache(file)
        else:
            self.__container.load_file(file)
    else:
        self.__container.set_current_editor(file)
    if not occurence:
        return
    # Select the occurence span by anchoring then extending the cursor.
    cursor = self.__container.get_current_editor().textCursor()
    cursor.setPosition(occurence.position, QTextCursor.MoveAnchor)
    cursor.setPosition(occurence.position + occurence.length, QTextCursor.KeepAnchor)
    self.__container.get_current_editor().setTextCursor(cursor)
Highlights given file occurence. :param file: File containing the occurence. :type file: unicode :param occurence: Occurence to highlight. :type occurence: Occurence or SearchOccurenceNode
def _save_config(section, token, value):
    """Helper: persist one configuration token in the ini file via the
    nirtcfg utility, raising CommandExecutionError on failure."""
    cmd = NIRTCFG_PATH
    cmd += ' --set section={0},token=\'{1}\',value=\'{2}\''.format(section, token, value)
    ret = __salt__['cmd.run_all'](cmd)
    if ret['retcode'] == 0:
        return
    exc_msg = 'Error: could not set {} to {} for {}\n'.format(token, value, section)
    raise salt.exceptions.CommandExecutionError(exc_msg)
Helper function to persist a configuration in the ini file
def validate(self, raw_data, **kwargs):
    """Convert *raw_data* to an integer and delegate to the parent
    validator.

    :raises ValidationException: when *raw_data* is not a valid integer.
    """
    try:
        converted_data = int(raw_data)
    except ValueError:
        raise ValidationException(self.messages['invalid'], repr(raw_data))
    # Parent call kept outside the try so its own ValueErrors are not
    # masked as "invalid integer" errors.
    return super(IntegerField, self).validate(converted_data)
Convert the raw_data to an integer.
def add(self, factory, component, properties=None):
    """
    Enqueues the instantiation of the given component

    :param factory: Factory name
    :param component: Component name
    :param properties: Component properties
    :raise ValueError: Component name already reserved in the queue
    :raise Exception: Error instantiating the component
    """
    with self.__lock:
        if component in self.__names:
            raise ValueError(
                "Component name already queued: {0}".format(component)
            )
        if properties is None:
            properties = {}
        # Reserve the name and queue the instantiation request.
        self.__names[component] = factory
        self.__queue.setdefault(factory, {})[component] = properties
        # Try instantiating right away if iPOPO is already available;
        # otherwise the queued entry is handled when it starts.
        try:
            with use_ipopo(self.__context) as ipopo:
                self._try_instantiate(ipopo, factory, component)
        except BundleException:
            pass
Enqueues the instantiation of the given component :param factory: Factory name :param component: Component name :param properties: Component properties :raise ValueError: Component name already reserved in the queue :raise Exception: Error instantiating the component
def key_sign(rsakey, message, digest):
    """Sign *message* with the RSA key using PKCS#1 v1.5 padding and the
    given digest algorithm."""
    pkcs_padding = _asymmetric.padding.PKCS1v15()
    return rsakey.sign(message, pkcs_padding, digest)
Sign the given message with the RSA key.
def charge_series(seq, granularity=0.1):
    """Calculate the charge of *seq* across pH 1-13.

    Parameters
    ----------
    seq : str
        Sequence of amino acids.
    granularity : float, optional
        Step between successive pH values, e.g. 0.1 gives pH 1.0, 1.1, ...
    """
    # Non-canonical residues make the result approximate; warn the caller.
    if 'X' in seq:
        warnings.warn(_nc_warning_str, NoncanonicalWarning)
    ph_values = numpy.arange(1, 13, granularity)
    charges = [sequence_charge(seq, ph) for ph in ph_values]
    return ph_values, charges
Calculates the charge for pH 1-13. Parameters ---------- seq : str Sequence of amino acids. granularity : float, optional Granularity of pH values i.e. if 0.1 pH = [1.0, 1.1, 1.2...]
def properties(self):
    """Return various properties of the binary tree.

    :return: dict of tree properties (height, size, leaf counts, plus
        ``is_bst`` and ``is_balanced``).
    :rtype: dict
    """
    props = _get_tree_properties(self)
    props['is_bst'] = _is_bst(self)
    # _is_balanced returns a height (>= 0) or a negative sentinel.
    props['is_balanced'] = _is_balanced(self) >= 0
    return props
Return various properties of the binary tree. :return: Binary tree properties. :rtype: dict **Example**: .. doctest:: >>> from binarytree import Node >>> >>> root = Node(1) >>> root.left = Node(2) >>> root.right = Node(3) >>> root.left.left = Node(4) >>> root.left.right = Node(5) >>> props = root.properties >>> >>> props['height'] # equivalent to root.height 2 >>> props['size'] # equivalent to root.size 5 >>> props['max_leaf_depth'] # equivalent to root.max_leaf_depth 2 >>> props['min_leaf_depth'] # equivalent to root.min_leaf_depth 1 >>> props['max_node_value'] # equivalent to root.max_node_value 5 >>> props['min_node_value'] # equivalent to root.min_node_value 1 >>> props['leaf_count'] # equivalent to root.leaf_count 3 >>> props['is_balanced'] # equivalent to root.is_balanced True >>> props['is_bst'] # equivalent to root.is_bst False >>> props['is_complete'] # equivalent to root.is_complete True >>> props['is_max_heap'] # equivalent to root.is_max_heap False >>> props['is_min_heap'] # equivalent to root.is_min_heap True >>> props['is_perfect'] # equivalent to root.is_perfect False >>> props['is_strict'] # equivalent to root.is_strict True
def explained_variance(returns, values):
    """Fraction of the variance in *returns* explained by *values*:
    ``1 - Var(returns - values) / Var(returns)``, as a Python float."""
    residual_var = torch.var(returns - values)
    total_var = torch.var(returns)
    return (1 - residual_var / total_var).item()
Calculate how much variance in returns do the values explain
def unsubscribe(self, sid):
    """Disconnect the observer registered under *sid* from this subject.

    :raises KeyError: when *sid* is not a registered observer.
    """
    if sid not in self.observers:
        # Message reworded: the original read
        # "Cannot disconnect a observer does not connected to subject".
        raise KeyError(
            'Cannot disconnect an observer that is not connected to the subject'
        )
    del self.observers[sid]
Disconnect an observer from this subject
def fit(self, sequences, y=None):
    """Fit the clustering on the data

    Parameters
    ----------
    sequences : list of array-like, each of shape [sequence_length, n_features]
        A list of multivariate timeseries. Each sequence may have a
        different length, but all must have the same number of features.

    Returns
    -------
    self
    """
    check_iter_of_sequences(sequences, allow_trajectory=self._allow_trajectory)
    # Concatenate all sequences for the underlying single-array fit.
    super(MultiSequenceClusterMixin, self).fit(self._concat(sequences))
    # Split the fitted labels back into one array per input sequence.
    if hasattr(self, 'labels_'):
        self.labels_ = self._split(self.labels_)
    return self
Fit the clustering on the data Parameters ---------- sequences : list of array-like, each of shape [sequence_length, n_features] A list of multivariate timeseries. Each sequence may have a different length, but they all must have the same number of features. Returns ------- self
def table(
        data_frame,
        scale: float = 0.7,
        include_index: bool = False,
        max_rows: int = 500
):
    """Add *data_frame* to the report display as a formatted scrolling table.

    :param data_frame: The pandas data frame to render.
    :param scale: Display scale as a fraction of the browser window
        height; values below 1.0 avoid scrolling the whole output.
    :param include_index: Whether to show the index column (off by
        default as it is usually noise).
    :param max_rows: Safety cap to keep very large frames from making the
        notebook display sluggish; raise it deliberately if needed.
    """
    report = _get_report()
    rendered = render.table(
        data_frame=data_frame,
        scale=scale,
        include_index=include_index,
        max_rows=max_rows
    )
    report.append_body(rendered)
    report.stdout_interceptor.write_source('[ADDED] Table\n')
Adds the specified data frame to the display in a nicely formatted scrolling table. :param data_frame: The pandas data frame to be rendered to a table. :param scale: The display scale with units of fractional screen height. A value of 0.5 constrains the output to a maximum height equal to half the height of browser window when viewed. Values below 1.0 are usually recommended so the entire output can be viewed without scrolling. :param include_index: Whether or not the index column should be included in the displayed output. The index column is not included by default because it is often unnecessary extra information in the display of the data. :param max_rows: This argument exists to prevent accidentally writing very large data frames to a table, which can cause the notebook display to become sluggish or unresponsive. If you want to display large tables, you need only increase the value of this argument.
def get_assessment_form_for_create(self, assessment_record_types):
    """Gets the assessment form for creating new assessments.

    A new form should be requested for each create transaction.

    arg:    assessment_record_types (osid.type.Type[]): array of
            assessment record types for the create operation, or an
            empty list for none
    return: (osid.assessment.AssessmentForm) - the assessment form
    raise:  InvalidArgument - an element is not a valid OSID Type
    raise:  Unsupported - unable to get form for requested record types
    """
    for arg in assessment_record_types:
        if not isinstance(arg, ABCType):
            raise errors.InvalidArgument('one or more argument array elements is not a valid OSID Type')
    if assessment_record_types == []:
        # No record types: build a plain form without the record_types kwarg.
        obj_form = objects.AssessmentForm(
            bank_id=self._catalog_id,
            runtime=self._runtime,
            effective_agent_id=self.get_effective_agent_id(),
            proxy=self._proxy)
    else:
        obj_form = objects.AssessmentForm(
            bank_id=self._catalog_id,
            record_types=assessment_record_types,
            runtime=self._runtime,
            effective_agent_id=self.get_effective_agent_id(),
            proxy=self._proxy)
    # Track the form as issued-but-not-yet-used-for-create.
    self._forms[obj_form.get_id().get_identifier()] = not CREATED
    return obj_form
Gets the assessment form for creating new assessments. A new form should be requested for each create transaction. arg: assessment_record_types (osid.type.Type[]): array of assessment record types to be included in the create operation or an empty list if none return: (osid.assessment.AssessmentForm) - the assessment form raise: NullArgument - ``assessment_record_types`` is ``null`` raise: OperationFailed - unable to complete request raise: PermissionDenied - authorization failure occurred raise: Unsupported - unable to get form for requested record types *compliance: mandatory -- This method must be implemented.*
def find_missing_env(self):
    """Raise BadOption listing every required environment variable (one
    with neither a default nor a set value) absent from os.environ."""
    missing = [
        entry.env_name
        for entry in self.env
        if entry.default_val is None and entry.set_val is None
        and entry.env_name not in os.environ
    ]
    if missing:
        raise BadOption("Some environment variables aren't in the current environment", missing=missing)
Find any missing environment variables
def shift(self, periods, freq=None):
    """Shift index by desired number of time frequency increments.

    Parameters
    ----------
    periods : int
        Number of increments to shift by; may be negative.
    freq : pandas.DateOffset, pandas.Timedelta or str, optional
        Frequency to shift by; defaults to the index's own ``freq``.

    Returns
    -------
    A shifted index of the same type, keeping the original name.
    """
    shifted = self._data._time_shift(periods, freq=freq)
    cls = type(self)
    return cls(shifted, name=self.name)
Shift index by desired number of time frequency increments. This method is for shifting the values of datetime-like indexes by a specified time increment a given number of times. Parameters ---------- periods : int Number of periods (or increments) to shift by, can be positive or negative. .. versionchanged:: 0.24.0 freq : pandas.DateOffset, pandas.Timedelta or string, optional Frequency increment to shift by. If None, the index is shifted by its own `freq` attribute. Offset aliases are valid strings, e.g., 'D', 'W', 'M' etc. Returns ------- pandas.DatetimeIndex Shifted index. See Also -------- Index.shift : Shift values of Index. PeriodIndex.shift : Shift values of PeriodIndex.
def server_close(self):
    """Close the socket server and any associated resources.

    Closes the listening socket, the queue/topic managers, the optional
    authenticator, and stops the serve loop.
    """
    self.log.debug("Closing the socket server connection.")
    # Close the underlying TCP listening socket first.
    TCPServer.server_close(self)
    # Release broker-side resources.
    self.queue_manager.close()
    self.topic_manager.close()
    # The authenticator is optional and may not support closing.
    if hasattr(self.authenticator, 'close'):
        self.authenticator.close()
    # NOTE(review): shutdown() normally stops serve_forever(); calling it
    # after server_close() is unusual ordering — confirm it is intended.
    self.shutdown()
Closes the socket server and any associated resources.
def savepoint(cr):
    """Context manager wrapping a PostgreSQL savepoint.

    Uses the cursor's native ``savepoint()`` context manager when available;
    otherwise emulates it with explicit SAVEPOINT/RELEASE/ROLLBACK commands.

    :param cr: database cursor
    """
    if hasattr(cr, 'savepoint'):
        with cr.savepoint():
            yield
    else:
        name = uuid.uuid1().hex
        cr.execute('SAVEPOINT "%s"' % name)
        try:
            yield
            cr.execute('RELEASE SAVEPOINT "%s"' % name)
        except Exception:
            # BUG FIX: roll back to the savepoint but re-raise instead of
            # silently swallowing the error. The previous bare `except`
            # also caught GeneratorExit/KeyboardInterrupt, which must
            # propagate out of a context manager.
            cr.execute('ROLLBACK TO SAVEPOINT "%s"' % name)
            raise
return a context manager wrapping postgres savepoints
def print_maps_by_type(map_type, number=None):
    """Print all available maps of a given type.

    Parameters
    ----------
    map_type : {'Sequential', 'Diverging', 'Qualitative'}
        Map type to print (case-insensitive).
    number : int, optional
        Only show maps defining this number of colors.
    """
    map_type = map_type.lower().capitalize()
    if map_type not in MAP_TYPES:
        raise ValueError('Invalid map type, must be one of {0}'.format(MAP_TYPES))
    print(map_type)
    template = '{0:8} : {1}'
    for map_name in sorted(COLOR_MAPS[map_type].keys()):
        counts = sorted(COLOR_MAPS[map_type][map_name].keys(), key=int)
        # Skip maps that do not define the requested color count.
        if number and str(number) not in counts:
            continue
        print(template.format(map_name, '{' + ', '.join(counts) + '}'))
Print all available maps of a given type. Parameters ---------- map_type : {'Sequential', 'Diverging', 'Qualitative'} Select map type to print. number : int, optional Filter output by number of defined colors. By default there is no numeric filtering.
def offline_plotly_data(data, filename=None, config=None, validate=True,
                        default_width='100%', default_height=525,
                        global_requirejs=False):
    r"""Render `data` as a standalone plotly HTML page (no server needed).

    The plotly.js source is inlined so the output works offline. When
    `filename` is a string the HTML is also written to that file.
    Returns the HTML string.
    """
    merged_config = dict(DEFAULT_PLOTLY_CONFIG)
    if config is not None:
        merged_config.update(config)
    with open(os.path.join(DATA_PATH, 'plotly.js.min'), 'rt') as js_file:
        plotly_js = js_file.read()
    html, divid, width, height = _plot_html(
        data,
        config=merged_config,
        validate=validate,
        default_width=default_width,
        default_height=default_height,
        global_requirejs=global_requirejs)
    html = PLOTLY_HTML.format(plotlyjs=plotly_js, plotlyhtml=html)
    if filename and isinstance(filename, str):
        with open(filename, 'wt') as out_file:
            out_file.write(html)
    return html
r""" Write a plotly scatter plot to HTML file that doesn't require server >>> from nlpia.loaders import get_data >>> df = get_data('etpinard') # pd.read_csv('https://plot.ly/~etpinard/191.csv') >>> df.columns = [eval(c) if c[0] in '"\'' else str(c) for c in df.columns] >>> data = {'data': [ ... Scatter(x=df[continent+', x'], ... y=df[continent+', y'], ... text=df[continent+', text'], ... marker=Marker(size=df[continent+', size'].fillna(10000), sizemode='area', sizeref=131868,), ... mode='markers', ... name=continent) for continent in ['Africa', 'Americas', 'Asia', 'Europe', 'Oceania'] ... ], ... 'layout': Layout(xaxis=XAxis(title='Life Expectancy'), yaxis=YAxis(title='GDP per Capita', type='log')) ... } >>> html = offline_plotly_data(data, filename=None)
def arp_suppression(self, **kwargs):
    """Enable ARP Suppression on a VLAN.

    Args:
        name: Vlan name on which the Arp suppression needs to be enabled.
        enable (bool): If arp suppression should be enabled or disabled.
            Default: ``True``.
        get (bool): Get config instead of editing config.
        callback (function): A function executed upon completion of the
            method. The only parameter passed to `callback` will be the
            ``ElementTree`` `config`.

    Returns:
        Return value of `callback`.

    Raises:
        KeyError: if `name` is not passed.
        InvalidVlanId: if `name` fails VLAN-id validation.
    """
    name = kwargs.pop('name')
    enable = kwargs.pop('enable', True)
    get = kwargs.pop('get', False)
    callback = kwargs.pop('callback', self._callback)
    method_class = self._interface
    arp_args = dict(name=name)
    if name:
        if not pynos.utilities.valid_vlan_id(name):
            raise InvalidVlanId("`name` must be between `1` and `8191`")
    # Resolve the generated pynos binding for the suppress-arp enable leaf.
    arp_suppression = getattr(method_class,
                              'interface_vlan_interface_vlan_suppress_'
                              'arp_suppress_arp_enable')
    config = arp_suppression(**arp_args)
    if get:
        return callback(config, handler='get_config')
    if not enable:
        # Disabling is expressed as a delete operation on the XML node.
        config.find('.//*suppress-arp').set('operation', 'delete')
    return callback(config)
Enable ARP suppression on a VLAN.

Args:
    name: Vlan name on which the Arp suppression needs to be enabled.
    enable (bool): If arp suppression should be enabled or disabled. Default: ``True``.
    get (bool): Get config instead of editing config. (True, False)
    callback (function): A function executed upon completion of the
        method. The only parameter passed to `callback` will be the
        ``ElementTree`` `config`.

Returns:
    Return value of `callback`.

Raises:
    KeyError: if `name` is not passed.
    ValueError: if `name` is invalid.

Examples:
    >>> import pynos.device
    >>> switches = ['10.24.39.211', '10.24.39.203']
    >>> auth = ('admin', 'password')
    >>> for switch in switches:
    ...     conn = (switch, '22')
    ...     with pynos.device.Device(conn=conn, auth=auth) as dev:
    ...         output = dev.interface.arp_suppression(
    ...             name='89')
    ...         output = dev.interface.arp_suppression(
    ...             get=True, name='89')
    ...         output = dev.interface.arp_suppression(
    ...             enable=False, name='89')
    ...         # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    KeyError
def load_module(self, loader):
    """Load and decorate a module. Required for the Python meta-loading mechanism.

    Registers the module in ``sys.modules``, records it as loaded, and
    applies the configured decorator to it before returning it.
    """
    modfile, pathname, description = loader.info
    loaded = imp.load_module(loader.fullname, modfile, pathname, description)
    sys.modules[loader.fullname] = loaded
    self.__loaded_modules.add(loader.fullname)
    autodecorator.decorate_module(loaded, decorator=self.__decorator)
    return loaded
Load the module. Required for the Python meta-loading mechanism.
def deal_with_changeset_stack_policy(self, fqn, stack_policy):
    """Set a stack policy before executing a changeset.

    ChangeSets cannot set stack policies in the same update call, so the
    policy (when provided) is applied separately here.

    Args:
        fqn: fully-qualified stack name.
        stack_policy: template object representing a stack policy, or falsy.
    """
    if not stack_policy:
        return
    policy_args = generate_stack_policy_args(stack_policy)
    policy_args["StackName"] = fqn
    logger.debug("Setting stack policy on %s.", fqn)
    self.cloudformation.set_stack_policy(**policy_args)
Set a stack policy when using changesets. ChangeSets don't allow you to set stack policies in the same call to update them. This sets it before executing the changeset if the stack policy is passed in. Args: stack_policy (:class:`stacker.providers.base.Template`): A template object representing a stack policy.
def pkg_upgrade(repo, skip, flag):
    """Check installed packages for upgrades against repository `repo`.

    Returns the list of package names whose repo version is newer than the
    installed one, excluding names in `skip` and blacklisted entries. With
    "--checklist" in `flag`, the user interactively filters the result.
    """
    Msg().checking()
    PACKAGES_TXT = RepoInit(repo).fetch()[0]
    pkgs_for_upgrade = []
    # NOTE(review): the caller's `flag` is not forwarded here (an empty
    # string is passed) — confirm whether repo_data should receive it.
    data = repo_data(PACKAGES_TXT, repo, flag="")
    for pkg in installed():
        status(0.0005)  # brief pause to animate the progress indicator
        inst_pkg = split_package(pkg)
        for name in data[0]:
            if name:
                repo_pkg = split_package(name[:-4])  # drop file extension
                # Upgrade when: names match, repo version is newer, repo
                # build is not older, not skipped, and not blacklisted.
                if (repo_pkg[0] == inst_pkg[0] and
                        LooseVersion(repo_pkg[1]) >
                        LooseVersion(inst_pkg[1]) and
                        repo_pkg[3] >= inst_pkg[3] and
                        inst_pkg[0] not in skip and
                        repo_pkg[1] != "blacklist"):
                    pkgs_for_upgrade.append(repo_pkg[0])
    Msg().done()
    if "--checklist" in flag:
        pkgs_for_upgrade = choose_upg(pkgs_for_upgrade)
    return pkgs_for_upgrade
Checking packages for upgrade
def create(cls, name, abr_type='cisco', auto_cost_bandwidth=100,
           deprecated_algorithm=False, initial_delay=200,
           initial_hold_time=1000, max_hold_time=10000,
           shutdown_max_metric_lsa=0, startup_max_metric_lsa=0):
    """Create custom OSPF Domain Settings (referenced by an OSPFProfile).

    :param str name: name of the custom domain settings
    :param str abr_type: cisco|shortcut|standard
    :param int auto_cost_bandwidth: Mbits/s
    :param bool deprecated_algorithm: RFC 1518 compatibility
    :param int initial_delay: in milliseconds
    :param int initial_hold_time: in milliseconds
    :param int max_hold_time: in milliseconds
    :param int shutdown_max_metric_lsa: in seconds
    :param int startup_max_metric_lsa: in seconds
    :raises CreateElementFailed: create failed with reason
    :return: instance with meta
    """
    payload = dict(
        name=name,
        abr_type=abr_type,
        auto_cost_bandwidth=auto_cost_bandwidth,
        deprecated_algorithm=deprecated_algorithm,
        initial_delay=initial_delay,
        initial_hold_time=initial_hold_time,
        max_hold_time=max_hold_time,
        shutdown_max_metric_lsa=shutdown_max_metric_lsa,
        startup_max_metric_lsa=startup_max_metric_lsa)
    return ElementCreator(cls, payload)
Create custom Domain Settings Domain settings are referenced by an OSPFProfile :param str name: name of custom domain settings :param str abr_type: cisco|shortcut|standard :param int auto_cost_bandwidth: Mbits/s :param bool deprecated_algorithm: RFC 1518 compatibility :param int initial_delay: in milliseconds :param int initial_hold_type: in milliseconds :param int max_hold_time: in milliseconds :param int shutdown_max_metric_lsa: in seconds :param int startup_max_metric_lsa: in seconds :raises CreateElementFailed: create failed with reason :return: instance with meta :rtype: OSPFDomainSetting
def tidy_eggs_list(eggs_list):
    """Tidy the given eggs list.

    Strips whitespace, removes quotes and commas, and drops entries
    that end with ``site-packages``.
    """
    cleaned = (
        line.strip().replace('\'', '').replace(',', '')
        for line in eggs_list
    )
    return [entry for entry in cleaned if not entry.endswith('site-packages')]
Tidy the given eggs list
def get_tracks(self):
    """Return the list of Tracks on this album."""
    info_doc = self._request(self.ws_prefix + ".getInfo", cacheable=True)
    return _extract_tracks(info_doc, self.network)
Returns the list of Tracks on this album.
async def _set_whitelist(self):
    """Whitelist configured domains for the messenger extensions."""
    page = self.settings()
    if 'whitelist' not in page:
        return
    await self._send_to_messenger_profile(page, {
        'whitelisted_domains': page['whitelist'],
    })
    logger.info('Whitelisted %s for page %s',
                page['whitelist'], page['page_id'])
Whitelist domains for the messenger extensions
def SXTB(self, params):
    """SXTB Ra, Rb — sign-extend the low byte of Rb into Ra.

    Returns a closure that performs the operation on the register file.
    """
    Ra, Rb = self.get_two_parameters(r'\s*([^\s,]*),\s*([^\s,]*)(,\s*[^\s,]*)*\s*', params)
    self.check_arguments(low_registers=(Ra, Rb))

    def SXTB_func():
        low_byte = self.register[Rb] & 0xFF
        if low_byte & 0x80:
            # Negative byte: fill the upper 24 bits with ones.
            self.register[Ra] = 0xFFFFFF00 + low_byte
        else:
            self.register[Ra] = low_byte
    return SXTB_func
SXTB Ra, Rb

Sign-extend the byte in Rb and store the result in Ra.
def ensure_contiguity_in_observation_rows(obs_id_vector):
    """Ensure rows for each choice situation are adjacent.

    The hessian calculation requires the design matrix to have contiguous
    rows per observation id; raise a helpful ValueError otherwise.

    Parameters
    ----------
    obs_id_vector : 1D ndarray of ints.
        Unique integer id per choice situation, one entry per row.

    Returns
    -------
    None.
    """
    is_nondecreasing = np.diff(obs_id_vector) >= 0
    if is_nondecreasing.all():
        return None
    problem_ids = obs_id_vector[np.where(~is_nondecreasing)]
    msg_1 = "All rows pertaining to a given choice situation must be "
    msg_2 = "contiguous. \nRows pertaining to the following observation "
    msg_3 = "id's are not contiguous: \n{}"
    raise ValueError(msg_1 + msg_2 + msg_3.format(problem_ids.tolist()))
Ensures that all rows pertaining to a given choice situation are located
next to one another. Raises a helpful ValueError otherwise. This check is
needed because the hessian calculation function requires the design matrix
to have contiguity in rows with the same observation id.

Parameters
----------
obs_id_vector : 1D ndarray of ints.
    Should contain the id (i.e. a unique integer) that corresponds to each
    choice situation in the dataset.

Returns
-------
None.
def dropna(self):
    """Return the MultiIndex without any rows containing null values.

    Follows Baloo's null convention; a row is kept only when every
    component value is non-null.
    """
    keep_masks = [v.notna() for v in self.values]
    combined_mask = reduce(lambda acc, mask: acc & mask, keep_masks)
    return self[combined_mask]
Returns MultiIndex without any rows containing null values according to Baloo's convention. Returns ------- MultiIndex MultiIndex with no null values.
def _emiss_ee(self, Eph):
    """Electron-electron bremsstrahlung emissivity per unit photon energy."""
    if self.weight_ee == 0.0:
        return np.zeros_like(Eph)
    gam = np.vstack(self._gam)
    nelec = np.vstack(self._nelec)
    # Integrate the cross-section-weighted electron distribution over
    # the particle energy grid (log-log trapezoidal rule).
    return c.cgs * trapz_loglog(
        nelec * self._sigma_ee(gam, Eph), self._gam, axis=0)
Electron-electron bremsstrahlung emissivity per unit photon energy
def parse(self, argv):
    """Parse the given argument vector into self.result; return self."""
    kwargs, positional = self.parse_args(argv)
    self.result['args'] += positional
    for dest in self.dests:
        parsed_value = getattr(kwargs, dest)
        if parsed_value is not None:
            self.result['kwargs'][dest] = parsed_value
    return self
Parse the given argument vector.
def run_command(args, asynchronous=False):
    """Execute a command, returning its exit code and output.

    Args:
        args: command string, executed through the shell.
        asynchronous: when True, wait at most one second and return a
            placeholder output without the real exit code.

    Returns:
        PopenOutput: (returncode, output) pair; for asynchronous calls
        the return code is None and the output is 'Asynchronous call.'.
    """
    # Replaced the legacy `cond and a or b` idiom with a conditional
    # expression (the old form breaks when `a` is falsy).
    mode = 'asynchronous' if asynchronous else 'synchronous'
    logging.info("Executing %s command %s.", mode, args)
    # NOTE(review): shell=True on a caller-supplied string is a
    # shell-injection risk if `args` can contain untrusted input.
    process = subprocess.Popen(args, shell=True, stdout=subprocess.PIPE,
                               stderr=subprocess.STDOUT)
    try:
        # Asynchronous callers only wait briefly for early output/failures.
        timeout = 1 if asynchronous else None
        output = process.communicate(timeout=timeout)[0].decode('utf8')
    except subprocess.TimeoutExpired:
        pass
    if asynchronous:
        return PopenOutput(None, 'Asynchronous call.')
    return PopenOutput(process.returncode, output)
Executes a command returning its exit code and output.
def mul(a, b):
    """Multiply two values, treating None as "no value".

    Returns the other operand when one is None (and None when both are).
    """
    if a is None:
        return b
    if b is None:
        return a
    return a * b
Multiply two values, ignoring None
def save_list(lst, path):
    """Save items from `lst` to the file at `path`, one item per line.

    Strings/bytes are written as-is; other items are JSON-encoded first.
    """
    with open(path, 'wb') as out:
        lines = [
            make_str(item)
            if isinstance(item, (six.text_type, six.binary_type))
            else make_str(json.dumps(item))
            for item in lst
        ]
        out.write(b'\n'.join(lines) + b'\n')
Save items from list to the file.
def _load_pickle(self, filename):
    """Load sensors from a pickle file into ``self._sensors``."""
    with open(filename, 'rb') as file_handle:
        loaded_sensors = pickle.load(file_handle)
    self._sensors.update(loaded_sensors)
Load sensors from pickle file.
def addToCommits(self, commit: Commit, sender: str):
    """Add the specified COMMIT to this replica's list of received commits.

    :param commit: the COMMIT to add to the list
    :param sender: the name of the node that sent the COMMIT
    """
    # The BLS layer processes the commit first — presumably so signature
    # state is updated before the vote is counted; confirm ordering intent.
    self._bls_bft_replica.process_commit(commit, sender)
    self.commits.addVote(commit, sender)
    # A new vote may complete the quorum, so attempt ordering immediately.
    self.tryOrder(commit)
Add the specified COMMIT to this replica's list of received commit requests. :param commit: the COMMIT to add to the list :param sender: the name of the node that sent the COMMIT
def SelectFieldPrompt(field_name, context_str, *options):
    """Prompt the user to pick one of `options` for `field_name`.

    `context_str` is printed first to give the user context. Callable
    options are displayed by name, invoked when chosen, and their result
    returned; other options are returned as-is.
    """
    option_dict = {}
    print(context_str)
    print('Please select one of the following options for field "{}"'.format(
        field_name)
    )
    for index, option in enumerate(options, start=1):
        option_dict['{}'.format(index)] = option
        label = option.__name__ if callable(option) else u(str(option))
        print('[ {} ] "{}"'.format(index, label))
    choice = None
    while choice not in option_dict:
        choice = input('option> ').strip()
    selected = option_dict[choice]
    return selected() if callable(selected) else selected
Prompts user to pick from provided options. It is possible to provide a function as an option although it is not yet tested. This could allow a user to be prompted to provide their own value rather than the listed options. Args: field_name (string): Name of the field. context_str (string): Printed to give the user context. options: Variable arguments, should be vobject Components in a list. As retrieved from a vCard.contents dictionary. Returns: One of the options passed in. Ideally always a list.
def fit_var(self):
    """Fit a VAR model to the source activations and derive connectivity.

    Returns self. Raises RuntimeError when no source activations exist.
    """
    if self.activations_ is None:
        raise RuntimeError("VAR fitting requires source activations (run do_mvarica first)")
    masked_activations = self.activations_[self.trial_mask_, :, :]
    self.var_.fit(data=masked_activations)
    self.connectivity_ = Connectivity(self.var_.coef, self.var_.rescov,
                                      self.nfft_)
    return self
Fit a VAR model to the source activations. Returns ------- self : Workspace The Workspace object. Raises ------ RuntimeError If the :class:`Workspace` instance does not contain source activations.
def get_config(key):
    """Get an identicon configuration parameter.

    Precedence order is:
    - application config (`udata.cfg`)
    - theme config
    - default
    """
    key = 'AVATAR_{0}'.format(key.upper())
    local_config = current_app.config.get(key)
    # NOTE(review): falsy app-config values (False, 0, '') fall through to
    # the theme/default via `or` — confirm this is intended.
    return local_config or getattr(theme.current, key, DEFAULTS[key])
Get an identicon configuration parameter. Precedance order is: - application config (`udata.cfg`) - theme config - default
def _render_serializable(self, list_of_objs, context):
    """Render each non-None object through the item resource.

    Returns a JSON-serializable list of JSON-serializable dicts.
    """
    return [
        self._item_resource._render_serializable(obj, context)
        for obj in list_of_objs
        if obj is not None
    ]
Iterates through the passed in `list_of_objs` and calls the `_render_serializable` method of each object's Resource type. This will probably support heterogeneous types at some point (hence the `item_types` initialization, as opposed to just item_type), but that might be better suited to something else like a ResourceDict. This method returns a JSON-serializable list of JSON-serializable dicts.
def copy(self):
    """Return a copy of this SimLibrary that can be mutated independently.

    The new object shares procedure/prototype references but owns fresh
    container objects.
    """
    duplicate = SimLibrary()
    duplicate.procedures = dict(self.procedures)
    duplicate.non_returning = set(self.non_returning)
    duplicate.prototypes = dict(self.prototypes)
    duplicate.default_ccs = dict(self.default_ccs)
    duplicate.names = list(self.names)
    return duplicate
Make a copy of this SimLibrary, allowing it to be mutated without affecting the global version. :return: A new SimLibrary object with the same library references but different dict/list references
def public_decrypt(pub, message):
    """Verify an M2Crypto-compatible RSA X9.31 signature.

    :param pub: the RSA public key object (M2Crypto or PyCrypto style)
    :param str message: the signed message to verify
    :return: the message (or digest) recovered from the signature
    """
    if HAS_M2:
        # M2Crypto exposes X9.31 verification directly on the key object.
        return pub.public_decrypt(message, salt.utils.rsax931.RSA_X931_PADDING)
    else:
        # Fallback: build an X9.31 verifier from the PEM-exported key.
        verifier = salt.utils.rsax931.RSAX931Verifier(pub.exportKey('PEM'))
        return verifier.verify(message)
Verify an M2Crypto-compatible signature

:param Crypto.PublicKey.RSA._RSAobj pub: The RSA public key object
:param str message: The signed message to verify
:rtype: str
:return: The message (or digest) recovered from the signature, or an
    empty string if the verification failed
def deleteResourceFile(self, pid, filename):
    """Delete a resource file.

    :param pid: The HydroShare ID of the resource
    :param filename: Name of the resource file to delete
    :return: The ID of the resource from which the file was deleted.
    :raises: HydroShareNotAuthorized if the user may not perform the action.
    :raises: HydroShareNotFound if the resource or file was not found.
    :raises: HydroShareHTTPException on any other unexpected HTTP status.
    """
    # BUG FIX: the URL previously hard-coded "(unknown)" instead of
    # interpolating the filename, so the wrong path was requested.
    url = "{url_base}/resource/{pid}/files/{filename}".format(
        url_base=self.url_base, pid=pid, filename=filename)
    r = self._request('DELETE', url)
    if r.status_code != 200:
        if r.status_code == 403:
            raise HydroShareNotAuthorized(('DELETE', url))
        elif r.status_code == 404:
            raise HydroShareNotFound((pid, filename))
        else:
            raise HydroShareHTTPException((url, 'DELETE', r.status_code))
    response = r.json()
    # Sanity check: the server must report the same resource we targeted.
    assert(response['resource_id'] == pid)
    return response['resource_id']
Delete a resource file :param pid: The HydroShare ID of the resource :param filename: String representing the name of the resource file to delete :return: Dictionary containing 'resource_id' the ID of the resource from which the file was deleted, and 'file_name' the filename of the file deleted. :raises: HydroShareNotAuthorized if user is not authorized to perform action. :raises: HydroShareNotFound if the resource or resource file was not found. :raises: HydroShareHTTPException if an unexpected HTTP response code is encountered.
def get_coi(self, params_dict):
    """Invoke the CO Index endpoint and return the parsed JSON data.

    :param params_dict: dict with 'lat', 'lon', 'start' and 'interval' keys
    :returns: the JSON payload returned by the API
    :raises: *ValueError*, *APICallError*
    """
    lat = str(params_dict['lat'])
    lon = str(params_dict['lon'])
    start = params_dict['start']
    interval = params_dict['interval']
    # No start time -> request current data; otherwise trim the reference
    # time to the requested interval (defaulting to year granularity).
    if start is None:
        timeref = 'current'
    else:
        if interval is None:
            timeref = self._trim_to(timeformatutils.to_date(start), 'year')
        else:
            timeref = self._trim_to(timeformatutils.to_date(start), interval)
    fixed_url = '%s/%s,%s/%s.json' % (CO_INDEX_URL, lat, lon, timeref)
    uri = http_client.HttpClient.to_url(fixed_url, self._API_key, None)
    _, json_data = self._client.cacheable_get_json(uri)
    return json_data
Invokes the CO Index endpoint :param params_dict: dict of parameters :returns: a string containing raw JSON data :raises: *ValueError*, *APICallError*
def deploy(self, initial_instance_count, instance_type, accelerator_type=None,
           endpoint_name=None, **kwargs):
    """Deploy the best trained (or user-specified) model to a SageMaker endpoint.

    Args:
        initial_instance_count (int): Minimum number of EC2 instances for the
            endpoint.
        instance_type (str): EC2 instance type, e.g. 'ml.c4.xlarge'.
        accelerator_type (str): Optional Elastic Inference accelerator type.
        endpoint_name (str): Endpoint name; defaults to the best training
            job's name.
        **kwargs: Other deployment arguments, forwarded to the estimator's
            ``deploy()``.

    Returns:
        sagemaker.predictor.RealTimePredictor: predictor for the endpoint.
    """
    # Resolve the best training job once so the endpoint name and the
    # attached estimator are guaranteed to refer to the same job (the
    # previous code called best_training_job() twice).
    best_training_job = self.best_training_job()
    endpoint_name = endpoint_name or best_training_job
    best_estimator = self.estimator.attach(
        best_training_job,
        sagemaker_session=self.estimator.sagemaker_session)
    return best_estimator.deploy(initial_instance_count, instance_type,
                                 accelerator_type=accelerator_type,
                                 endpoint_name=endpoint_name, **kwargs)
Deploy the best trained or user specified model to an Amazon SageMaker endpoint and return a ``sagemaker.RealTimePredictor`` object. For more information: http://docs.aws.amazon.com/sagemaker/latest/dg/how-it-works-training.html Args: initial_instance_count (int): Minimum number of EC2 instances to deploy to an endpoint for prediction. instance_type (str): Type of EC2 instance to deploy to an endpoint for prediction, for example, 'ml.c4.xlarge'. accelerator_type (str): Type of Elastic Inference accelerator to attach to an endpoint for model loading and inference, for example, 'ml.eia1.medium'. If not specified, no Elastic Inference accelerator will be attached to the endpoint. For more information: https://docs.aws.amazon.com/sagemaker/latest/dg/ei.html endpoint_name (str): Name to use for creating an Amazon SageMaker endpoint. If not specified, the name of the training job is used. **kwargs: Other arguments needed for deployment. Please refer to the ``create_model()`` method of the associated estimator to see what other arguments are needed. Returns: sagemaker.predictor.RealTimePredictor: A predictor that provides a ``predict()`` method, which can be used to send requests to the Amazon SageMaker endpoint and obtain inferences.
def _FilterOutPathInfoDuplicates(path_infos):
    """Filter out duplicates from passed PathInfo objects.

    PathInfos sharing a (path_type, path id) pair are deduplicated; the one
    with the greatest (ctime, mtime, atime, inode) is kept.
    """
    grouped = {}
    for path_info in path_infos:
        group_key = (path_info.path_type, path_info.GetPathID())
        grouped.setdefault(group_key, []).append(path_info)

    def _Newest(candidates):
        # max() returns the first-encountered maximal element, matching the
        # stable descending-sort-then-take-first behavior.
        return max(candidates, key=lambda pi: (
            pi.stat_entry.st_ctime,
            pi.stat_entry.st_mtime,
            pi.stat_entry.st_atime,
            pi.stat_entry.st_ino,
        ))

    return [_Newest(candidates) for candidates in grouped.values()]
Filters out duplicates from passed PathInfo objects. Args: path_infos: An iterable with PathInfo objects. Returns: A list of PathInfo objects with duplicates removed. Duplicates are removed following this logic: they're sorted by (ctime, mtime, atime, inode number) in the descending order and then the first one is taken and the others are dropped.
def entry_point_name_to_identifier(entry_point_name):
    """Transform an entry point name into a PyPI-package-safe identifier.

    Non-ASCII names are punycode-encoded first; everything outside
    lowercase letters and digits is then stripped.
    """
    try:
        entry_point_name.encode('ascii')
    except UnicodeEncodeError:
        ascii_name = entry_point_name.encode('punycode').decode('ascii')
    else:
        ascii_name = entry_point_name
    allowed = set(string.ascii_lowercase + string.digits)
    return ''.join(char for char in ascii_name if char in allowed)
Transform an entry point name into an identifier suitable for inclusion in a PyPI package name.
def validate_experimental(context, param, value):
    """Load and validate an experimental data configuration.

    Returns None when no value is supplied; otherwise returns the
    validated configuration object.
    """
    if value is None:
        return None
    experiment_config = ExperimentConfiguration(value)
    experiment_config.validate()
    return experiment_config
Load and validate an experimental data configuration.
def update_version(self, service_id, version_number, **kwargs):
    """Update a particular version for a particular service."""
    body = self._formdata(kwargs, FastlyVersion.FIELDS)
    endpoint = "/service/%s/version/%d/" % (service_id, version_number)
    content = self._fetch(endpoint, method="PUT", body=body)
    return FastlyVersion(self, content)
Update a particular version for a particular service.
def assume(self, other):
    """Assume the identity of another target.

    Copies the arch, bits, endianness and mode from `other` onto this
    target — useful to make the global target mirror an ELF executable.

    Arguments:
        other: the target whose identity to assume.
    """
    for attr_name in ('_arch', '_bits', '_endian', '_mode'):
        setattr(self, attr_name, getattr(other, attr_name))
Assume the identity of another target. This can be useful to make the global target assume the identity of an ELF executable. Arguments: other(:class:`Target`): The target whose identity to assume. Example: >>> from pwny import * >>> target.assume(ELF('my-executable'))
def unpickle_file(picklefile, **kwargs):
    """Helper function to unpickle data from `picklefile`.

    Extra keyword arguments are forwarded to ``pickle.load``.
    """
    with open(picklefile, 'rb') as handle:
        return pickle.load(handle, **kwargs)
Helper function to unpickle data from `picklefile`.
async def send_rpc_command(self, short_name, rpc_id, payload, sender_client, timeout=1.0):
    """Send an RPC to a service using its registered agent.

    Args:
        short_name (str): The name of the service we would like to send an RPC to
        rpc_id (int): The rpc id that we would like to call
        payload (bytes): The raw bytes that we would like to send as an argument
        sender_client (str): The uuid of the sending client
        timeout (float): The maximum number of seconds before we signal a
            timeout of the RPC

    Returns:
        str: A unique id that can be used to identify the notified response
        of this RPC.
    """
    # Pre-register a result slot so a response can land even before this
    # coroutine returns the tag to the caller.
    rpc_tag = str(uuid.uuid4())
    self.rpc_results.declare(rpc_tag)
    if short_name in self.services and short_name in self.agents:
        agent_tag = self.agents[short_name]
        rpc_message = {
            'rpc_id': rpc_id,
            'payload': payload,
            'response_uuid': rpc_tag
        }
        # Track the in-flight RPC so responses/timeouts can be routed back
        # to the sending client.
        self.in_flight_rpcs[rpc_tag] = InFlightRPC(sender_client, short_name, monotonic(), timeout)
        await self._notify_update(short_name, 'rpc_command', rpc_message, directed_client=agent_tag)
    else:
        # Unknown service: synthesize an immediate failure result.
        response = dict(result='service_not_found', response=b'')
        self.rpc_results.set(rpc_tag, response)
    return rpc_tag
Send an RPC to a service using its registered agent. Args: short_name (str): The name of the service we would like to send and RPC to rpc_id (int): The rpc id that we would like to call payload (bytes): The raw bytes that we would like to send as an argument sender_client (str): The uuid of the sending client timeout (float): The maximum number of seconds before we signal a timeout of the RPC Returns: str: A unique id that can used to identify the notified response of this RPC.
def add(self, original_index, operation):
    """Record an operation together with its original bulk-operation index.

    :Parameters:
      - `original_index`: index of this operation within the larger bulk op.
      - `operation`: the operation document.
    """
    self.ops.append(operation)
    self.index_map.append(original_index)
Add an operation to this Run instance. :Parameters: - `original_index`: The original index of this operation within a larger bulk operation. - `operation`: The operation document.
def _convert_to_indexer(self, obj, axis=None, is_setter=False):
    """Convert `obj` into a positional indexer.

    Much simpler than the label-based variant: only slices, floats (which
    go through the scalar converter for the error path), and
    already-valid keys need handling.
    """
    if axis is None:
        axis = self.axis or 0
    if isinstance(obj, slice):
        return self._convert_slice_indexer(obj, axis)
    elif is_float(obj):
        # Floats are handled (rejected/converted) by the scalar path.
        return self._convert_scalar_indexer(obj, axis)
    try:
        # Anything that validates as a key is returned unchanged.
        self._validate_key(obj, axis)
        return obj
    except ValueError:
        raise ValueError("Can only index by location with "
                         "a [{types}]".format(types=self._valid_types))
much simpler as we only have to deal with our valid types
def request_network_status(blink, network):
    """Request network information.

    :param blink: Blink instance.
    :param network: Sync module network id.
    """
    return http_get(blink,
                    "{}/network/{}".format(blink.urls.base_url, network))
Request network information. :param blink: Blink instance. :param network: Sync module network id.
def refill_main_wallet(self, from_address, to_address, nfees, ntokens,
                       password, min_confirmations=6, sync=False):
    """Refill the Federation wallet with fee and token outputs.

    Args:
        from_address (Tuple[str]): (path, address) of the refill wallet.
        to_address (str): Federation wallet address.
        nfees (int): number of fee outputs to create.
        ntokens (int): number of token outputs to create.
        password (str): refill-wallet password used to sign.
        min_confirmations (int): confirmations required on inputs.
        sync (bool): accepted for interface compatibility.

    Returns:
        str: transaction id.
    """
    path, from_address = from_address
    outputs = ([(to_address, self.fee)] * nfees
               + [(to_address, self.token)] * ntokens)
    unsigned_tx = self._t.simple_transaction(
        from_address, outputs, min_confirmations=min_confirmations)
    signed_tx = self._t.sign_transaction(unsigned_tx, password)
    return self._t.push(signed_tx)
Refill the Federation wallet with tokens and fees. This keeps the federation wallet clean. Dealing with exact values simplifies the transactions. No need to calculate change. Easier to keep track of the unspents and prevent double spends that would result in transactions being rejected by the bitcoin network. Args: from_address (Tuple[str]): Refill wallet address. Refills the federation wallet with tokens and fees to_address (str): Federation wallet address nfees (int): Number of fees to transfer. Each fee is 10000 satoshi. Used to pay for the transactions ntokens (int): Number of tokens to transfer. Each token is 600 satoshi. Used to register hashes in the blockchain password (str): Password for the Refill wallet. Used to sign the transaction min_confirmations (int): Number of confirmations when chosing the inputs of the transaction. Defaults to 6 sync (bool): Perform the transaction in synchronous mode, the call to the function will block until there is at least on confirmation on the blockchain. Defaults to False Returns: str: transaction id
def get_split_adjusted_asof_idx(self, dates):
    """Compute the index in `dates` where the split-adjusted-asof date falls.

    Up to and including that date, adjustments must be unapplied and
    re-applied as they come in; after it they apply normally.

    Parameters
    ----------
    dates : pd.DatetimeIndex
        The calendar dates over which the Pipeline is being computed.

    Returns
    -------
    split_adjusted_asof_idx : int
        Index into `dates`; the final index when the asof date lies past
        the window, -1 when it lies before the window.
    """
    split_adjusted_asof_idx = dates.searchsorted(
        self._split_adjusted_asof
    )
    # asof date beyond the computed window: clamp to the final date.
    if split_adjusted_asof_idx == len(dates):
        split_adjusted_asof_idx = len(dates) - 1
    # asof date before the window: flag with -1.
    elif self._split_adjusted_asof < dates[0].tz_localize(None):
        split_adjusted_asof_idx = -1
    return split_adjusted_asof_idx
Compute the index in `dates` where the split-adjusted-asof-date falls. This is the date up to which, and including which, we will need to unapply all adjustments for and then re-apply them as they come in. After this date, adjustments are applied as normal. Parameters ---------- dates : pd.DatetimeIndex The calendar dates over which the Pipeline is being computed. Returns ------- split_adjusted_asof_idx : int The index in `dates` at which the data should be split.
def get_channel_node_from_json(json_tree):
    """Build a `ChannelNode` from json data provided in `json_tree`."""
    return ChannelNode(
        title=json_tree['title'],
        description=json_tree['description'],
        source_domain=json_tree['source_domain'],
        source_id=json_tree['source_id'],
        language=json_tree['language'],
        # thumbnail is the only optional field.
        thumbnail=json_tree.get('thumbnail', None),
    )
Build `ChannelNode` from json data provided in `json_tree`.
def specfn_quant_generator(specfiles, quantfiles, tag, ignore_tags):
    """Generate (spectra-file basename, quant element) tuples.

    Spectra files and quant files are paired positionally.
    """
    for spec_path, quant_path in zip(specfiles, quantfiles):
        spec_basename = os.path.basename(spec_path)
        for quant_el in basereader.generate_xmltags(quant_path, tag,
                                                    ignore_tags):
            yield spec_basename, quant_el
Generates tuples of specfile and quant element for general formats
def clean_value(self, value):
    """Clean a value with the user-provided cleaner, else ``reduce_value``."""
    cleaner = self._clean_value
    if cleaner:
        return cleaner(value)
    return self.reduce_value(value)
Cleans a value, using either the user provided clean_value, or cls.reduce_value.
def create_organization(self, auth, owner_name, org_name, full_name=None,
                        description=None, website=None, location=None):
    """Create a new organization owned by `owner_name`; return it.

    :param auth: admin-level authentication object
    :param str owner_name: username of the organization owner
    :param str org_name: organization name
    :param str full_name: full name of the organization
    :param str description: description of the organization
    :param str website: official website
    :param str location: organization location
    :rtype: GogsOrg
    :raises NetworkFailure: on communication error
    :raises ApiFailure: if the request cannot be serviced
    """
    payload = {
        "username": org_name,
        "full_name": full_name,
        "description": description,
        "website": website,
        "location": location,
    }
    response = self.post("/admin/users/{u}/orgs".format(u=owner_name),
                         auth=auth, data=payload)
    return GogsOrg.from_json(response.json())
Creates a new organization, and returns the created organization. :param auth.Authentication auth: authentication object, must be admin-level :param str owner_name: Username of organization owner :param str org_name: Organization name :param str full_name: Full name of organization :param str description: Description of the organization :param str website: Official website :param str location: Organization location :return: a representation of the created organization :rtype: GogsOrg :raises NetworkFailure: if there is an error communicating with the server :raises ApiFailure: if the request cannot be serviced
def chunk(self, maxSize):
    """Split the Collection into Collections of at most `maxSize` elements.

    # Parameters

    _maxSize_ : `int`
    > The maximum number of elements in a returned Collection

    # Returns

    `list [Collection]`
    > Chunks that, if all merged (`|` operator), recreate the original
    """
    chunks = []
    currentSize = maxSize + 1  # force creation of the first chunk
    for i in self:
        if currentSize >= maxSize:
            chunks.append(type(self)({i},
                                     name='Chunk-{}-of-{}'.format(len(chunks), self.name),
                                     quietStart=True))
            # BUG FIX: the new chunk already holds one element, so the count
            # must start at 1 (it previously started at 0, which produced
            # chunks of maxSize + 1 elements).
            currentSize = 1
        else:
            chunks[-1].add(i)
            currentSize += 1
    return chunks
Splits the `Collection` into _maxSize_ size or smaller `Collections` # Parameters _maxSize_ : `int` > The maximum number of elements in a retuned `Collection` # Returns `list [Collection]` > A list of `Collections` that if all merged (`|` operator) would create the original
def from_string(input_str) -> 'MissionTime':
    """Create a MissionTime instance from a string.

    Format: YYYYMMDDHHMMSS

    Args:
        input_str: string to parse

    Returns:
        MissionTime instance
    """
    match = RE_INPUT_STRING.match(input_str)
    if not match:
        raise ValueError(f'badly formatted date/time: {input_str}')
    fields = ('year', 'month', 'day', 'hour', 'minute', 'second')
    parts = [int(match.group(field)) for field in fields]
    return MissionTime(datetime.datetime(*parts))
Creates a MissionTime instance from a string Format: YYYYMMDDHHMMSS Args: input_str: string to parse Returns: MissionTime instance