code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def ensure_has_same_campaigns(self):
    """Ensure the two campaigns to merge were generated from the same campaign.yaml.

    Asserts that ``campaign.yaml`` exists under both ``self.lhs`` and
    ``self.rhs`` and that the two files have identical content.
    """
    lhs_yaml = osp.join(self.lhs, 'campaign.yaml')
    rhs_yaml = osp.join(self.rhs, 'campaign.yaml')
    # Both campaign descriptions must exist and be byte-identical.
    assert osp.isfile(lhs_yaml)
    assert osp.isfile(rhs_yaml)
    assert filecmp.cmp(lhs_yaml, rhs_yaml)
Ensure that the 2 campaigns to merge have been generated from the same campaign.yaml
def ports_mapping(self, ports):
    """Set the ports on this cloud.

    Each port dict is assigned a sequential ``port_number`` starting at 0.

    :param ports: ports info (list of dicts)
    :raises NodeError: if the cloud already has connected NIOs
    """
    if ports != self._ports_mapping:
        if len(self._nios) > 0:
            raise NodeError("Can't modify a cloud already connected.")
        # enumerate replaces the manual counter of the original.
        for port_number, port in enumerate(ports):
            port["port_number"] = port_number
        self._ports_mapping = ports
Set the ports on this cloud. :param ports: ports info
def to_snake(camel):
    """Convert a CamelCase identifier to snake_case.

    Example: ``TimeSkill`` -> ``time_skill``. Falsy input is returned
    unchanged.
    """
    if not camel:
        return camel
    pieces = []
    for ch in camel:
        if 'A' <= ch <= 'Z':
            pieces.append('_')
        pieces.append(ch)
    snake = ''.join(pieces).lower()
    # Drop the leading underscore added for an uppercase first letter.
    if camel[0].isupper():
        snake = snake[1:]
    return snake
TimeSkill -> time_skill
def create(self, period: int, limit: int):
    """Create a rate limiting rule.

    :param period: rate limiting period in seconds
    :type period: int
    :param limit: number of attempts permitted within a given period
    :type limit: int
    """
    self.period, self.limit = period, limit
Creates a rate limiting rule with rate limiting period and attempt limit :param period: Rate limiting period in seconds :type period: int :param limit: Number of attempts permitted by rate limiting within a given period :type limit: int
def call_operation(self, operation, **kwargs): data = {'operation': operation} data.update(kwargs) return self.invoke(data)
A generic method to call any operation supported by the Lambda handler
def skip_common_stack_elements(stacktrace, base_case):
    """Skip items that the target stacktrace shares with the base stacktrace.

    Returns the suffix of ``stacktrace`` starting at the first frame that
    differs from ``base_case``; if no compared frame differs, only the last
    frame is returned.
    """
    index = 0
    for trace, base in zip(stacktrace, base_case):
        if trace != base:
            return stacktrace[index:]
        index += 1
    return stacktrace[-1:]
Skips items that the target stacktrace shares with the base stacktrace.
def term_width():
    """Return the column width of the terminal, or ``None`` if it can't be determined.

    Tries a POSIX ioctl first, then the Windows console API.
    NOTE(review): the Windows branch returns 0 (not None) on failure, and the
    function falls through to an implicit None when neither backend is
    available — confirm callers handle both.
    """
    if fcntl and termios:
        # POSIX: query the window size of stdin (fd 0).
        try:
            # NOTE(review): struct.unpack('hh') needs a 4-byte buffer; the
            # single-space argument below looks truncated — confirm.
            winsize = fcntl.ioctl(0, termios.TIOCGWINSZ, ' ')
            _, width = struct.unpack('hh', winsize)
            return width
        except IOError:
            pass
    elif windll and create_string_buffer:
        # Windows: read the console screen buffer info from kernel32.
        stderr_handle, struct_size = -12, 22
        handle = windll.kernel32.GetStdHandle(stderr_handle)
        csbi = create_string_buffer(struct_size)
        res = windll.kernel32.GetConsoleScreenBufferInfo(handle, csbi)
        if res:
            (_, _, _, _, _, left, _, right, _, _, _) = struct.unpack('hhhhHhhhhhh', csbi.raw)
            return right - left + 1
        else:
            return 0
Return the column width of the terminal, or ``None`` if it can't be determined.
def add_device(self, path):
    """Add a device to a libinput context.

    Args:
        path (str): Path to an input device node.

    Returns:
        ~libinput.define.Device: A device object, or :obj:`None` if the
        device could not be initialized.
    """
    hdevice = self._libinput.libinput_path_add_device(
        self._li, path.encode())
    if hdevice:
        return Device(hdevice, self._libinput)
    return None
Add a device to a libinput context. If successful, the device will be added to the internal list and re-opened on :meth:`~libinput.LibInput.resume`. The device can be removed with :meth:`remove_device`. If the device was successfully initialized, it is returned. Args: path (str): Path to an input device. Returns: ~libinput.define.Device: A device object or :obj:`None`.
def edit_content(self, original_lines, file_name):
    """Process a file's contents.

    First processes the contents line by line applying the registered
    expressions, then processes the result with the registered functions.

    Arguments:
        original_lines (list of str): file content.
        file_name (str): name of the file (used for logging).

    Returns:
        list of str: the transformed lines.
    """
    lines = [self.edit_line(line) for line in original_lines]
    for function in self._functions:
        try:
            lines = list(function(lines, file_name))
        except UnicodeDecodeError as err:
            # Decoding problems abort processing of this file but are
            # deliberately non-fatal: return what we have so far.
            log.error('failed to process %s: %s', file_name, err)
            return lines
        except Exception as err:
            # Any other failure is logged with the offending function and
            # re-raised.
            log.error("failed to process %s with code %s: %s", file_name, function, err)
            raise
    return lines
Processes a file's contents. First processes the contents line by line, applying the registered expressions; then processes the resulting contents using the registered functions. Arguments: original_lines (list of str): file content. file_name (str): name of the file.
def delete(name, region=None, key=None, keyid=None, profile=None):
    """Delete an ELB.

    Returns True when the ELB was deleted or did not exist, False on failure.

    CLI example to delete an ELB:

    .. code-block:: bash

        salt myminion boto_elb.delete myelb region=us-east-1
    """
    conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
    # A missing ELB counts as already deleted.
    if not exists(name, region, key, keyid, profile):
        return True
    try:
        conn.delete_load_balancer(name)
        log.info('Deleted ELB %s.', name)
        return True
    except boto.exception.BotoServerError as error:
        log.error('Failed to delete ELB %s', name,
                  exc_info_on_loglevel=logging.DEBUG)
        return False
Delete an ELB. CLI example to delete an ELB: .. code-block:: bash salt myminion boto_elb.delete myelb region=us-east-1
def _add_linux_ethernet(self, port_info, bridge_name):
    """Connect an Ethernet port using raw sockets on Linux.

    If the interface is a bridge, a dedicated gns3 TAP interface is created,
    attached to ubridge and enslaved to the bridge; otherwise the interface
    is attached directly with a raw NIO.
    """
    interface = port_info["interface"]
    if gns3server.utils.interfaces.is_interface_bridge(interface):
        network_interfaces = [interface["name"] for interface in self._interfaces()]
        i = 0
        # Find a free "gns3tap<i>-<port>" name not colliding with existing
        # host interfaces.
        while True:
            tap = "gns3tap{}-{}".format(i, port_info["port_number"])
            if tap not in network_interfaces:
                break
            i += 1
        yield from self._ubridge_send('bridge add_nio_tap "{name}" "{interface}"'.format(name=bridge_name, interface=tap))
        yield from self._ubridge_send('brctl addif "{interface}" "{tap}"'.format(tap=tap, interface=interface))
    else:
        yield from self._ubridge_send('bridge add_nio_linux_raw {name} "{interface}"'.format(name=bridge_name, interface=interface))
Use raw sockets on Linux. If interface is a bridge we connect a tap to it
def build_rotation(vec3):
    """Build a rotation matrix.

    :param vec3: a ``Vector3`` giving the rotation about each axis; the
        rotations are applied in x, y, z order.
    :returns: the combined ``Matrix4`` rotation.
    :raises ValueError: if ``vec3`` is not a ``Vector3``.
    """
    if not isinstance(vec3, Vector3):
        # Fixed: the message previously named a non-existent parameter
        # "rotAmt" instead of the actual parameter.
        raise ValueError("vec3 must be a Vector3")
    return Matrix4.x_rotate(vec3.x) * Matrix4.y_rotate(vec3.y) * Matrix4.z_rotate(vec3.z)
Build a rotation matrix. vec3 is a Vector3 defining the axis about which to rotate the object.
def get_deposit_address(self, currency):
    """Get the deposit address for a currency.

    https://docs.kucoin.com/#get-deposit-address

    :param currency: name of the currency, e.g. ``'NEO'``
    :type currency: string
    :returns: ApiResponse
    :raises: KucoinResponseException, KucoinAPIException
    """
    params = {'currency': currency}
    return self._get('deposit-addresses', True, data=params)
Get deposit address for a currency https://docs.kucoin.com/#get-deposit-address :param currency: Name of currency :type currency: string .. code:: python address = client.get_deposit_address('NEO') :returns: ApiResponse .. code:: python { "address": "0x78d3ad1c0aa1bf068e19c94a2d7b16c9c0fcd8b1", "memo": "5c247c8a03aa677cea2a251d" } :raises: KucoinResponseException, KucoinAPIException
def prune(self, filter_func=None, from_stash='active', to_stash='pruned'):
    """Prune unsatisfiable states from a stash.

    Moves all unsatisfiable states in the given stash into a different stash.

    :param filter_func: Only prune states that match this filter.
    :param from_stash:  Prune states from this stash. (default: 'active')
    :param to_stash:    Put pruned states in this stash. (default: 'pruned')
    :returns: The simulation manager, for chaining.
    """
    def _prune_filter(state):
        # Consider a state only when it passes filter_func (or none given).
        to_prune = not filter_func or filter_func(state)
        if to_prune and not state.satisfiable():
            if self._hierarchy:
                # Keep the state hierarchy consistent with the pruning.
                self._hierarchy.unreachable_state(state)
                self._hierarchy.simplify()
            return True
        return False
    self.move(from_stash, to_stash, _prune_filter)
    return self
Prune unsatisfiable states from a stash. This function will move all unsatisfiable states in the given stash into a different stash. :param filter_func: Only prune states that match this filter. :param from_stash: Prune states from this stash. (default: 'active') :param to_stash: Put pruned states in this stash. (default: 'pruned') :returns: The simulation manager, for chaining. :rtype: SimulationManager
def send_message(ctx, scheduler_rpc, project, message):
    """Send a message to a project from the command line.

    Resolves the scheduler RPC endpoint (explicit URL, environment-provided
    address, or the local default) and submits an ``on_message`` task.
    """
    if isinstance(scheduler_rpc, six.string_types):
        scheduler_rpc = connect_rpc(ctx, None, scheduler_rpc)
    if scheduler_rpc is None and os.environ.get('SCHEDULER_NAME'):
        # Docker-style service link: derive the HTTP address from the
        # tcp://host:port value exposed by the linked container.
        scheduler_rpc = connect_rpc(ctx, None, 'http://%s/' % (
            os.environ['SCHEDULER_PORT_23333_TCP'][len('tcp://'):]))
    if scheduler_rpc is None:
        scheduler_rpc = connect_rpc(ctx, None, 'http://127.0.0.1:23333/')
    return scheduler_rpc.send_task({
        'taskid': utils.md5string('data:,on_message'),
        'project': project,
        'url': 'data:,on_message',
        'fetch': {
            'save': ('__command__', message),
        },
        'process': {
            'callback': '_on_message',
        }
    })
Send Message to project from command line
def show_dbs():
    """Return a tuple of all dbs and their related collections.

    Each element is ``{"name": <db name>, "collections": <collection names>}``
    with the internal ``system.indexes`` collection filtered out. Returns an
    empty tuple when no client connection can be established.
    """
    l = []
    mc = client_connector()
    if not mc:
        return ()
    dbs = mc.database_names()
    for d in dbs:
        dbc = mc[d]
        collections = dbc.collection_names()
        # Hide MongoDB's internal index bookkeeping collection.
        collections = remove_values_from_list(collections, "system.indexes")
        l.append({"name": d, "collections": collections})
    return tuple(l)
return a list of all dbs and related collections. Return an empty list on error.
def set_main_fan(self, main_fan):
    """Set the main fan config.

    :param main_fan: value to set the main fan
    :type main_fan: int [0-10]
    :returns: None
    :raises InvalidInput: if ``main_fan`` is not an int between 0 and 10
    """
    # BUG FIX: the original joined the two checks with `and`, so a value had
    # to be BOTH a non-int AND out of range to be rejected — any int outside
    # 0-10 (and any in-range non-int) slipped through. Either failure must
    # reject the value.
    if type(main_fan) is not int or main_fan not in range(0, 11):
        raise InvalidInput("Main fan value must be int between 0-10")
    self._config['main_fan'] = main_fan
    self._q.put(self._config)
Set the main fan config. :param main_fan: Value to set the main fan :type main_fan: int [0-10] :returns: None :raises: InvalidInput
def togpx(self):
    """Generate a GPX waypoint element subtree.

    Returns:
        etree.Element: GPX element
    """
    element = create_elem(self.__class__._elem_name,
                          {'lat': str(self.latitude),
                           'lon': str(self.longitude)})
    if self.name:
        element.append(create_elem('name', text=self.name))
    if self.description:
        element.append(create_elem('desc', text=self.description))
    # BUG FIX: an elevation of 0 (sea level) is valid; testing truthiness
    # silently dropped the <ele> element for such waypoints.
    if self.elevation is not None:
        element.append(create_elem('ele', text=str(self.elevation)))
    if self.time is not None:
        element.append(create_elem('time', text=self.time.isoformat()))
    return element
Generate a GPX waypoint element subtree. Returns: etree.Element: GPX element
def get_connection_id(self, conn_or_int_id):
    """Get the connection id for a connection.

    Args:
        conn_or_int_id (int or str): The external integer connection id or
            an internal string connection id.

    Returns:
        The ``conn_id`` value stored for that connection.

    Raises:
        ArgumentError: When the key is invalid or not found in the list of
            active connections.
    """
    key = conn_or_int_id
    # String keys index the internal-id table; ints index the external one.
    if isinstance(key, str):
        table = self._int_connections
    elif isinstance(key, int):
        table = self._connections
    else:
        raise ArgumentError("You must supply either an int connection id or a string internal id to _get_connection_state", id=key)
    try:
        data = table[key]
    except KeyError:
        raise ArgumentError("Could not find connection by id", id=key)
    return data['conn_id']
Get the connection id. Args: conn_or_int_id (int, string): The external integer connection id or an internal string connection id Returns: The connection id ('conn_id') associated with that connection. Raises: ArgumentError: When the key is not found in the list of active connections or is invalid.
def _loadChildRules(context, xmlElement, attributeToFormatMap):
    """Extract rules from a Context or Rule xml element.

    Each child element is mapped through ``_ruleClassDict`` to its rule
    class and instantiated.

    :raises ValueError: if a child tag has no registered rule class.
    """
    rules = []
    # Iterating the element directly replaces Element.getchildren(), which
    # was deprecated and removed in Python 3.9.
    for ruleElement in xmlElement:
        if ruleElement.tag not in _ruleClassDict:
            raise ValueError("Not supported rule '%s'" % ruleElement.tag)
        rule = _ruleClassDict[ruleElement.tag](context, ruleElement, attributeToFormatMap)
        rules.append(rule)
    return rules
Extract rules from Context or Rule xml element
def __join_connections(self):
    """Wait for all connections to close.

    There are no side-effects here. We just want to try to leave -after-
    everything has closed, in general: poll connection state until either
    all connections report disconnected or the graceful timeout elapses.
    """
    interval_s = nsq.config.client.CONNECTION_CLOSE_AUDIT_WAIT_S
    graceful_wait_s = nsq.config.client.CONNECTION_QUIT_CLOSE_TIMEOUT_S
    graceful = False
    while graceful_wait_s > 0:
        # NOTE(review): an empty connection list breaks out without setting
        # graceful, so the error below logs with an empty list — confirm
        # that is intended.
        if not self.__connections:
            break
        connected_list = [c.is_connected for (n, c, g) in self.__connections]
        if any(connected_list) is False:
            graceful = True
            break
        gevent.sleep(interval_s)
        graceful_wait_s -= interval_s
    if graceful is False:
        connected_list = [c for (n, c, g) in self.__connections if c.is_connected]
        _logger.error("We were told to terminate, but not all "
                      "connections were stopped: [%s]", connected_list)
Wait for all connections to close. There are no side-effects here. We just want to try and leave -after- everything has closed, in general.
def _do_authorize(self):
    """Perform the authorization against the API and store the token.

    On success stores ``self.token`` and, when a refresh token is returned,
    rewrites ``self.auth_data`` for future refresh-token grants.

    :raises ApiwatcherClientException: when no auth data is configured, on
        wrong credentials (HTTP 401), or on any other non-201 response.
    """
    if self.auth_data is None:
        raise ApiwatcherClientException("You must provide authorization data.")
    r = requests.post(
        "{0}/api/token".format(self.base_url),
        json=self.auth_data,
        verify=self.verify_certificate,
        timeout=self.timeout
    )
    if r.status_code == 401:
        raise ApiwatcherClientException("Wrong credentials supplied: {0}".format(
            r.json()["message"]
        ))
    elif r.status_code != 201:
        try:
            reason = r.json()["message"]
        except (ValueError, KeyError, TypeError):
            # Body is not JSON / lacks "message": fall back to raw text.
            # (Was a bare `except:`, which also swallows SystemExit and
            # KeyboardInterrupt.)
            reason = r.text
        raise ApiwatcherClientException(
            "Authorization failed. Reason {0} {1}".format(
                r.status_code, reason)
        )
    else:
        data = r.json()["data"]
        self.token = data["access_token"]
        if "refresh_token" in data:
            # Switch to refresh-token grants for subsequent authorizations.
            self.auth_data = {
                "grant_type": "refresh_token",
                "refresh_token": data["refresh_token"],
                "client_id": self.auth_data["client_id"]
            }
Perform the authorization
def create_db(self):
    """Set up the DB: create the admin and public roles if they don't exist.

    Logs a warning when no users are registered yet.
    """
    self.add_role(self.auth_role_admin)
    self.add_role(self.auth_role_public)
    if self.count_users() == 0:
        log.warning(LOGMSG_WAR_SEC_NO_USER)
Sets up the DB, creating the admin and public roles if they don't exist.
def get_ss_class(pdb_file, dssp_file, chain):
    """Define the secondary structure class of a PDB file at a specific chain.

    Args:
        pdb_file: path to the PDB file.
        dssp_file: path to the corresponding DSSP output file.
        chain: chain identifier to analyze.

    Returns:
        str: one of ``'all-alpha'``, ``'all-beta'`` or ``'mixed'``.
    """
    prag = pr.parsePDB(pdb_file)
    pr.parseDSSP(dssp_file, prag)
    alpha, threeTen, beta = get_dssp_ss_content_multiplechains(prag, chain)
    # Classification heuristic: pure content first, then the alpha/beta
    # ratio (>= 20x helix still counts as all-alpha).
    if alpha == 0 and beta > 0:
        classification = 'all-beta'
    elif beta == 0 and alpha > 0:
        classification = 'all-alpha'
    elif beta == 0 and alpha == 0:
        classification = 'mixed'
    elif float(alpha) / beta >= 20:
        classification = 'all-alpha'
    else:
        classification = 'mixed'
    return classification
Define the secondary structure class of a PDB file at the specific chain Args: pdb_file: dssp_file: chain: Returns:
def _compute(self):
    """Compute the tendencies (base implementation: all zeros).

    Needs to be implemented by each daughter class.

    Returns:
        dict: same keys as ``self.state``, each value multiplied by zero.
    """
    return {name: value * 0. for name, value in self.state.items()}
Where the tendencies are actually computed... Needs to be implemented for each daughter class Returns a dictionary with same keys as self.state
def pprint_ast(node, include_attributes=INCLUDE_ATTRIBUTES_DEFAULT, indent=INDENT_DEFAULT, file=None):
    """Pretty-print an AST tree.

    Parameters
    ----------
    node : ast.AST
        Top-level node to render.
    include_attributes : bool, optional
        Whether to include node attributes.
    indent : str, optional
        Indentation string for nested expressions.
    file : None or file-like object, optional
        File to print output to; defaults to sys.stdout.
    """
    if file is None:
        file = sys.stdout
    print(
        pformat_ast(
            node,
            include_attributes=include_attributes,
            indent=indent
        ),
        file=file,
    )
Pretty-print an AST tree. Parameters ---------- node : ast.AST Top-level node to render. include_attributes : bool, optional Whether to include node attributes. Default False. indent : str, optional. Indentation string for nested expressions. Default is two spaces. file : None or file-like object, optional File to use to print output. If the default of `None` is passed, we use sys.stdout.
def from_pubkey_line(cls, line):
    """Generate a Key instance from a string in authorized_keys format.

    :raises ValueError: if the string is malformed.
    """
    options, key_without_options = cls._extract_options(line)
    if key_without_options == '':
        raise ValueError("Empty key")
    fields = key_without_options.strip().split(None, 2)
    if len(fields) == 3:
        type_str, data64, comment = fields
    elif len(fields) == 2:
        type_str, data64 = fields
        comment = None
    else:
        raise ValueError("Key has insufficient number of fields")
    try:
        data = b64decode(data64)
    except (binascii.Error, TypeError):
        raise ValueError("Key contains invalid data")
    # The binary blob embeds the real key type as its first length-prefixed
    # field; that is what selects the key class.
    key_type = next(iter_prefixed(data))
    if key_type == b'ssh-rsa':
        key_class = RSAKey
    elif key_type == b'ssh-dss':
        key_class = DSAKey
    elif key_type.startswith(b'ecdsa-'):
        key_class = ECDSAKey
    else:
        raise ValueError('Unknown key type {}'.format(key_type))
    # Reuse the already-decoded data instead of base64-decoding twice.
    return key_class(data, comment, options=options)
Generate a Key instance from a string. Raise ValueError if the string is malformed.
def infos_on_basis_set(self):
    """Return a human-readable summary of the basis set, as in a Fiesta log."""
    o = []
    o.append("=========================================")
    o.append("Reading basis set:")
    o.append("")
    o.append(" Basis set for {} atom ".format(str(self.filename)))
    o.append(" Maximum angular momentum = {}".format(self.data['lmax']))
    o.append(" Number of atomics orbitals = {}".format(self.data['n_nlo']))
    o.append(" Number of nlm orbitals = {}".format(self.data['n_nlmo']))
    o.append("=========================================")
    # BUG FIX: the original returned str(0) ("0"), discarding the report.
    return "\n".join(o)
Information on the basis set, as found in the Fiesta log.
def UpdateSet(self, dataset):
    """Update each hypothesis based on the dataset.

    This is more efficient than calling Update repeatedly because it waits
    until the end to Normalize.

    Modifies the suite directly; if you want to keep the original, make a
    copy.

    dataset: a sequence of data

    returns: the normalizing constant
    """
    for data in dataset:
        for hypo in self.Values():
            # Multiply in the likelihood of each datum under each hypothesis.
            like = self.Likelihood(data, hypo)
            self.Mult(hypo, like)
    return self.Normalize()
Updates each hypothesis based on the dataset. This is more efficient than calling Update repeatedly because it waits until the end to Normalize. Modifies the suite directly; if you want to keep the original, make a copy. dataset: a sequence of data returns: the normalizing constant
def python_to_ucn(uni_char, as_bytes=False):
    """Return the UCN notation for a Python Unicode character.

    Converts a one-character Python string (e.g. ``'\\u4e00'``) to the
    corresponding Unicode UCN string (``'U+4E00'``).

    :param uni_char: single-character string to convert.
    :param as_bytes: when True, return the result latin1-encoded as bytes.
    """
    ucn = uni_char.encode('unicode_escape').decode('latin1')
    # Strip the escape syntax: "\\u4e00" -> "4E00".
    # (str replaces six's text_type; the pointless int(4) is now a literal.)
    ucn = str(ucn).replace('\\', '').upper().lstrip('U')
    if len(ucn) > 4:
        # Wide escapes ("\\U0001F600") are zero-padded; drop the padding.
        ucn = ucn.lstrip("0")
    ucn = "U+" + ucn.upper()
    if as_bytes:
        ucn = ucn.encode('latin1')
    return ucn
Return UCN character from Python Unicode character. Converts a one character Python unicode string (e.g. u'\\u4e00') to the corresponding Unicode UCN ('U+4E00').
def start():
    """Record the charm start time and trigger a profile snapshot.

    If the collectd charm is also installed (its collector script exists),
    tell it to send a snapshot of the current profile data.
    """
    action_set('meta.start', time.strftime('%Y-%m-%dT%H:%M:%SZ'))
    COLLECT_PROFILE_DATA = '/usr/local/bin/collect-profile-data'
    if os.path.exists(COLLECT_PROFILE_DATA):
        subprocess.check_output([COLLECT_PROFILE_DATA])
If the collectd charm is also installed, tell it to send a snapshot of the current profile data.
def get_format(self):
    """Return a RangeFormat instance with the format of this range.

    Returns None when the request fails (falsy response).
    """
    url = self.build_url(self._endpoints.get('get_format'))
    response = self.session.get(url)
    if not response:
        return None
    return self.range_format_constructor(parent=self, **{self._cloud_data_key: response.json()})
Returns a RangeFormat instance with the format of this range
def _parse(self, stream, context, path):
    """Parse tunnel.

    Reads a context-computed number of bytes from the stream and parses the
    subconstruct from a fresh BytesIO, so it cannot read past the chunk.
    """
    length = self.length(context)
    new_stream = BytesIO(construct.core._read_stream(stream, length))
    return self.subcon._parse(new_stream, context, path)
Parse tunnel.
def _AnyMessageToJsonObject(self, message):
    """Convert an Any message according to the Proto3 JSON Specification.

    The packed payload is re-parsed from ``message.value``; wrapper and
    well-known types serialize under a ``value`` key next to ``@type``.
    """
    if not message.ListFields():
        # An unset Any serializes to an empty object.
        return {}
    js = OrderedDict()
    type_url = message.type_url
    js['@type'] = type_url
    sub_message = _CreateMessageFromTypeUrl(type_url)
    sub_message.ParseFromString(message.value)
    message_descriptor = sub_message.DESCRIPTOR
    full_name = message_descriptor.full_name
    if _IsWrapperMessage(message_descriptor):
        js['value'] = self._WrapperMessageToJsonObject(sub_message)
        return js
    if full_name in _WKTJSONMETHODS:
        # Dispatch to the registered well-known-type serializer.
        js['value'] = methodcaller(_WKTJSONMETHODS[full_name][0], sub_message)(self)
        return js
    return self._RegularMessageToJsonObject(sub_message, js)
Converts Any message according to Proto3 JSON Specification.
def init(name, *args, **kwargs):
    """Instantiate a plugin from the catalog.

    Returns the plugin instance, or None (with a message on stderr) when the
    plugin is unknown or fails to initialize.
    """
    if name in _PLUGIN_CATALOG:
        if rapport.config.get_int("rapport", "verbosity") >= 2:
            print("Initialize plugin {0}: {1} {2}".format(name, args, kwargs))
        try:
            return _PLUGIN_CATALOG[name](*args, **kwargs)
        except (ValueError, TypeError) as e:
            print("Failed to initialize plugin {0}: {1}!".format(name, e), file=sys.stderr)
    else:
        print("Failed to initialize plugin {0}: Not in catalog!".format(name), file=sys.stderr)
Instantiate a plugin from the catalog.
def substitute_symbol_table(table, version, max_id):
    """Substitute a given shared symbol table for another version.

    * If the given table has **more** symbols than the requested substitute,
      the generated table is a subset of the given table.
    * If it has **fewer** symbols, the difference is padded with symbols
      whose text is unknown (``None``).

    Args:
        table (SymbolTable): The shared table to derive from.
        version (int): The version to target, ``>= 1``.
        max_id (int): The maximum ID allocated by the substitute, ``>= 0``.

    Returns:
        SymbolTable: The synthesized table.
    """
    if not table.table_type.is_shared:
        raise ValueError('Symbol table to substitute from must be a shared table')
    if version <= 0:
        # Typo fix in the error message: "grater" -> "greater".
        raise ValueError('Version must be greater than or equal to 1: %s' % version)
    if max_id < 0:
        raise ValueError('Max ID must be zero or positive: %s' % max_id)
    if max_id <= table.max_id:
        # Shrinking: take a prefix of the existing symbols.
        symbols = (token.text for token in islice(table, max_id))
    else:
        # Growing: pad with unknown-text symbols.
        symbols = chain(
            (token.text for token in table),
            repeat(None, max_id - table.max_id)
        )
    return SymbolTable(
        table_type=SHARED_TABLE_TYPE,
        symbols=symbols,
        name=table.name,
        version=version,
        is_substitute=True
    )
Substitutes a given shared symbol table for another version. * If the given table has **more** symbols than the requested substitute, then the generated symbol table will be a subset of the given table. * If the given table has **less** symbols than the requested substitute, then the generated symbol table will have symbols with unknown text generated for the difference. Args: table (SymbolTable): The shared table to derive from. version (int): The version to target. max_id (int): The maximum ID allocated by the substitute, must be ``>= 0``. Returns: SymbolTable: The synthesized table.
def fit(self, X, y=None, categorical=None):
    """Compute k-prototypes clustering.

    Parameters
    ----------
    X : array-like, shape=[n_samples, n_features]
        Data to cluster.
    y : ignored
        Present for scikit-learn API compatibility.
    categorical : int, list or tuple, optional
        Index (or indices) of the columns that contain categorical data.
    """
    if categorical is not None:
        assert isinstance(categorical, (int, list, tuple)), "The 'categorical' argument needs to be an integer with the index of the categorical column in your data, or a list or tuple of several of them, but it is a {}.".format(type(categorical))
    X = pandas_to_numpy(X)
    random_state = check_random_state(self.random_state)
    # k_prototypes returns the encoded centroids, category map, labels,
    # cost, iteration count and the fitted gamma weight.
    self._enc_cluster_centroids, self._enc_map, self.labels_, self.cost_, \
        self.n_iter_, self.gamma = k_prototypes(X, categorical,
                                                self.n_clusters,
                                                self.max_iter,
                                                self.num_dissim,
                                                self.cat_dissim,
                                                self.gamma,
                                                self.init,
                                                self.n_init,
                                                self.verbose,
                                                random_state,
                                                self.n_jobs)
    return self
Compute k-prototypes clustering. Parameters ---------- X : array-like, shape=[n_samples, n_features] categorical : Index of columns that contain categorical data
def nextChild(hotmap, index):
    """Return the next sibling of the node indicated by index.

    Clamps to the last entry, so indexing past the end returns the final
    child.
    """
    last = len(hotmap) - 1
    pos = index + 1
    if pos > last:
        pos = last
    return hotmap[pos][1]
Return the next sibling of the node indicated by index.
def _deserialize_dict(data, boxed_type):
    """Deserialize a dict and its elements.

    :param data: dict to deserialize.
    :type data: dict
    :param boxed_type: class literal for the values.
    :return: deserialized dict.
    :rtype: dict
    """
    # dict.items() replaces six.iteritems; this codebase targets Python 3
    # (type annotations are used elsewhere in the module).
    return {k: _deserialize(v, boxed_type) for k, v in data.items()}
Deserializes a dict and its elements. :param data: dict to deserialize. :type data: dict :param boxed_type: class literal. :return: deserialized dict. :rtype: dict
def _process_messages(self, messages, ignore_unknown_message_types=False):
    """Perform message processing.

    :param dict messages: message data indexed by message id; each value is
        a ``(message_model, dispatch_models)`` pair.
    :param bool ignore_unknown_message_types: whether to silence exceptions
    :raises UnknownMessageTypeError:
    """
    with self.before_after_send_handling():
        for message_id, message_data in messages.items():
            message_model, dispatch_models = message_data
            try:
                message_cls = get_registered_message_type(message_model.cls)
            except UnknownMessageTypeError:
                if ignore_unknown_message_types:
                    continue
                raise
            message_type_cache = None
            for dispatch in dispatch_models:
                if not dispatch.message_cache:
                    try:
                        # Compile once and reuse across dispatches, unless
                        # the message type renders per-dispatch content
                        # (dynamic context).
                        if message_type_cache is None and not message_cls.has_dynamic_context:
                            message_type_cache = message_cls.compile(message_model, self, dispatch=dispatch)
                        dispatch.message_cache = message_type_cache or message_cls.compile(
                            message_model, self, dispatch=dispatch)
                    except Exception as e:
                        # Record the failure on the dispatch; processing of
                        # the remaining dispatches continues.
                        self.mark_error(dispatch, e, message_cls)
            self.send(message_cls, message_model, dispatch_models)
Performs message processing. :param dict messages: indexed by message id dict with messages data :param bool ignore_unknown_message_types: whether to silence exceptions :raises UnknownMessageTypeError:
def _retrieve_and_validate_certificate_chain(self, cert_url):
    """Retrieve and validate a certificate chain.

    Validates the URL, loads the certificate chain from it, then validates
    the chain before returning it.

    :param cert_url: URL for retrieving the certificate chain
    :type cert_url: str
    :return: the certificate chain loaded from the URL
    :raises VerificationException: if the URL or the loaded chain is invalid
    """
    self._validate_certificate_url(cert_url)
    chain = self._load_cert_chain(cert_url)
    self._validate_cert_chain(chain)
    return chain
Retrieve and validate certificate chain. This method validates if the URL is valid and loads and validates the certificate chain, before returning it. :param cert_url: URL for retrieving certificate chain :type cert_url: str :return The certificate chain loaded from the URL :rtype cryptography.x509.Certificate :raises: :py:class:`VerificationException` if the URL is invalid, if the loaded certificate chain is invalid
def filter(self):
    """Filter contigs based on depth.

    Spawns one daemon worker thread per CPU consuming ``self.filterqueue``,
    queues every sample that has a usable assembly, and blocks until the
    queue is drained.
    """
    logging.info('Filtering contigs')
    for i in range(self.cpus):
        # Daemon workers die with the main thread.
        threads = Thread(target=self.filterthreads, args=())
        threads.setDaemon(True)
        threads.start()
    with progressbar(self.metadata) as bar:
        for sample in bar:
            # Only samples with a real best assembly get filtered.
            if sample.general.bestassemblyfile != 'NA':
                sample.general.contigsfile = sample.general.assemblyfile
                self.filterqueue.put(sample)
    self.filterqueue.join()
Filter contigs based on depth
def bootstrap_stat(arr, stat=np.mean, n_iters=1000, alpha=0.05):
    """Produce a bootstrap estimate of a statistic of an array on axis 0.

    Parameters
    ----------
    arr : ndarray
        The array with data to be bootstrapped.
    stat : callable
        The statistical function to call; called as ``stat(arr, 0)``, so it
        needs to accept that call signature.
    n_iters : int
        The number of bootstrap iterations to sample.
    alpha : float
        The confidence interval size will be 1 - alpha.

    Returns
    -------
    stat_orig : the statistic computed on the original array.
    eb : ndarray
        Per-element width of the (1 - alpha) bootstrap confidence interval.
    """
    stat_orig = stat(arr, 0)
    boot_arr = np.empty((arr.shape[-1], n_iters))
    for it in range(n_iters):  # xrange was Python 2 only
        # np.random.random_integers is deprecated; randint's upper bound is
        # exclusive, so arr.shape[0] gives the same sample space.
        sample = arr[np.random.randint(0, arr.shape[0], arr.shape[0])]
        boot_arr[:, it] = stat(sample, 0)
    # BUG FIX: scoreatpercentile expects percentiles in [0, 100]; the
    # original passed fractions (0.975 instead of 97.5), collapsing the
    # interval to essentially the distribution minimum.
    lo = 100 * (alpha / 2)
    hi = 100 * (1 - alpha / 2)
    eb = np.array([stats.scoreatpercentile(boot_arr[xx], hi) -
                   stats.scoreatpercentile(boot_arr[xx], lo)
                   for xx in range(boot_arr.shape[0])])
    return stat_orig, eb
Produce a boot-strap distribution of the mean of an array on axis 0 Parameters --------- arr : ndarray The array with data to be bootstrapped stat : callable The statistical function to call. will be called as `stat(arr, 0)`, so needs to accept that call signature. n_iters : int The number of bootstrap iterations to sample alpha : float The confidence interval size will be 1-alpha
def style_defs(cls):
    """Return the CSS style definitions required by the formatted snippet.

    The Pygments highlight color is overridden with the class's violation
    color.
    """
    formatter = HtmlFormatter()
    formatter.style.highlight_color = cls.VIOLATION_COLOR
    return formatter.get_style_defs()
Return the CSS style definitions required by the formatted snippet.
def descriptions(self):
    """Return the description of each keyword in the rcParams dictionary.

    Only entries whose value tuple carries a third element (the
    description) are included.
    """
    # dict.items() replaces six.iteritems; this codebase targets Python 3.
    return {key: val[2] for key, val in self.defaultParams.items()
            if len(val) >= 3}
The description of each keyword in the rcParams dictionary
def list_policies(self, filters=None):
    """Retrieve installed trap, drop and bypass policies.

    :param filters: retrieve only matching policies (optional)
    :type filters: dict
    :return: list of installed trap, drop and bypass policies
    :rtype: list
    """
    _, policy_list = self.handler.streamed_request("list-policies",
                                                   "list-policy", filters)
    return policy_list
Retrieve installed trap, drop and bypass policies. :param filters: retrieve only matching policies (optional) :type filters: dict :return: list of installed trap, drop and bypass policies :rtype: list
def get_hash(self, shallow=True, refresh=False):
    """Get a hash for this Statement.

    A "shallow" hash (14 nibbles) hashes only the ``matches_key``, so it is
    as unique as the information carried by the statement; differences in
    source or evidence are not included. A "full" hash (16 nibbles)
    additionally folds in the sorted evidence matches keys. Both are cached
    on the instance.

    Parameters
    ----------
    shallow : bool
        Choose between the shallow and full hashes described above.
        Default True (shallow).
    refresh : bool
        Force recomputation of the cached hash. Default False.

    Returns
    -------
    hash : int
        A long integer hash.
    """
    if shallow:
        if not hasattr(self, '_shallow_hash') or self._shallow_hash is None\
                or refresh:
            self._shallow_hash = make_hash(self.matches_key(), 14)
        ret = self._shallow_hash
    else:
        if not hasattr(self, '_full_hash') or self._full_hash is None \
                or refresh:
            # Sort evidence keys so the full hash is order-independent.
            ev_mk_list = sorted([ev.matches_key() for ev in self.evidence])
            self._full_hash = \
                make_hash(self.matches_key() + str(ev_mk_list), 16)
        ret = self._full_hash
    return ret
Get a hash for this Statement. There are two types of hash, "shallow" and "full". A shallow hash is as unique as the information carried by the statement, i.e. it is a hash of the `matches_key`. This means that differences in source, evidence, and so on are not included. As such, it is a shorter hash (14 nibbles). The odds of a collision among all the statements we expect to encounter (well under 10^8) is ~10^-9 (1 in a billion). Checks for collisions can be done by using the matches keys. A full hash includes, in addition to the matches key, information from the evidence of the statement. These hashes will be equal if the two Statements came from the same sentences, extracted by the same reader, from the same source. These hashes are correspondingly longer (16 nibbles). The odds of a collision for an expected less than 10^10 extractions is ~10^-9 (1 in a billion). Note that a hash of the Python object will also include the `uuid`, so it will always be unique for every object. Parameters ---------- shallow : bool Choose between the shallow and full hashes described above. Default is true (e.g. a shallow hash). refresh : bool Used to get a new copy of the hash. Default is false, so the hash, if it has been already created, will be read from the attribute. This is primarily used for speed testing. Returns ------- hash : int A long integer hash.
def blend_color(color, color2):
    """Blend two hex colors together (50/50 average of each RGB channel)."""
    r1, g1, b1 = hex_to_rgb(color)
    r2, g2, b2 = hex_to_rgb(color2)
    r3 = int(0.5 * r1 + 0.5 * r2)
    g3 = int(0.5 * g1 + 0.5 * g2)
    b3 = int(0.5 * b1 + 0.5 * b2)
    return rgb_to_hex((r3, g3, b3))
Blend two colors together.
def _get_xml_dom(self):
    """Collect all options set so far and return an
    ``xml.dom.minidom.Document`` representing the corresponding XML.

    :raises TypeError: if site_control is NONE while domains, header
        domains or identities were also configured (contradictory policy).
    """
    if self.site_control == SITE_CONTROL_NONE and \
            any((self.domains, self.header_domains, self.identities)):
        raise TypeError(BAD_POLICY)
    # NOTE(review): the stdlib minidom module has no module-level
    # createDocumentType/createDocument; these normally live on a
    # DOMImplementation (minidom.getDOMImplementation()). Confirm that
    # `minidom` here is actually an implementation object, not the module.
    policy_type = minidom.createDocumentType(
        qualifiedName='cross-domain-policy',
        publicId=None,
        systemId='http://www.adobe.com/xml/dtds/cross-domain-policy.dtd'
    )
    policy = minidom.createDocument(
        None,
        'cross-domain-policy',
        policy_type
    )
    if self.site_control is not None:
        control_element = policy.createElement('site-control')
        control_element.setAttribute(
            'permitted-cross-domain-policies',
            self.site_control
        )
        policy.documentElement.appendChild(control_element)
    for elem_type in ('domains', 'header_domains', 'identities'):
        # Dispatches to _add_domains_xml / _add_header_domains_xml /
        # _add_identities_xml.
        getattr(self, '_add_{}_xml'.format(elem_type))(policy)
    return policy
Collects all options set so far, and produce and return an ``xml.dom.minidom.Document`` representing the corresponding XML.
def all_ports(self):
    """Return all Ports referenced by this Compound and its successors.

    Returns
    -------
    list of mb.Port
        All Ports found in the successor graph of this Compound.
    """
    # Local import avoids a circular dependency with mbuild.port.
    from mbuild.port import Port
    return [successor for successor in self.successors() if isinstance(successor, Port)]
Return all Ports referenced by this Compound and its successors Returns ------- list of mb.Compound A list of all Ports referenced by this Compound and its successors
def get_system_bck_color():
    """Get a system color for drawing the fold scope background.

    Blends the application palette's window color with its highlight color
    in equal parts.
    """
    def merged_colors(colorA, colorB, factor):
        # Linear per-channel blend of two QColors; factor is the percentage
        # contribution of colorA.
        maxFactor = 100
        colorA = QtGui.QColor(colorA)
        colorB = QtGui.QColor(colorB)
        tmp = colorA
        tmp.setRed((tmp.red() * factor) / maxFactor +
                   (colorB.red() * (maxFactor - factor)) / maxFactor)
        tmp.setGreen((tmp.green() * factor) / maxFactor +
                     (colorB.green() * (maxFactor - factor)) / maxFactor)
        tmp.setBlue((tmp.blue() * factor) / maxFactor +
                    (colorB.blue() * (maxFactor - factor)) / maxFactor)
        return tmp
    pal = QtWidgets.QApplication.instance().palette()
    b = pal.window().color()
    h = pal.highlight().color()
    return merged_colors(b, h, 50)
Gets a system color for drawing the fold scope background.
def ExamineEvent(self, mediator, event):
    """Evaluate whether an event contains the right data for a hash lookup.

    Args:
        mediator (AnalysisMediator): mediates interactions between analysis
            plugins and other components, such as storage and dfvfs.
        event (EventObject): event.
    """
    self._EnsureRequesterStarted()
    path_spec = event.pathspec
    event_identifiers = self._event_identifiers_by_pathspec[path_spec]
    event_identifier = event.GetIdentifier()
    event_identifiers.append(event_identifier)
    if event.data_type not in self.DATA_TYPES or not self._analyzer.lookup_hash:
        return
    # The event attribute name is derived from the analyzer's hash type,
    # e.g. "sha256" -> "sha256_hash".
    lookup_hash = '{0:s}_hash'.format(self._analyzer.lookup_hash)
    lookup_hash = getattr(event, lookup_hash, None)
    if not lookup_hash:
        display_name = mediator.GetDisplayNameForPathSpec(path_spec)
        logger.warning((
            'Lookup hash attribute: {0:s}_hash missing from event that '
            'originated from: {1:s}.').format(
                self._analyzer.lookup_hash, display_name))
        return
    path_specs = self._hash_pathspecs[lookup_hash]
    path_specs.append(path_spec)
    # Only queue each hash the first time it is seen.
    if len(path_specs) == 1:
        self.hash_queue.put(lookup_hash)
Evaluates whether an event contains the right data for a hash lookup. Args: mediator (AnalysisMediator): mediates interactions between analysis plugins and other components, such as storage and dfvfs. event (EventObject): event.
def unwrap(self, value, session=None):
    """Unwrap ``value`` using the unwrap function from ``EnumField.item_type``.

    Since unwrap validation could not happen in is_valid_wrap, it happens
    here: the unwrapped value must be one of the enum values.
    """
    self.validate_unwrap(value)
    value = self.item_type.unwrap(value, session=session)
    # Return the canonical enum value object, not the unwrapped copy.
    for val in self.values:
        if val == value:
            return val
    self._fail_validation(value, 'Value was not in the enum values')
Unwrap value using the unwrap function from ``EnumField.item_type``. Since unwrap validation could not happen in is_valid_wrap, it happens in this function.
def auth_with_token(self, token, tenant_id=None, tenant_name=None):
    """Use an already-known valid token to generate the service catalog.

    If the user holds the 'object-store:default' role, a second token-auth
    call is made against that role's tenant and the resulting object-store
    service catalog entries are merged into the main catalog.
    """
    main_resp, main_body = self._call_token_auth(token, tenant_id, tenant_name)
    roles = main_body["access"]["user"]["roles"]
    ostore = [role for role in roles if role["name"] == "object-store:default"]
    if ostore:
        ostore_tenant_id = ostore[0]["tenantId"]
        ostore_resp, ostore_body = self._call_token_auth(token, ostore_tenant_id, None)
        ostore_cat = ostore_body["access"]["serviceCatalog"]
        main_cat = main_body["access"]["serviceCatalog"]
        main_cat.extend(ostore_cat)
    self._parse_response(main_body)
    self.authenticated = True
If a valid token is already known, this call will use it to generate the service catalog.
def register_namespace(self, namespace, module):
    """Register a namespace on the loader.

    :param namespace: Namespace tag.
    :type namespace: str
    :param module: a module object, or a module name string, to register.
    :type module: str
    :raises AlreadyRegisteredError: when the namespace tag is taken.
    """
    if namespace not in self._namespaces:
        self._namespaces[namespace] = module
        return
    raise AlreadyRegisteredError("Namespace '{0}' is already registered on loader.".format(namespace))
Register a namespace. :param namespace: Namespace tag. :type namespace: str :param module: must be a string or a module object to register. :type module: str
def csv_row_to_transaction(index, row, source_encoding="latin1", date_format="%d-%m-%Y", thousand_sep=".", decimal_sep=","):
    """Parse a row of strings into a ``Transaction`` object.

    Args:
        index: index of this row in the original CSV file; used for sorting
            Transactions by their order of appearance.
        row: strings for [transfer_date, posted_date, message, money_amount,
            money_total].
        source_encoding: encoding used to decode strings to UTF-8.
        date_format: the format of dates in this row.
        thousand_sep: the thousand separator in money amounts.
        decimal_sep: the decimal separator in money amounts.

    Returns:
        A ``Transaction`` object.
    """
    # NOTE(review): date_format, thousand_sep and decimal_sep are accepted
    # but never forwarded to Parse.date / Parse.money — confirm whether
    # Parse reads them globally or whether they should be passed through.
    xfer, posted, message, amount, total = row
    xfer = Parse.date(xfer)
    posted = Parse.date(posted)
    message = Parse.to_utf8(message, source_encoding)
    amount = Parse.money(amount)
    total = Parse.money(total)
    return Transaction(index, xfer, posted, message, amount, total)
Parses a row of strings to a ``Transaction`` object. Args: index: The index of this row in the original CSV file. Used for sorting ``Transaction``s by their order of appearance. row: The row containing strings for [transfer_date, posted_date, message, money_amount, money_total]. source_encoding: The encoding that will be used to decode strings to UTF-8. date_format: The format of dates in this row. thousand_sep: The thousand separator in money amounts. decimal_sep: The decimal separator in money amounts. Returns: A ``Transaction`` object.
def push(self, record, shard):
    """Push a new record into the buffer.

    :param dict record: new record
    :param shard: Shard the record came from
    :type shard: :class:`~bloop.stream.shard.Shard`
    """
    # self.clock breaks ties so heap ordering stays stable for equal keys.
    heapq.heappush(self.heap, heap_item(self.clock, record, shard))
Push a new record into the buffer :param dict record: new record :param shard: Shard the record came from :type shard: :class:`~bloop.stream.shard.Shard`
def _finish_pending_requests(self) -> None:
    """Process any requests completed by the last call to multi.socket_action.

    Drains libcurl's info queue, finishing successful and failed transfers,
    then starts any queued requests.
    """
    while True:
        num_q, ok_list, err_list = self._multi.info_read()
        for curl in ok_list:
            self._finish(curl)
        for curl, errnum, errmsg in err_list:
            self._finish(curl, errnum, errmsg)
        if num_q == 0:
            break
    self._process_queue()
Process any requests that were completed by the last call to multi.socket_action.
def spi_ss_polarity(self, polarity):
    """Change the output polarity on the SS line.

    Please note that this only affects the master functions.
    """
    ret = api.py_aa_spi_master_ss_polarity(self.handle, polarity)
    # Negative return codes from the C API are converted to exceptions.
    _raise_error_if_negative(ret)
Change the ouput polarity on the SS line. Please note, that this only affects the master functions.
def title(cls, message=None):
    """Get the process title, or set it when ``message`` is given.

    With no message, returns the current process title; otherwise sets the
    title (prefixed 'qless-py-worker') and logs the message.
    """
    # `is None` replaces the unidiomatic `== None` identity-by-equality test.
    if message is None:
        return getproctitle()
    setproctitle('qless-py-worker %s' % message)
    logger.info(message)
Get the title of the process, or set it when a message is given
def summary_plot(data):
    """Barplot of combined pairs (FLASh read combination statistics)."""
    # NOTE(review): the original category colour strings were truncated at
    # '#' (comment-stripping artifact); the hex values below are placeholders
    # — restore the originals from the upstream module. Categories are
    # inserted one by one so the OrderedDict actually preserves order (the
    # original reassigned it with a plain dict literal).
    cats = OrderedDict()
    cats['inniepairs'] = {'name': 'Combined innie pairs', 'color': '#191970'}
    cats['outiepairs'] = {'name': 'Combined outie pairs', 'color': '#00a6ca'}
    cats['uncombopairs'] = {'name': 'Uncombined pairs', 'color': '#cd9b1d'}
    cats['discardpairs'] = {'name': 'Discarded pairs', 'color': '#f45b5b'}
    splotconfig = {'id': 'flash_combo_stats_plot',
                   'title': 'FLASh: Read combination statistics',
                   'ylab': 'Number of read pairs',
                   'hide_zero_cats': False}
    return bargraph.plot(data, cats, splotconfig)
Barplot of combined pairs
def parsed(self):
    """Get the ConfigParser object which represents the content.

    The result is cached on ``self._parsed``, so the content is only
    parsed once.

    :return: a populated ``ConfigParser`` instance
    """
    if not self._parsed:
        parser = ConfigParser()
        # read_string replaces the deprecated readfp(StringIO(...)) idiom.
        parser.read_string(self.content)
        self._parsed = parser
    return self._parsed
Get the ConfigParser object which represents the content. This property is cached and only parses the content once.
def array_to_json(array_like):
    """Serialize an array-like object to a JSON string.

    Objects exposing ``tolist()`` (e.g. numpy arrays) are converted to
    plain lists first. Anything else is handed to the stock encoder's
    ``default``, which raises TypeError for unsupported types.

    Args:
        array_like (np.array or Iterable or int or float): object to
            serialize.

    Returns:
        (str): the object serialized to JSON.
    """
    def _fallback(obj):
        # numpy arrays (and friends) provide tolist(); use it.
        if hasattr(obj, 'tolist'):
            return obj.tolist()
        # JSONEncoder.default always raises TypeError — deliberate.
        return json.JSONEncoder().default(obj)

    return json.dumps(array_like, default=_fallback)
Convert an array like object to JSON. To understand better what an array like object is see: https://docs.scipy.org/doc/numpy/user/basics.creation.html#converting-python-array-like-objects-to-numpy-arrays Args: array_like (np.array or Iterable or int or float): array like object to be converted to JSON. Returns: (str): object serialized to JSON
def check_type_and_size_of_param_list(param_list, expected_length):
    """Ensure ``param_list`` is a list with exactly ``expected_length`` items.

    Args:
        param_list: the object to validate.
        expected_length (int): required number of elements.

    Raises:
        ValueError: if ``param_list`` is not a list or has the wrong length.
    """
    # Explicit checks instead of assert: asserts are stripped under -O,
    # which would silently disable this validation.
    if not isinstance(param_list, list) or len(param_list) != expected_length:
        msg = "param_list must be a list containing {} elements."
        raise ValueError(msg.format(expected_length))
    return None
Ensure that param_list is a list with the expected length. Raises a helpful ValueError if this is not the case.
def read_archive(archive, fname):
    """Return an iterator of unique rows from a zip archive.

    Rows are tab-pipe (``\\t|\\t``) delimited; duplicate lines are
    yielded only once.

    * archive - path (or file-like object) of the zip archive.
    * fname - name of the compressed file within the archive.
    """
    seen = set()
    # Context managers fix the original's resource leak: the archive and
    # the member handle are now closed when the generator finishes.
    with zipfile.ZipFile(archive) as zfile:
        with zfile.open(fname, 'r') as contents:
            fobj = io.TextIOWrapper(contents)
            for line in fobj:
                line = line.rstrip('\t|\n')
                if line not in seen:
                    yield line.split('\t|\t')
                    seen.add(line)
Return an iterator of unique rows from a zip archive. * archive - path to the zip archive. * fname - name of the compressed file within the archive.
def _load_ssh_files(self): if self._runtime_ssh_path is not None: path = self._runtime_ssh_path if not os.path.exists(path): msg = "No such file or directory: {!r}".format(path) raise IOError(errno.ENOENT, msg) self._load_ssh_file(os.path.expanduser(path)) elif self.load_ssh_configs: for path in (self._user_ssh_path, self._system_ssh_path): self._load_ssh_file(os.path.expanduser(path))
Trigger loading of configured SSH config file paths. Expects that ``base_ssh_config`` has already been set to an `~paramiko.config.SSHConfig` object. :returns: ``None``.
def get_route_streams(self, route_id):
    """Return the raw data streams for a saved route, keyed by stream type.

    The three route stream types (`distance`, `altitude`, `latlng`) are
    always returned by the API.

    :param route_id: The ID of the route.
    :type route_id: int
    :return: A dictionary of :class:`stravalib.model.Stream` keyed by type.
    :rtype: :py:class:`dict`
    """
    fetcher = functools.partial(
        self.protocol.get, '/routes/{id}/streams/'.format(id=route_id))
    stream_iter = BatchedResultsIterator(entity=model.Stream,
                                         bind_client=self,
                                         result_fetcher=fetcher)
    return {stream.type: stream for stream in stream_iter}
Returns streams for a route. http://strava.github.io/api/v3/streams/#routes Streams represent the raw data of the saved route. External applications may access this information for all public routes and for the private routes of the authenticated athlete. The 3 available route stream types `distance`, `altitude` and `latlng` are always returned. :param route_id: The ID of the route. :type route_id: int :return: A dictionary of :class:`stravalib.model.Stream` from the route. :rtype: :py:class:`dict`
def get_catalog_query_session(self):
    """Get the catalog query session.

    return: (osid.cataloging.CatalogQuerySession) - a ``CatalogQuerySession``
    raise: OperationFailed - unable to complete request
    raise: Unimplemented - ``supports_catalog_query()`` is ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_catalog_query()`` is ``true``.*
    """
    if self.supports_catalog_query():
        return sessions.CatalogQuerySession(runtime=self._runtime)
    raise errors.Unimplemented()
Gets the catalog query session. return: (osid.cataloging.CatalogQuerySession) - a ``CatalogQuerySession`` raise: OperationFailed - unable to complete request raise: Unimplemented - ``supports_catalog_query()`` is ``false`` *compliance: optional -- This method must be implemented if ``supports_catalog_query()`` is ``true``.*
def compile(self):
    """Render the query to a SQL string.

    Registers this query on the context, then joins the non-empty clause
    fragments in standard clause order with newlines.

    NOTE: this method isn't yet idempotent; calling it multiple times may
    yield unexpected results.
    """
    self.context.set_query(self)
    # Clause order matters: WITH, SELECT, FROM, WHERE, GROUP BY,
    # ORDER BY, LIMIT.
    fragments = [
        self.format_subqueries(),
        self.format_select_set(),
        self.format_table_set(),
        self.format_where(),
        self.format_group_by(),
        self.format_order_by(),
        self.format_limit(),
    ]
    return '\n'.join(frag for frag in fragments if frag)
This method isn't yet idempotent; calling multiple times may yield unexpected results
def _appendstore(self, store):
    """Join another store on to the end of this one.

    The incoming store's bits are re-aligned so they begin exactly where
    this store's data ends; when that boundary falls mid-byte, our
    partial final byte and the store's partial first byte are merged.
    """
    if not store.bitlength:
        # Nothing to append.
        return
    # Shift the incoming store so its first bit lands at our end offset
    # (the bit position within the final byte, hence modulo 8).
    store = offsetcopy(store, (self.offset + self.bitlength) % 8)
    if store.offset:
        # Boundary is mid-byte: keep the high `store.offset` bits of our
        # last byte (mask 255 ^ (255 >> offset)) and take the remaining
        # low bits from the store's first byte (mask 255 >> offset).
        joinval = (self._rawarray.pop() & (255 ^ (255 >> store.offset)) |
                   (store.getbyte(0) & (255 >> store.offset)))
        self._rawarray.append(joinval)
        # The store's first byte was consumed by the merge above.
        self._rawarray.extend(store._rawarray[1:])
    else:
        # Byte-aligned: plain concatenation of raw byte arrays.
        self._rawarray.extend(store._rawarray)
    self.bitlength += store.bitlength
Join another store on to the end of this one.
def write_bibtex_dict(stream, entries):
    """Write BibTeX entries to a binary stream one record at a time.

    bibtexparser.write converts the entire database to one big string and
    writes it out in one go; streaming entry-by-entry avoids building
    that single huge string.
    """
    from bibtexparser.bwriter import BibTexWriter

    writer = BibTexWriter()
    writer.indent = ' '
    writer.entry_separator = ''

    for index, rec in enumerate(entries):
        # Separate consecutive entries with a blank line.
        if index:
            stream.write(b'\n')
        stream.write(writer._entry_to_bibtex(rec).encode('utf8'))
bibtexparser.write converts the entire database to one big string and writes it out in one go. I'm sure it will always all fit in RAM but some things just will not stand.
def symmetrically_add_atom(self, specie, point, coords_are_cartesian=False):
    """Add a site at a specified point in a slab, plus its mirror site.

    The corresponding site on the other side of the slab is added as well
    so that the two surfaces stay equivalent.

    Arg:
        specie (str): The specie to add
        point (coords): The coordinate of the site in the slab to add.
        coords_are_cartesian (bool): Is the point in cartesian coordinates

    Returns:
        (Slab): The modified slab
    """
    mirror_point = self.get_symmetric_site(point,
                                           cartesian=coords_are_cartesian)
    for site_point in (point, mirror_point):
        self.append(specie, site_point,
                    coords_are_cartesian=coords_are_cartesian)
Class method for adding a site at a specified point in a slab. Will add the corresponding site on the other side of the slab to maintain equivalent surfaces. Arg: specie (str): The specie to add point (coords): The coordinate of the site in the slab to add. coords_are_cartesian (bool): Is the point in cartesian coordinates Returns: (Slab): The modified slab
def created(self):
    """Return the resource creation date.

    Lazily fetches the resource if the ``id_perms`` block is not present
    yet.

    :rtype: datetime
    :raises ResourceNotFound: resource not found on the API
    """
    if 'id_perms' not in self:
        self.fetch()
    timestamp = self['id_perms']['created']
    return datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f')
Return creation date :rtype: datetime :raises ResourceNotFound: resource not found on the API
def replace_missing_value(self, str_in):
    """Map a schema-defined sentinel value to its replacement.

    Returns ``str_in`` unchanged unless it equals the ``sentinel`` defined
    in the schema's missingValue section, in which case the configured
    ``replace_with`` value is returned.

    :param str_in: the raw input string
    :return: ``str_in`` or the missingValue replacement value
    """
    mv = self.missing_value
    if mv is not None and mv.sentinel == str_in:
        return mv.replace_with
    return str_in
Returns 'str_in' if it is not equal to the 'sentinel' as defined in the missingValue section of the schema. Otherwise it returns the 'replaceWith' value. :param str_in: :return: str_in or the missingValue replacement value
def setXr(self, Xr):
    """Set the genotype data of the set component.

    Args:
        Xr: genotype matrix for the set component.
    """
    self.Xr = Xr
    # Keep the GP covariance term in sync with the stored genotype data.
    self.gp_block.covar.G = Xr
set genotype data of the set component
def documentation(self, level='first'):
    """Return the documentation of the type.

    By default this is the first docstring on a top-level term. With
    *level* set to ``"top"``, the list of all docstrings on top-level
    terms is returned, including the type's own `docstring` value (if not
    `None`) as the last item.

    Args:
        level (str): `"first"` or `"top"`
    Returns:
        a single docstring (or None) for `"first"`, a list of docstrings
        for `"top"`
    Raises:
        ValueError: if *level* is not recognized
    """
    docs = (t.docstring
            for t in list(self.conjunction.terms) + [self]
            if t.docstring is not None)
    level = level.lower()
    if level == 'first':
        return next(docs, None)
    if level == 'top':
        return list(docs)
    # Previously an unrecognized level left the result unbound; fail
    # explicitly instead.
    raise ValueError("level must be 'first' or 'top', got {!r}".format(level))
Return the documentation of the type. By default, this is the first docstring on a top-level term. By setting *level* to `"top"`, the list of all docstrings on top-level terms is returned, including the type's `docstring` value, if not `None`, as the last item. The docstring for the type itself is available via :attr:`TypeDefinition.docstring`. Args: level (str): `"first"` or `"top"` Returns: a single docstring or a list of docstrings
def transpose(self, semitone):
    """Transpose the pianorolls of all non-drum tracks.

    Positive values transpose to a higher key, negative values to a lower
    key. Drum tracks are left untouched.

    Parameters
    ----------
    semitone : int
        The number of semitones to transpose the pianorolls.
    """
    for melodic_track in (t for t in self.tracks if not t.is_drum):
        melodic_track.transpose(semitone)
Transpose the pianorolls of all tracks by a number of semitones, where positive values are for higher key, while negative values are for lower key. The drum tracks are ignored. Parameters ---------- semitone : int The number of semitones to transpose the pianorolls.
def deletecols(X, cols):
    """Delete columns from a numpy ndarray or recarray.

    ``cols`` may be a single column name, a comma-separated string of
    names, or a list of names.

    **Parameters**

        **X** : numpy recarray or ndarray with structured dtype

        **cols** : string or list of strings naming columns of `X`

    **Returns**

        **out** : numpy ndarray with structured dtype excluding the named
        columns, or ``None`` if no columns remain.
    """
    if isinstance(cols, str):
        cols = cols.split(',')
    kept = [name for name in X.dtype.names if name not in cols]
    return X[kept] if kept else None
Delete columns from a numpy ndarry or recarray. Can take a string giving a column name or comma-separated list of column names, or a list of string column names. Implemented by the tabarray method :func:`tabular.tab.tabarray.deletecols`. **Parameters** **X** : numpy recarray or ndarray with structured dtype The numpy array from which to delete columns. **cols** : string or list of strings Name or list of names of columns in `X`. This can be a string giving a column name or comma-separated list of column names, or a list of string column names. **Returns** **out** : numpy ndarray with structured dtype New numpy ndarray with structured dtype given by `X`, excluding the columns named in `cols`.
def from_client(catalog, client_id, lowcut, highcut, samp_rate, filt_order,
                length, prepick, swin, process_len=86400, data_pad=90,
                all_horiz=False, delayed=True, plot=False, debug=0,
                return_event=False, min_snr=None):
    """Generate multiplexed templates from an FDSN client.

    Deprecated shim: forwards every argument to
    :func:`template_gen` with ``method="from_client"``.

    :returns: List of :class:`obspy.core.stream.Stream` templates.
    """
    EQcorrscanDeprecationWarning(
        "Function is depreciated and will be removed soon. Use "
        "template_gen.template_gen instead.")
    # Delegate directly; this wrapper only exists for backward
    # compatibility.
    return template_gen(
        method="from_client", catalog=catalog, client_id=client_id,
        lowcut=lowcut, highcut=highcut, samp_rate=samp_rate,
        filt_order=filt_order, length=length, prepick=prepick, swin=swin,
        process_len=process_len, data_pad=data_pad, all_horiz=all_horiz,
        delayed=delayed, plot=plot, debug=debug,
        return_event=return_event, min_snr=min_snr)
Generate multiplexed template from FDSN client. Function to generate templates from an FDSN client. Must be given \ an obspy.Catalog class and the client_id as input. The function returns \ a list of obspy.Stream classes containing steams for each desired \ template. :type catalog: obspy.core.event.Catalog :param catalog: Catalog class containing desired template events :type client_id: str :param client_id: Name of the client, either url, or Obspy \ mappable (see the :mod:`obspy.clients.fdsn` documentation). :type lowcut: float :param lowcut: Low cut (Hz), if set to None will not apply a lowcut. :type highcut: float :param highcut: High cut (Hz), if set to None will not apply a highcut. :type samp_rate: float :param samp_rate: New sampling rate in Hz. :type filt_order: int :param filt_order: Filter level (number of corners). :type length: float :param length: Extract length in seconds. :type prepick: float :param prepick: Pre-pick time in seconds :type swin: str :param swin: P, S, P_all, S_all or all, defaults to all: see note in :func:`eqcorrscan.core.template_gen.template_gen` :type process_len: int :param process_len: Length of data in seconds to download and process. :param data_pad: Length of data (in seconds) required before and after \ any event for processing, use to reduce edge-effects of filtering on \ the templates. :type data_pad: int :type all_horiz: bool :param all_horiz: To use both horizontal channels even if there is only \ a pick on one of them. Defaults to False. :type delayed: bool :param delayed: If True, each channel will begin relative to it's own \ pick-time, if set to False, each channel will begin at the same time. :type plot: bool :param plot: Plot templates or not. :type debug: int :param debug: Level of debugging output, higher=more :type return_event: bool :param return_event: Whether to return the event and process length or not. 
:type min_snr: float :param min_snr: Minimum signal-to-noise ratio for a channel to be included in the template, where signal-to-noise ratio is calculated as the ratio of the maximum amplitude in the template window to the rms amplitude in the whole window given. :returns: List of :class:`obspy.core.stream.Stream` Templates :rtype: list .. warning:: This function is depreciated and will be removed in a forthcoming release. Please use `template_gen` instead. .. note:: process_len should be set to the same length as used when computing detections using match_filter.match_filter, e.g. if you read in day-long data for match_filter, process_len should be 86400. .. rubric:: Example >>> from obspy.clients.fdsn import Client >>> from eqcorrscan.core.template_gen import from_client >>> client = Client('NCEDC') >>> catalog = client.get_events(eventid='72572665', includearrivals=True) >>> # We are only taking two picks for this example to speed up the >>> # example, note that you don't have to! >>> catalog[0].picks = catalog[0].picks[0:2] >>> templates = from_client(catalog=catalog, client_id='NCEDC', ... lowcut=2.0, highcut=9.0, samp_rate=20.0, ... filt_order=4, length=3.0, prepick=0.15, ... swin='all', process_len=300, ... all_horiz=True) >>> templates[0].plot(equal_scale=False, size=(800,600)) # doctest: +SKIP .. figure:: ../../plots/template_gen.from_client.png
def is_hash(fhash):
    """Return True for valid hashes, False for invalid.

    Recognized formats: MD5, SHA-1, SHA-256, SHA-512 and ssdeep, as
    defined by the module-level regex patterns.
    """
    patterns = (re_md5, re_sha1, re_sha256, re_sha512, re_ssdeep)
    return any(re.match(pattern, fhash) for pattern in patterns)
Returns true for valid hashes, false for invalid.
def forbid_web_access(f):
    """Forbid running the wrapped task via an HTTP request.

    :param f: Callable
    :return: Callable raising ForbiddenError when invoked from a web job
        context.
    """
    @wraps(f)
    def guarded(*args, **kwargs):
        current = JobContext.get_current_context()
        if isinstance(current, WebJobContext):
            raise ForbiddenError('Access forbidden from web.')
        return f(*args, **kwargs)
    return guarded
Forbids running task using http request. :param f: Callable :return Callable
def x_11paths_authorization(app_id, secret, context, utc=None):
    """Calculate the authentication headers to be sent with a request to the API.

    Builds the canonical string-to-sign (method, timestamp, serialized
    headers, path+query, and optionally the form body) and signs it with
    the application secret.

    :param app_id: application identifier embedded in the header
    :param secret: signing secret
    :param context: request context (method, headers, url_path,
        query_params, body_params, renderer)
    :param utc: timestamp to sign; defaults to the request's date header
    :return: the Authorization header value
    """
    # Default to the date header already on the request so the signed
    # timestamp matches what is actually sent.
    utc = utc or context.headers[X_11PATHS_DATE_HEADER_NAME]
    url_path = ensure_url_path_starts_with_slash(context.url_path)
    url_path_query = url_path
    if context.query_params:
        # Query parameters are sorted so both sides serialize identically.
        url_path_query += "?%s" % (url_encode(context.query_params, sort=True))
    # Canonical string: each component on its own line, in this exact order.
    string_to_sign = (context.method.upper().strip() + "\n" +
                      utc + "\n" +
                      _get_11paths_serialized_headers(context.headers) + "\n" +
                      url_path_query.strip())
    if context.body_params and isinstance(context.renderer, FormRenderer):
        # Form-encoded bodies are folded into the signature with the '&'
        # separators stripped, per the 11paths signing scheme.
        string_to_sign = string_to_sign + "\n" + url_encode(context.body_params, sort=True).replace("&", "")
    authorization_header_value = (AUTHORIZATION_METHOD +
                                  AUTHORIZATION_HEADER_FIELD_SEPARATOR +
                                  app_id +
                                  AUTHORIZATION_HEADER_FIELD_SEPARATOR +
                                  _sign_data(secret, string_to_sign))
    return authorization_header_value
Calculate the authentication headers to be sent with a request to the API. :param app_id: :param secret: :param context :param utc: :return: array a map with the Authorization and Date headers needed to sign a Latch API request
def bulk_update(self, request):
    """Put multiple items in the basket, removing anything already there.

    The existing basket is destroyed first, then one BasketItem is saved
    per element of the request payload. Returns the resulting basket
    contents serialized with HTTP 200.
    """
    basket_id = utils.destroy_basket(request)
    for payload in request.data:
        BasketItem(basket_id=basket_id, **payload).save()
    serializer = BasketItemSerializer(self.get_queryset(request), many=True)
    return Response(data=serializer.data, status=status.HTTP_200_OK)
Put multiple items in the basket, removing anything that already exists
def _encode_key(self, obj):
    """Encode a dictionary key - a key can only be a string in std JSON.

    Resolution order: exact-type fast paths for str/UUID, then the
    ``__mm_serialize__`` hook, then isinstance checks for subclasses,
    and finally ``self.default`` as a last resort.
    """
    # Exact-class checks first: cheap fast path for the common cases,
    # deliberately before the serialization hook and isinstance checks.
    if obj.__class__ is str:
        return self._encode_str(obj)
    if obj.__class__ is UUID:
        return '"' + str(obj) + '"'
    # Objects may define their own serialized form via __mm_serialize__.
    try:
        sx_encoder = obj.__mm_serialize__
    except AttributeError:
        pass
    else:
        try:
            data = sx_encoder()
        except NotImplementedError:
            # Hook present but unimplemented: fall through to other paths.
            pass
        else:
            # Recurse: the serialized form must itself be a valid key.
            return self._encode_key(data)
    # isinstance checks catch subclasses missed by the exact-class tests.
    if isinstance(obj, UUID):
        return '"' + str(obj) + '"'
    if isinstance(obj, str):
        return self._encode_str(obj)
    # Last resort: ask the encoder's default() for a convertible value.
    try:
        value = self.default(obj)
    except TypeError:
        raise TypeError('{!r} is not a valid dictionary key'.format(obj))
    return self._encode_key(value)
Encodes a dictionary key - a key can only be a string in std JSON
def tci_path(self):
    """Return the path to the granule's TrueColorImage, or None if absent.

    Looks up IMAGE_FILE entries for this granule in the product metadata
    and picks the one ending in 'TCI'.
    """
    candidates = [
        path for path in self.dataset._product_metadata.xpath(
            ".//Granule[@granuleIdentifier='%s']/IMAGE_FILE/text()"
            % self.granule_identifier)
        if path.endswith('TCI')
    ]
    if not candidates:
        return None
    # Zipped products are rooted at the zip's internal root directory.
    root = (self.dataset._zip_root if self.dataset.is_zip
            else self.dataset.path)
    return os.path.join(root, candidates[0]) + '.jp2'
Return the path to the granules TrueColorImage.
def _set_sequence(cls, val):
    """Reset the table's recid sequence to a specific value.

    Note: this function is for PostgreSQL compatibility; it is a no-op on
    other dialects.

    :param val: The value to be set.
    """
    if db.engine.dialect.name != 'postgresql':
        return
    statement = (
        "SELECT setval(pg_get_serial_sequence("
        "'{0}', 'recid'), :newval)".format(cls.__tablename__))
    db.session.execute(statement, dict(newval=val))
Internal function to reset sequence to specific value. Note: this function is for PostgreSQL compatibility. :param val: The value to be set.
def jieba_tokenize(text, external_wordlist=False):
    """Tokenize Chinese text with Jieba, lazily initializing tokenizers.

    With ``external_wordlist=False`` (default), uses wordfreq's own
    dictionary with the HMM disabled, so every emitted token has a
    look-up-able word frequency. With ``external_wordlist=True``, uses
    Jieba's original large dictionary with the HMM enabled, which is
    better for general-purpose tokenization.
    """
    global jieba_tokenizer, jieba_orig_tokenizer
    if external_wordlist:
        # Original Jieba dictionary, HMM enabled (lcut default).
        if jieba_orig_tokenizer is None:
            jieba_orig_tokenizer = jieba.Tokenizer(dictionary=ORIG_DICT_FILENAME)
        return jieba_orig_tokenizer.lcut(text)
    # wordfreq dictionary, HMM disabled; slice tokens out of the original
    # text by the offsets Jieba reports.
    if jieba_tokenizer is None:
        jieba_tokenizer = jieba.Tokenizer(dictionary=DICT_FILENAME)
    return [text[start:end]
            for _token, start, end in jieba_tokenizer.tokenize(
                simplify_chinese(text), HMM=False)]
Tokenize the given text into tokens whose word frequencies can probably be looked up. This uses Jieba, a word-frequency-based tokenizer. If `external_wordlist` is False, we tell Jieba to default to using wordfreq's own Chinese wordlist, and not to infer unknown words using a hidden Markov model. This ensures that the multi-character tokens that it outputs will be ones whose word frequencies we can look up. If `external_wordlist` is True, this will use the largest version of Jieba's original dictionary, with HMM enabled, so its results will be independent of the data in wordfreq. These results will be better optimized for purposes that aren't looking up word frequencies, such as general- purpose tokenization, or collecting word frequencies in the first place.
def stop_db_session(exc=None):
    """Stop the last db_session.

    If an exception is supplied, its info tuple is forwarded to the
    session's ``__exit__`` so the session can roll back appropriately.
    """
    if not has_db_session():
        return
    exc_type, tb = None, None
    if exc:
        exc_type, exc, tb = get_exc_info(exc)
    db_session.__exit__(exc_type, exc, tb)
Stops the last db_session
def get_monitor_physical_size(monitor):
    """Return the physical size of the monitor as ``(width, height)``.

    Wrapper for:
        void glfwGetMonitorPhysicalSize(GLFWmonitor* monitor,
                                        int* width, int* height);
    """
    width = ctypes.c_int(0)
    height = ctypes.c_int(0)
    # GLFW writes the result through these out-parameters.
    _glfw.glfwGetMonitorPhysicalSize(monitor,
                                     ctypes.pointer(width),
                                     ctypes.pointer(height))
    return width.value, height.value
Returns the physical size of the monitor. Wrapper for: void glfwGetMonitorPhysicalSize(GLFWmonitor* monitor, int* width, int* height);
def aggregate(table, metrics=None, by=None, having=None, **kwds):
    """Aggregate a table with a given set of reductions, with grouping
    expressions, and post-aggregation filters.

    Parameters
    ----------
    table : table expression
    metrics : expression or expression list
    by : optional, default None
        Grouping expressions
    having : optional, default None
        Post-aggregation filters
    **kwds
        Named metric expressions, appended in sorted key order.

    Returns
    -------
    agg_expr : TableExpr
    """
    if metrics is None:
        metrics = []
    elif isinstance(metrics, list):
        # Copy so appending keyword-derived metrics below does not mutate
        # the caller's list in place.
        metrics = list(metrics)
    for name, expr in sorted(kwds.items()):
        expr = table._ensure_expr(expr)
        metrics.append(expr.name(name))
    op = table.op().aggregate(table, metrics, by=by, having=having)
    return op.to_expr()
Aggregate a table with a given set of reductions, with grouping expressions, and post-aggregation filters. Parameters ---------- table : table expression metrics : expression or expression list by : optional, default None Grouping expressions having : optional, default None Post-aggregation filters Returns ------- agg_expr : TableExpr
def str_search(self, search: str) -> List[HistoryItem]:
    """Find history items which contain a given string.

    Matching is case/width-insensitive (via ``utils.norm_fold``) and
    checks both the raw and the expanded form of each item.

    :param search: the string to search for
    :return: a list of history items, or an empty list if the string was
        not found
    """
    # Normalize the needle once instead of once per item comparison.
    needle = utils.norm_fold(search)

    def contains(item) -> bool:
        return (needle in utils.norm_fold(item)
                or needle in utils.norm_fold(item.expanded))

    return [item for item in self if contains(item)]
Find history items which contain a given string :param search: the string to search for :return: a list of history items, or an empty list if the string was not found
def _is_number_match_OO(numobj1_in, numobj2_in):
    """Takes two phone number objects and compares them for equality.

    Returns a MatchType: EXACT_MATCH, NSN_MATCH, SHORT_NSN_MATCH or
    NO_MATCH, considering only the core fields of each number.
    """
    # Compare only the core fields; auxiliary metadata is irrelevant.
    numobj1 = _copy_core_fields_only(numobj1_in)
    numobj2 = _copy_core_fields_only(numobj2_in)
    # Two numbers with different explicit extensions can never match.
    if (numobj1.extension is not None and
        numobj2.extension is not None and
            numobj1.extension != numobj2.extension):
        return MatchType.NO_MATCH
    country_code1 = numobj1.country_code
    country_code2 = numobj2.country_code
    # Both numbers carry a country code: compare directly.
    if country_code1 != 0 and country_code2 != 0:
        if numobj1 == numobj2:
            return MatchType.EXACT_MATCH
        elif (country_code1 == country_code2 and
              _is_national_number_suffix_of_other(numobj1, numobj2)):
            # Same country, one NSN is a trailing suffix of the other
            # (e.g. one side omitted an area code).
            return MatchType.SHORT_NSN_MATCH
        return MatchType.NO_MATCH
    # At least one country code is missing: neutralize the difference by
    # copying one code over, then compare the national numbers alone.
    numobj1.country_code = country_code2
    if numobj1 == numobj2:
        return MatchType.NSN_MATCH
    if _is_national_number_suffix_of_other(numobj1, numobj2):
        return MatchType.SHORT_NSN_MATCH
    return MatchType.NO_MATCH
Takes two phone number objects and compares them for equality.
def logout(self):
    """Explicit Skybell logout.

    Drops the HTTP session, forgets cached devices, and clears the
    cached access token. Returns True when a token was present; returns
    None (implicitly) otherwise.
    """
    if not self.cache(CONST.ACCESS_TOKEN):
        return None
    self._session = requests.session()
    self._devices = None
    self.update_cache({CONST.ACCESS_TOKEN: None})
    return True
Explicit Skybell logout.
def validate(self):
    """Check if the formats of principals and actions are valid.

    :return: True, if the policy is valid
    :raises InvalidApplicationPolicyError: when principals/actions are
        missing, a principal is malformed, or an action is unsupported
    """
    if not self.principals:
        raise InvalidApplicationPolicyError(error_message='principals not provided')
    if not self.actions:
        raise InvalidApplicationPolicyError(error_message='actions not provided')

    # Principals must each be a 12-digit AWS account ID or the wildcard.
    bad_principals = [p for p in self.principals
                      if not self._PRINCIPAL_PATTERN.match(p)]
    if bad_principals:
        raise InvalidApplicationPolicyError(
            error_message='principal should be 12-digit AWS account ID or "*"')

    unsupported = sorted(set(self.actions) - set(self.SUPPORTED_ACTIONS))
    if unsupported:
        raise InvalidApplicationPolicyError(
            error_message='{} not supported'.format(', '.join(unsupported)))
    return True
Check if the formats of principals and actions are valid. :return: True, if the policy is valid :raises: InvalidApplicationPolicyError
def convert(self, json, fout):
    """Convert a parsed JSON document to Markdown and write it out.

    NOTE: the ``json`` parameter shadows the stdlib module name here; it
    is the already-parsed document dict (must contain a 'name' key), not
    the module.

    :param json: parsed document dict
    :param fout: output target passed to build_output
    """
    # Order matters: the body is accumulated first, the header is
    # prepended from the document name, then everything is written out.
    self.build_markdown_body(json)
    self.build_header(json['name'])
    self.build_output(fout)
Convert json to markdown. Takes in a .json file as input and convert it to Markdown format, saving the generated .png images into ./images.
def FindUnspentCoinsByAssetAndTotal(self, asset_id, amount, from_addr=None, use_standard=False, watch_only_val=0, reverse=False):
    """Find unspent coins of one asset totalling at least a requested value.

    Args:
        asset_id (UInt256): a bytearray (len 32) representing an asset on
            the blockchain.
        amount (int): the amount of unspent coins being requested.
        from_addr (UInt160): a bytearray (len 20) representing an address.
        use_standard (bool): whether to only include standard contracts
            (i.e. not smart contract addresses).
        watch_only_val (int): flag (0 or 64) indicating whether to find
            coins in 'watch only' addresses.
        reverse (bool): select from the largest coins first instead of the
            smallest.

    Returns:
        list: coins covering the amount; a single-coin list when one coin
        matches exactly; None if the wallet's total is insufficient.
    """
    coins = self.FindUnspentCoinsByAsset(asset_id, from_addr=from_addr,
                                         use_standard=use_standard,
                                         watch_only_val=watch_only_val)
    # Renamed from `sum`, which shadowed the builtin.
    available = Fixed8(0)
    for coin in coins:
        available = available + coin.Output.Value
    if available < amount:
        # Not enough value in the wallet to satisfy the request.
        return None

    # Pick small coins first unless the caller asked for the reverse.
    coins = sorted(coins, key=lambda coin: coin.Output.Value.value)
    if reverse:
        coins.reverse()

    # Prefer a single coin that matches the amount exactly.
    for coin in coins:
        if coin.Output.Value == amount:
            return [coin]

    # Otherwise accumulate coins until the amount is covered.
    total = Fixed8(0)
    to_ret = []
    for coin in coins:
        total = total + coin.Output.Value
        to_ret.append(coin)
        if total >= amount:
            break
    return to_ret
Finds unspent coin objects totalling a requested value in the wallet limited to those of a certain asset type. Args: asset_id (UInt256): a bytearray (len 32) representing an asset on the blockchain. amount (int): the amount of unspent coins that are being requested. from_addr (UInt160): a bytearray (len 20) representing an address. use_standard (bool): whether or not to only include standard contracts ( i.e not a smart contract addr ). watch_only_val (int): a flag ( 0 or 64 ) indicating whether or not to find coins that are in 'watch only' addresses. Returns: list: a list of ``neo.Wallet.Coin`` in the wallet that are not spent. this list is empty if there are not enough coins to satisfy the request.
def get_ptrms_angle(ptrms_best_fit_vector, B_lab_vector):
    """Return the angle (degrees) between the pTRM principal direction and B_lab.

    This is NOT in SPD, but taken from Ron Shaar's old thellier_gui.py
    code; see PmagPy on GitHub.
    """
    # cos(angle) = (a . b) / (|a| * |b|)
    dot_product = numpy.dot(ptrms_best_fit_vector, B_lab_vector)
    norm_product = (numpy.sqrt(sum(ptrms_best_fit_vector ** 2)) *
                    numpy.sqrt(sum(B_lab_vector ** 2)))
    cos_angle = old_div(dot_product, norm_product)
    return math.degrees(math.acos(cos_angle))
Gives the angle between the principal direction of the pTRM data and the B_lab vector. This is NOT in SPD, but taken from Ron Shaar's old thellier_gui.py code. See PmagPy on GitHub.
def get_lambda_runtime_info(context):
    """Return a dict describing the AWS Lambda function invocation.

    Arguments:
        context: The context object from AWS Lambda.
    """
    return {
        'remaining_time': context.get_remaining_time_in_millis(),
        'function_name': context.function_name,
        'function_version': context.function_version,
        'invoked_function_arn': context.invoked_function_arn,
        'memory_limit': context.memory_limit_in_mb,
        'aws_request_id': context.aws_request_id,
        'log_group_name': context.log_group_name,
        'log_stream_name': context.log_stream_name,
    }
Returns a dictionary of information about the AWS Lambda function invocation Arguments context: The context object from AWS Lambda.
def request(self, method, url, **kwargs):
    """Override, wraps Session.request in caching.

    Cache is only used if key_for_request returns a valid key and
    should_cache_response was true as well. Every returned response
    carries a ``fromcache`` attribute indicating whether it came from
    the cache.
    """
    if not self.cache_storage:
        # Caching disabled entirely: pass straight through to requests.
        resp = super(CachingSession, self).request(method, url, **kwargs)
        resp.fromcache = False
        return resp

    resp = None
    # Normalize so the cache key and cacheability checks are consistent.
    method = method.lower()

    request_key = self.key_for_request(method, url, **kwargs)
    if request_key and not self.cache_write_only:
        # Read path: only consult the cache with a usable key and when
        # the session isn't in write-only mode.
        resp = self.cache_storage.get(request_key)

    if resp:
        resp.fromcache = True
    else:
        resp = super(CachingSession, self).request(method, url, **kwargs)
        # Write path: store only responses the policy deems cacheable.
        if request_key and self.should_cache_response(resp):
            self.cache_storage.set(request_key, resp)
        resp.fromcache = False

    return resp
Override, wraps Session.request in caching. Cache is only used if key_for_request returns a valid key and should_cache_response was true as well.
def main(testfiles=None, action=printer):
    """Parse object-definition test files and report successes/failures.

    testfiles can be None (use command-line arguments as filenames), a
    string naming one file, or a list; all names are globbed. Returns a
    dict of ParseResults keyed by filename, or the single ParseResults
    when exactly one file was requested and parsed.
    """
    testfiles = get_filename_list(testfiles)
    print(testfiles)

    if action:
        for expr in (simple_identifier, value, item_list):
            expr.setParseAction(action)

    successes = 0
    failures = []
    results = {}
    for fname in testfiles:
        try:
            results[fname] = object_definition.parseFile(fname)
            successes += 1
        except Exception:
            # Best-effort batch run: record the failure and keep going.
            failures.append(fname)

    if failures:
        print('\nfailed while processing %s' % ', '.join(failures))
    print('\nsucceeded on %d of %d files' % (successes, len(testfiles)))

    if len(results) == 1 and len(testfiles) == 1:
        return results[list(results.keys())[0]]
    return results
testfiles can be None, in which case the command line arguments are used as filenames. testfiles can be a string, in which case that file is parsed. testfiles can be a list. In all cases, the filenames will be globbed. If more than one file is parsed successfully, a dictionary of ParseResults is returned. Otherwise, a simple ParseResults is returned.