def _expand_tasks(self, scopes):
    """Add all tasks in any requested goals.

    Returns the requested scopes, plus the added tasks, sorted by scope name.
    """
    expanded_scopes = set(scopes)
    for scope, info in self._scope_to_info.items():
        if info.category == ScopeInfo.TASK:
            outer = enclosing_scope(scope)
            while outer != GLOBAL_SCOPE:
                if outer in expanded_scopes:
                    expanded_scopes.add(scope)
                    break
                outer = enclosing_scope(outer)
    return sorted(expanded_scopes)
def bulkImport_json(self, filename, onDuplicate="error", formatType="auto", **params):
    """bulk import from a file respecting arango's key/value format"""
    url = "%s/import" % self.database.URL
    params["onDuplicate"] = onDuplicate
    params["collection"] = self.name
    params["type"] = formatType
    with open(filename) as f:
        data = f.read()
    r = self.connection.session.post(url, params=params, data=data)
    # report import errors from the server response (the import endpoint
    # is assumed to return 201 on success) rather than from the raw payload
    data = r.json()
    if r.status_code != 201 or data.get("error"):
        raise UpdateError(data.get("errorMessage", "bulk import failed"), data)
def printArchive(fileName):
    """Prints content of combine archive

    :param fileName: path of archive
    :return: None
    """
    archive = CombineArchive()
    if archive.initializeFromArchive(fileName) is None:
        print("Invalid Combine Archive")
        return None

    print('*' * 80)
    print('Print archive:', fileName)
    print('*' * 80)
    printMetaDataFor(archive, ".")
    print("Num Entries: {0}".format(archive.getNumEntries()))

    for i in range(archive.getNumEntries()):
        entry = archive.getEntry(i)
        print(" {0}: location: {1} format: {2}".format(i, entry.getLocation(), entry.getFormat()))
        printMetaDataFor(archive, entry.getLocation())

        for j in range(entry.getNumCrossRefs()):
            print("  {0}: crossRef location {1}".format(j, entry.getCrossRef(j).getLocation()))

        # the entry could now be extracted via
        # archive.extractEntry(entry.getLocation(), <filename or folder>)
        # or used as string
        # content = archive.extractEntryToString(entry.getLocation())

    archive.cleanUp()
def gameValue(self):
    """identify the corresponding internal SC2 game value for self.type's value"""
    allowed = type(self).ALLOWED_TYPES
    try:
        # if ALLOWED_TYPES is not a dict, there is no internal game value mapping defined
        if isinstance(allowed, dict):
            return allowed.get(self.type.name)
    except AttributeError:
        pass  # None .type values are okay -- such result in a None gameValue() result
    return None
def sanitize_git_path(self, uri, ref=None):
    """Take a git URI and ref and convert it to a directory-safe path.

    Args:
        uri (string): git URI (e.g. git@github.com:foo/bar.git)
        ref (string): optional git ref to be appended to the path

    Returns:
        str: Directory name for the supplied uri
    """
    if uri.endswith('.git'):
        dir_name = uri[:-4]  # drop .git
    else:
        dir_name = uri
    dir_name = self.sanitize_uri_path(dir_name)
    if ref is not None:
        dir_name += "-%s" % ref
    return dir_name
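A hypothetical usage sketch (not from the source; it assumes sanitize_uri_path() replaces characters such as ':' and '/' with underscores):

# stacker_obj.sanitize_git_path('git@github.com:foo/bar.git', ref='master')
# would yield something like 'git_github.com_foo_bar-master'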
def cublasDtpmv(handle, uplo, trans, diag, n, AP, x, incx):
    """
    Matrix-vector product for real triangular-packed matrix.
    """
    status = _libcublas.cublasDtpmv_v2(handle,
                                       _CUBLAS_FILL_MODE[uplo],
                                       _CUBLAS_OP[trans],
                                       _CUBLAS_DIAG[diag],
                                       n, int(AP), int(x), incx)
    cublasCheckStatus(status)
def _trim_zeros_complex(str_complexes, na_rep='NaN'):
    """
    Separates the real and imaginary parts from the complex number, and
    executes the _trim_zeros_float method on each of those.
    """
    def separate_and_trim(str_complex, na_rep):
        num_arr = str_complex.split('+')
        return (_trim_zeros_float([num_arr[0]], na_rep) +
                ['+'] +
                _trim_zeros_float([num_arr[1][:-1]], na_rep) +
                ['j'])

    return [''.join(separate_and_trim(x, na_rep)) for x in str_complexes]
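An illustrative example (hedged; it assumes pandas' internal _trim_zeros_float, which trims trailing zeros but keeps one digit after the decimal point):

# _trim_zeros_complex(['1.000+2.500j', '0.10+0.00j'])
# -> ['1.0+2.5j', '0.1+0.0j']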
def get_list_attribute(self, attribute):
    """
    :return: attribute value as Python list.
    """
    list_attribute = self.api.getListAttribute(self.obj_ref(), attribute)
    # IXN returns '::ixNet::OK' for invalid attributes. We want error.
    if list_attribute == ['::ixNet::OK']:
        raise TgnError(self.ref + ' does not have attribute ' + attribute)
    return list_attribute
def add(self, *args, **kwargs):
    """Add the instance tied to the field to all the indexes

    For the parameters, see BaseIndex.add
    """
    check_uniqueness = kwargs.pop('check_uniqueness', False)
    args = self.prepare_args(args)
    for index in self._indexes:
        index.add(*args, check_uniqueness=check_uniqueness and index.handle_uniqueness, **kwargs)
        if check_uniqueness and index.handle_uniqueness:
            check_uniqueness = False
def check_dimensions(self, dataset):
    '''
    Checks that the feature types of this dataset are consistent with a point dataset
    '''
    required_ctx = TestCtx(BaseCheck.HIGH, 'All geophysical variables are point feature types')
    t = util.get_time_variable(dataset)

    # Exit prematurely
    if not t:
        required_ctx.assert_true(False, 'A dimension representing time is required for point feature types')
        return required_ctx.to_result()
    t_dims = dataset.variables[t].dimensions
    o = t_dims and t_dims[0]

    message = '{} must be a valid timeseries feature type. It must have dimensions of ({}), and all coordinates must have dimensions of ({})'
    for variable in util.get_geophysical_variables(dataset):
        is_valid = util.is_point(dataset, variable)
        required_ctx.assert_true(
            is_valid,
            message.format(variable, o, o)
        )
    return required_ctx.to_result()
def _set_qsfpp(self, v, load=False):
    """
    Setter method for qsfpp, mapped from YANG variable /brocade_interface_ext_rpc/get_media_detail/output/interface/qsfpp (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_qsfpp is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_qsfpp() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=qsfpp.qsfpp, is_container='container', presence=False, yang_name="qsfpp", rest_name="qsfpp", parent=self, choice=(u'interface-identifier', u'qsfpp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """qsfpp must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=qsfpp.qsfpp, is_container='container', presence=False, yang_name="qsfpp", rest_name="qsfpp", parent=self, choice=(u'interface-identifier', u'qsfpp'), path_helper=self._path_helper, extmethods=self._extmethods, register_paths=False, extensions=None, namespace='urn:brocade.com:mgmt:brocade-interface-ext', defining_module='brocade-interface-ext', yang_type='container', is_config=True)""",
        })

    self.__qsfpp = t
    if hasattr(self, '_set'):
        self._set()
def command(state, args):
    """Search AniDB."""
    args = parser.parse_args(args[1:])
    if not args.query:
        print('Must supply query.')
        return
    search_query = _compile_re_query(args.query)
    results = state.titles.search(search_query)
    results = [(anime.aid, anime.main_title) for anime in results]
    state.results['anidb'].set(results)
    state.results['anidb'].print()
def ReferenceResults(self, field, allow_edit=False):
    """Render Reference Results Table
    """
    instance = getattr(self, "instance", field.aq_parent)
    table = api.get_view("table_reference_results",
                         context=instance,
                         request=self.REQUEST)
    # Call listing hooks
    table.update()
    table.before_render()
    return table.ajax_contents_table()
def iterfollow(self):
    """ Generator for self.follow() """
    # use same criterion as self.follow(); ending the generator with a plain
    # return replaces the old `raise StopIteration`, which is an error inside
    # generators since PEP 479 (Python 3.7+)
    if self.links is None:
        return
    if self.links.get("next"):
        yield self.follow()
def get_uservar(self, user, name):
    """Get a variable about a user.

    :param str user: The user ID to look up a variable for.
    :param str name: The name of the variable to get.

    :return: The user variable, or ``None`` or ``"undefined"``:

        * If the user has no data at all, this returns ``None``.
        * If the user doesn't have this variable set, this returns the
          string ``"undefined"``.
        * Otherwise this returns the string value of the variable.
    """
    if name == '__lastmatch__':
        # Treat var `__lastmatch__` specially since it can't receive an "undefined" value
        return self.last_match(user)
    else:
        return self._session.get(user, name)
def get_archive(self, container, path, chunk_size=DEFAULT_DATA_CHUNK_SIZE):
    """
    Retrieve a file or folder from a container in the form of a tar
    archive.

    Args:
        container (str): The container where the file is located
        path (str): Path to the file or folder to retrieve
        chunk_size (int): The number of bytes returned by each iteration
            of the generator. If ``None``, data will be streamed as it is
            received. Default: 2 MB

    Returns:
        (tuple): First element is a raw tar data stream. Second element is
        a dict containing ``stat`` information on the specified ``path``.

    Raises:
        :py:class:`docker.errors.APIError`
            If the server returns an error.

    Example:

        >>> c = docker.APIClient()
        >>> f = open('./sh_bin.tar', 'wb')
        >>> bits, stat = c.get_archive(container, '/bin/sh')
        >>> print(stat)
        {'name': 'sh', 'size': 1075464, 'mode': 493,
         'mtime': '2018-10-01T15:37:48-07:00', 'linkTarget': ''}
        >>> for chunk in bits:
        ...     f.write(chunk)
        >>> f.close()
    """
    params = {
        'path': path
    }
    url = self._url('/containers/{0}/archive', container)
    res = self._get(url, params=params, stream=True)
    self._raise_for_status(res)
    encoded_stat = res.headers.get('x-docker-container-path-stat')
    return (
        self._stream_raw_result(res, chunk_size, False),
        utils.decode_json_header(encoded_stat) if encoded_stat else None
    )
def filter(self, table, vg_snapshots, filter_string):
    """Naive case-insensitive search."""
    query = filter_string.lower()
    return [vg_snapshot for vg_snapshot in vg_snapshots
            if query in vg_snapshot.name.lower()]
def is_all_field_none(self):
    """
    :rtype: bool
    """
    return all(field is None for field in (
        self._id_,
        self._time_responded,
        self._time_expiry,
        self._monetary_account_id,
        self._amount_inquired,
        self._amount_responded,
        self._alias,
        self._counterparty_alias,
        self._description,
        self._attachment,
        self._status,
        self._minimum_age,
        self._require_address,
        self._address_shipping,
        self._address_billing,
        self._geolocation,
        self._redirect_url,
        self._type_,
        self._sub_type,
        self._allow_chat,
        self._eligible_whitelist_id,
    ))
def extract_version(filepath='jeni.py', name='__version__'):
    """Parse __version__ out of given Python file.

    Given jeni.py has dependencies, `from jeni import __version__` will fail.
    """
    context = {}
    for line in open(filepath):
        if name in line:
            exec(line, context)
            break
    else:
        raise RuntimeError('{} not found in {}'.format(name, filepath))
    return context[name]
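A minimal runnable usage sketch (the file name here is hypothetical):

if __name__ == '__main__':
    with open('demo_module.py', 'w') as f:
        f.write("__version__ = '1.2.3'\n")
    # parsed without importing demo_module
    assert extract_version('demo_module.py') == '1.2.3'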
def extract(binary):
    '''
    Extract a code object from a binary pyc file.

    :param binary: a sequence of bytes from a pyc file.
    '''
    if len(binary) <= 8:
        raise Exception("Binary pyc must be greater than 8 bytes (got %i)" % len(binary))

    magic = binary[:4]
    MAGIC = get_magic()

    if magic != MAGIC:
        raise Exception("Python version mismatch (%r != %r) Is this a pyc file?" % (magic, MAGIC))

    modtime = time.asctime(time.localtime(struct.unpack('i', binary[4:8])[0]))
    code = marshal.loads(binary[8:])
    return modtime, code
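A hedged usage sketch; note the 8-byte header (magic + mtime) assumed by this helper predates the 16-byte pyc header introduced in CPython 3.7:

# with open('module.pyc', 'rb') as f:   # hypothetical path, same-version pyc
#     modtime, code = extract(f.read())
# import dis; dis.dis(code)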
def array_split(
        ary,
        indices_or_sections=None,
        axis=None,
        tile_shape=None,
        max_tile_bytes=None,
        max_tile_shape=None,
        sub_tile_shape=None,
        halo=None
):
    "To be replaced."
    return [
        ary[slyce]
        for slyce in shape_split(
            array_shape=ary.shape,
            indices_or_sections=indices_or_sections,
            axis=axis,
            array_start=None,
            array_itemsize=ary.itemsize,
            tile_shape=tile_shape,
            max_tile_bytes=max_tile_bytes,
            max_tile_shape=max_tile_shape,
            sub_tile_shape=sub_tile_shape,
            halo=halo,
            tile_bounds_policy=ARRAY_BOUNDS
        ).flatten()
    ]
def add(name, gid=None, **kwargs):
    '''
    Add the specified group

    CLI Example:

    .. code-block:: bash

        salt '*' group.add foo 3456
    '''
    ### NOTE: **kwargs isn't used here but needs to be included in this
    ### function for compatibility with the group.present state
    if info(name):
        raise CommandExecutionError(
            'Group \'{0}\' already exists'.format(name)
        )
    if salt.utils.stringutils.contains_whitespace(name):
        raise SaltInvocationError('Group name cannot contain whitespace')
    if name.startswith('_'):
        raise SaltInvocationError(
            'Salt will not create groups beginning with underscores'
        )
    if gid is not None and not isinstance(gid, int):
        raise SaltInvocationError('gid must be an integer')
    # check if gid is already in use
    gid_list = _list_gids()
    if six.text_type(gid) in gid_list:
        raise CommandExecutionError(
            'gid \'{0}\' already exists'.format(gid)
        )

    cmd = ['dseditgroup', '-o', 'create']
    if gid:
        # command arguments must be strings
        cmd.extend(['-i', str(gid)])
    cmd.append(name)
    return __salt__['cmd.retcode'](cmd, python_shell=False) == 0
def firmware_autoupgrade_params_username(self, **kwargs):
    """Auto Generated Code
    """
    config = ET.Element("config")
    firmware = ET.SubElement(config, "firmware", xmlns="urn:brocade.com:mgmt:brocade-firmware")
    autoupgrade_params = ET.SubElement(firmware, "autoupgrade-params")
    username = ET.SubElement(autoupgrade_params, "username")
    username.text = kwargs.pop('username')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def tag(self, *tags):
    """
    Tags the job with one or more unique identifiers.

    Tags must be hashable. Duplicate tags are discarded.

    :param tags: A unique list of ``Hashable`` tags.
    :return: The invoked job instance
    """
    if not all(isinstance(tag, collections.Hashable) for tag in tags):
        raise TypeError('Tags must be hashable')
    self.tags.update(tags)
    return self
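A usage sketch (hedged; it mirrors the schedule library's chaining style, since tag() returns the job instance):

# schedule.every().day.do(backup_job).tag('daily', 'backups')  # backup_job is hypothetical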
def state_likelihood(self, beta, alpha):
    """ Returns likelihood of the states given the variance latent variables

    Parameters
    ----------
    beta : np.array
        Contains untransformed starting values for latent variables

    alpha : np.array
        State matrix

    Returns
    ----------
    State likelihood
    """
    _, _, _, Q = self._ss_matrices(beta)
    residuals = alpha[0][1:] - alpha[0][:-1]
    return np.sum(ss.norm.logpdf(residuals, loc=0, scale=np.power(Q.ravel(), 0.5)))
def all_subclasses(cls):
    """
    Recursively generate all the subclasses of class cls.
    """
    for subclass in cls.__subclasses__():
        yield subclass
        for subc in all_subclasses(subclass):
            yield subc
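A small runnable example of walking a class hierarchy with this generator:

class A: pass
class B(A): pass
class C(B): pass

print([k.__name__ for k in all_subclasses(A)])  # ['B', 'C']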
def get_jwt_decrypt_keys(self, jwt, **kwargs):
    """
    Get decryption keys from this keyjar based on information carried
    in a JWE. These keys should be usable to decrypt an encrypted JWT.

    :param jwt: A cryptojwt.jwt.JWT instance
    :param kwargs: Other key word arguments
    :return: list of usable keys
    """
    try:
        _key_type = jwe_alg2keytype(jwt.headers['alg'])
    except KeyError:
        _key_type = ''

    try:
        _kid = jwt.headers['kid']
    except KeyError:
        logger.info('Missing kid')
        _kid = ''

    keys = self.get(key_use='enc', owner='', key_type=_key_type)

    try:
        _aud = kwargs['aud']
    except KeyError:
        _aud = ''

    if _aud:
        try:
            allow_missing_kid = kwargs['allow_missing_kid']
        except KeyError:
            allow_missing_kid = False

        try:
            nki = kwargs['no_kid_issuer']
        except KeyError:
            nki = {}

        keys = self._add_key(keys, _aud, 'enc', _key_type, _kid, nki,
                             allow_missing_kid)

    # Only want the appropriate keys.
    keys = [k for k in keys if k.appropriate_for('decrypt')]
    return keys
def getSiblings(self, retracted=False):
    """
    Returns the list of analyses of the Analysis Request to which this
    analysis belongs, but with the current analysis excluded.

    :param retracted: If false, retracted/rejected siblings are dismissed
    :type retracted: bool
    :return: list of siblings for this analysis
    :rtype: list of IAnalysis
    """
    request = self.getRequest()
    if not request:
        return []

    siblings = []
    retracted_states = [STATE_RETRACTED, STATE_REJECTED]
    for sibling in request.getAnalyses(full_objects=True):
        if api.get_uid(sibling) == self.UID():
            # Exclude me from the list
            continue

        if not retracted:
            if api.get_workflow_status_of(sibling) in retracted_states:
                # Exclude retracted analyses
                continue

        siblings.append(sibling)

    return siblings
def conditional_http_tween_factory(handler, registry):
    """
    Tween that adds ETag headers and tells Pyramid to enable
    conditional responses where appropriate.
    """
    settings = registry.settings if hasattr(registry, 'settings') else {}

    not_cacheable_list = []
    if 'not.cachable.list' in settings:
        not_cacheable_list = settings.get('not.cachable.list').split()

    def conditional_http_tween(request):
        response = handler(request)

        if request.path not in not_cacheable_list:
            # If the Last-Modified header has been set, we want to enable the
            # conditional response processing.
            if response.last_modified is not None:
                response.conditional_response = True

            # We want to only enable the conditional machinery if either we
            # were given an explicit ETag header by the view or we have a
            # buffered response and can generate the ETag header ourself.
            if response.etag is not None:
                response.conditional_response = True
            elif (isinstance(response.app_iter, Sequence) and
                    len(response.app_iter) == 1) and response.body is not None:
                response.conditional_response = True
                response.md5_etag()

        return response
    return conditional_http_tween
def isempty(result):
    '''
    Finds out if a scraping result should be considered empty.
    '''
    if isinstance(result, list):
        for element in result:
            if isinstance(element, list):
                if not isempty(element):
                    return False
            else:
                if element is not None:
                    return False
    else:
        if result is not None:
            return False
    return True
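A short runnable illustration of how nested None values count as empty:

print(isempty([None, [None, None]]))  # True
print(isempty([None, ['x']]))         # False
print(isempty('x'))                   # False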
def make(self, selection):
    """
    Scopes and selectors are tested in this order:
    * is this a CSS selector with an appended @something attribute?
    * is this a regular CSS selector?
    * is this an XPath expression?

    XPath expression can also use EXSLT functions (as long as they are
    understood by libxslt)
    """
    cached = self._selector_cache.get(selection)
    if cached:
        return cached

    namespaces = self.EXSLT_NAMESPACES
    self._add_parsley_ns(namespaces)
    try:
        # CSS with attribute? (non-standard but convenient)
        # CSS selector cannot select attributes
        # this "<css selector> @<attr>" syntax is a Parsley extension
        # construct CSS selector and append attribute to XPath expression
        m = self.REGEX_ENDING_ATTRIBUTE.match(selection)
        if m:
            # the selector should be a regular CSS selector
            cssxpath = css_to_xpath(m.group("expr"))

            # if "|" is used for namespace prefix reference,
            # convert it to XPath prefix syntax
            attribute = m.group("attr").replace('|', ':')

            cssxpath = "%s/%s" % (cssxpath, attribute)
        else:
            cssxpath = css_to_xpath(selection)

        selector = lxml.etree.XPath(
            cssxpath,
            namespaces=self.namespaces,
            extensions=self.extensions,
            smart_strings=(self.SMART_STRINGS
                           or self._test_smart_strings_needed(selection)),
        )

    except tuple(self.CSSSELECT_SYNTAXERROR_EXCEPTIONS) as syntax_error:
        if self.DEBUG:
            print(repr(syntax_error), selection)
            print("Try interpreting as XPath selector")
        try:
            selector = lxml.etree.XPath(
                selection,
                namespaces=self.namespaces,
                extensions=self.extensions,
                smart_strings=(self.SMART_STRINGS
                               or self._test_smart_strings_needed(selection)),
            )

        except lxml.etree.XPathSyntaxError as syntax_error:
            syntax_error.msg += ": %s" % selection
            raise syntax_error

        except Exception as e:
            if self.DEBUG:
                print(repr(e), selection)
            raise

    # for exception when trying to convert <cssselector> @<attribute> syntax
    except lxml.etree.XPathSyntaxError as syntax_error:
        syntax_error.msg += ": %s" % selection
        raise syntax_error

    except Exception as e:
        if self.DEBUG:
            print(repr(e), selection)
        raise

    # wrap it/cache it
    self._selector_cache[selection] = Selector(selector)
    return self._selector_cache[selection]
def log_debug(msg, logger="TaskLogger"):
    """Log a DEBUG message

    Convenience function to log a message to the default Logger

    Parameters
    ----------
    msg : str
        Message to be logged
    logger : str, optional (default: "TaskLogger")
        Unique name of the logger to retrieve

    Returns
    -------
    logger : TaskLogger
    """
    tasklogger = get_tasklogger(logger)
    tasklogger.debug(msg)
    return tasklogger
def read_nonblocking(self, size=1, timeout=-1):
    '''This reads at most size characters from the child application. It
    includes a timeout. If the read does not complete within the timeout
    period then a TIMEOUT exception is raised. If the end of file is read
    then an EOF exception will be raised. If a logfile is specified, a
    copy is written to that log.

    If timeout is None then the read may block indefinitely.
    If timeout is -1 then the self.timeout value is used. If timeout is 0
    then the child is polled and if there is no data immediately ready
    then this will raise a TIMEOUT exception.

    The timeout refers only to the amount of time to read at least one
    character. This is not affected by the 'size' parameter, so if you call
    read_nonblocking(size=100, timeout=30) and only one character is
    available right away then one character will be returned immediately.
    It will not wait for 30 seconds for another 99 characters to come in.

    This is a wrapper around os.read(). It uses select.select() to
    implement the timeout. '''

    if self.closed:
        raise ValueError('I/O operation on closed file.')

    if timeout == -1:
        timeout = self.timeout

    # Note that some systems such as Solaris do not give an EOF when
    # the child dies. In fact, you can still try to read
    # from the child_fd -- it will block forever or until TIMEOUT.
    # For this case, I test isalive() before doing any reading.
    # If isalive() is false, then I pretend that this is the same as EOF.
    if not self.isalive():
        # timeout of 0 means "poll"
        if self.use_poll:
            r = poll_ignore_interrupts([self.child_fd], timeout)
        else:
            r, w, e = select_ignore_interrupts([self.child_fd], [], [], 0)
        if not r:
            self.flag_eof = True
            raise EOF('End Of File (EOF). Braindead platform.')
    elif self.__irix_hack:
        # Irix takes a long time before it realizes a child was terminated.
        # FIXME So does this mean Irix systems are forced to always have
        # FIXME a 2 second delay when calling read_nonblocking? That sucks.
        if self.use_poll:
            r = poll_ignore_interrupts([self.child_fd], timeout)
        else:
            r, w, e = select_ignore_interrupts([self.child_fd], [], [], 2)
        if not r and not self.isalive():
            self.flag_eof = True
            raise EOF('End Of File (EOF). Slow platform.')

    if self.use_poll:
        r = poll_ignore_interrupts([self.child_fd], timeout)
    else:
        r, w, e = select_ignore_interrupts(
            [self.child_fd], [], [], timeout
        )

    if not r:
        if not self.isalive():
            # Some platforms, such as Irix, will claim that their
            # processes are alive; timeout on the select; and
            # then finally admit that they are not alive.
            self.flag_eof = True
            raise EOF('End of File (EOF). Very slow platform.')
        else:
            raise TIMEOUT('Timeout exceeded.')

    if self.child_fd in r:
        return super(spawn, self).read_nonblocking(size)

    raise ExceptionPexpect('Reached an unexpected state.')
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
      size (Optional[int]): number of bytes to read, where None is all
          remaining data.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the read failed.
      OSError: if the read failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')

    return self._vshadow_store.read(size)
def assign_account_entitlement_for_user(self, body, user_id, dont_notify_user=None, origin=None):
    """AssignAccountEntitlementForUser.

    [Preview API] Assign an explicit account entitlement

    :param :class:`<AccountEntitlementUpdateModel> <azure.devops.v5_0.licensing.models.AccountEntitlementUpdateModel>` body: The update model for the entitlement
    :param str user_id: The id of the user
    :param bool dont_notify_user:
    :param str origin:
    :rtype: :class:`<AccountEntitlement> <azure.devops.v5_0.licensing.models.AccountEntitlement>`
    """
    route_values = {}
    if user_id is not None:
        route_values['userId'] = self._serialize.url('user_id', user_id, 'str')
    query_parameters = {}
    if dont_notify_user is not None:
        query_parameters['dontNotifyUser'] = self._serialize.query('dont_notify_user', dont_notify_user, 'bool')
    if origin is not None:
        query_parameters['origin'] = self._serialize.query('origin', origin, 'str')
    content = self._serialize.body(body, 'AccountEntitlementUpdateModel')
    response = self._send(http_method='PUT',
                          location_id='6490e566-b299-49a7-a4e4-28749752581f',
                          version='5.0-preview.1',
                          route_values=route_values,
                          query_parameters=query_parameters,
                          content=content)
    return self._deserialize('AccountEntitlement', response)
def parse(self, generator):
    """Parse an iterable source of strings into a generator"""
    gen = iter(generator)
    for line in gen:
        block = {}
        for rule in self.rules:
            if rule[0](line):
                block = rule[1](line, gen)
                break
        yield block
def from_size(value):
    '''
    Convert zfs size (human readable) to python int (bytes)
    '''
    match_size = re_zfs_size.match(str(value))
    if match_size:
        v_unit = match_size.group(2).upper()[0]
        v_size = float(match_size.group(1))
        v_multiplier = math.pow(1024, zfs_size.index(v_unit) + 1)
        value = v_size * v_multiplier
        if int(value) == value:
            value = int(value)
    elif value is not None:
        value = str(value)

    return from_numeric(value)
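Illustrative conversions (hedged; assumes the module-level zfs_size unit list starts at 'K' and re_zfs_size matches strings like '1.5M'):

# from_size('1K')    -> 1024
# from_size('1.5M')  -> 1572864  (1.5 * 1024**2)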
def _probs(density_matrix: np.ndarray, indices: List[int],
           num_qubits: int) -> List[float]:
    """Returns the probabilities for a measurement on the given indices."""
    # Only diagonal elements matter.
    all_probs = np.diagonal(
        np.reshape(density_matrix, (2 ** num_qubits, 2 ** num_qubits)))
    # Shape into a tensor
    tensor = np.reshape(all_probs, [2] * num_qubits)

    # Calculate the probabilities for measuring the particular results.
    probs = [
        np.sum(np.abs(tensor[linalg.slice_for_qubits_equal_to(indices, b)]))
        for b in range(2 ** len(indices))
    ]

    # To deal with rounding issues, ensure that the probabilities sum to 1.
    probs /= np.sum(probs)  # type: ignore

    return probs
def run(self):
    """
    Spawn a background thread to periodically report queued spans
    """
    self.timer = t.Thread(target=self.report_spans)
    self.timer.daemon = True
    self.timer.name = "Instana Span Reporting"
    self.timer.start()
def parentItem(self, value):
    """ The parent item """
    self._parentItem = value
    self._recursiveSetNodePath(self._constructNodePath())
def read_stats(self):
    """ Read current statistics from chassis.

    :return: dictionary {tpld full index {group name {stat name: stat value}}}
    """
    self.statistics = TgnObjectsDict()
    for port in self.session.ports.values():
        for tpld in port.tplds.values():
            self.statistics[tpld] = tpld.read_stats()
    return self.statistics
def fetch_batch(self, formatter=TableFormat):
    """
    Fetch a batch of logs and return using the specified formatter.
    Formatter is class type defined in :py:mod:`smc_monitoring.models.formatters`.
    This fetch type will be a single shot fetch (this method forces
    ``fetch_type='stored'``). If ``fetch_size`` is not already set on the
    query, the default fetch_size will be 200.

    :param formatter: Formatter type for data representation. Any type
        in :py:mod:`smc_monitoring.models.formatters`.
    :return: generator returning data in specified format
    """
    clone = self.copy()
    clone.update_query(type='stored')
    if not clone.fetch_size or clone.fetch_size <= 0:
        clone.request['fetch'].update(quantity=200)
    fmt = formatter(clone)
    for result in clone.fetch_raw():
        yield fmt.formatted(result)
def _handle_request(self, request):
    """Finds the resource to which a request maps and then calls it.

    Instantiates, fills and returns a :class:`webob.Response` object. If
    no resource matches the request, a 404 status is set on the
    response object.

    :param request: Object representing the current request.
    :type request: :class:`webob.Request`
    """
    response = webob.Response(request=request)
    path = request.path_info
    parsed = self._urlmap(path)
    if parsed:
        path_params, resource = parsed
    else:
        path_params, resource = {}, self.NOT_FOUND_RESOURCE
    instance = resource(request=request, response=response,
                        path_params=path_params, application=self)
    response = instance()
    if request.method == 'HEAD':
        response.body = ''
    return response
def remove(self, key):
    """
    Transactional implementation of :func:`Map.remove(key)
    <hazelcast.proxy.map.Map.remove>`

    The object to be removed will be removed from only the current
    transaction context until the transaction is committed.

    :param key: (object), key of the mapping to be deleted.
    :return: (object), the previous value associated with key, or ``None``
        if there was no mapping for key.
    """
    check_not_none(key, "key can't be none")
    return self._encode_invoke(transactional_map_remove_codec, key=self._to_data(key))
def _write_marker(self, indent_string, depth, entry, comment):
    """Write a section marker line"""
    return '%s%s%s%s%s' % (indent_string,
                           self._a_to_u('[' * depth),
                           self._quote(self._decode_element(entry), multiline=False),
                           self._a_to_u(']' * depth),
                           self._decode_element(comment))
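An illustrative call (hedged; assumes _quote leaves a plain word unquoted and that _a_to_u/_decode_element are identity on str):

# self._write_marker('    ', 2, 'server', ' # main')
# -> '    [[server]] # main'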
def xpathNextParent(self, cur):
    """Traversal function for the "parent" direction The parent
       axis contains the parent of the context node, if there is
       one. """
    if cur is None:
        cur__o = None
    else:
        cur__o = cur._o
    ret = libxml2mod.xmlXPathNextParent(self._o, cur__o)
    if ret is None:
        raise xpathError('xmlXPathNextParent() failed')
    __tmp = xmlNode(_obj=ret)
    return __tmp
def _set_character_restriction(self, v, load=False):
    """
    Setter method for character_restriction, mapped from YANG variable /password_attributes/character_restriction (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_character_restriction is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_character_restriction() directly.
    """
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v, base=character_restriction.character_restriction, is_container='container', presence=False, yang_name="character-restriction", rest_name="character-restriction", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure restriction on various types of\n characters'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """character_restriction must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=character_restriction.character_restriction, is_container='container', presence=False, yang_name="character-restriction", rest_name="character-restriction", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Configure restriction on various types of\n characters'}}, namespace='urn:brocade.com:mgmt:brocade-aaa', defining_module='brocade-aaa', yang_type='container', is_config=True)""",
        })

    self.__character_restriction = t
    if hasattr(self, '_set'):
        self._set()
def import_class(path):
    """
    Import a class from a string module class path
    """
    components = path.split(".")
    module = components[:-1]
    module = ".".join(module)
    mod = __import__(module, fromlist=[native_str(components[-1])])
    return getattr(mod, native_str(components[-1]))
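A runnable example (assumes native_str is the identity on Python 3 str, as in the future/past compatibility libraries):

cls = import_class('collections.OrderedDict')
print(cls is __import__('collections').OrderedDict)  # True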
def decode_data(self, encoded):
    '''
    Decode sensor data.

    Returns:
        dict: Sensor values
    '''
    try:
        identifier = None
        data_format = 2
        if len(encoded) > 8:
            data_format = 4
            identifier = encoded[8:]
            encoded = encoded[:8]
        decoded = bytearray(base64.b64decode(encoded, '-_'))
        return {
            'data_format': data_format,
            'temperature': self._get_temperature(decoded),
            'humidity': self._get_humidity(decoded),
            'pressure': self._get_pressure(decoded),
            'identifier': identifier
        }
    except Exception:
        log.exception('Encoded value: %s not valid', encoded)
        return None
def extern_equals(self, context_handle, val1, val2):
    """Return true if the given Handles are __eq__."""
    return self._ffi.from_handle(val1[0]) == self._ffi.from_handle(val2[0])
def _wrap_thing(self, thing, kind):
    """Mimic praw.Submission and praw.Comment API"""
    thing['created'] = self._epoch_utc_to_local(thing['created_utc'])
    thing['d_'] = copy.deepcopy(thing)
    ThingType = namedtuple(kind, thing.keys())
    thing = ThingType(**thing)
    return thing
def accept_vpc_peering_connection(name=None, conn_id=None, conn_name=None,
                                  region=None, key=None, keyid=None, profile=None):
    '''
    Accept a VPC pending requested peering connection between two VPCs.

    name
        Name of this state

    conn_id
        The connection ID to accept. Exclusive with conn_name. String type.

    conn_name
        The name of the VPC peering connection to accept. Exclusive with
        conn_id. String type.

    region
        Region to connect to.

    key
        Secret key to be used.

    keyid
        Access key to be used.

    profile
        A dict with region, key and keyid, or a pillar key (string) that
        contains a dict with region, key and keyid.

    .. versionadded:: 2016.11.0

    Example:

    .. code-block:: yaml

        boto_vpc.accept_vpc_peering_connection:
          - conn_name: salt_peering_connection

        # usage with vpc peering connection id and region
        boto_vpc.accept_vpc_peering_connection:
          - conn_id: pbx-1873d472
          - region: us-west-2

    '''
    log.debug('Called state to accept VPC peering connection')
    pending = __salt__['boto_vpc.is_peering_connection_pending'](
        conn_id=conn_id, conn_name=conn_name, region=region, key=key,
        keyid=keyid, profile=profile)

    ret = {
        'name': name,
        'result': True,
        'changes': {},
        'comment': 'Boto VPC peering state'
    }

    if not pending:
        ret['result'] = True
        ret['changes'].update({'old': 'No pending VPC peering connection found. Nothing to be done.'})
        return ret

    if __opts__['test']:
        ret['changes'].update({'old': 'Pending VPC peering connection found and can be accepted'})
        return ret

    fun = 'boto_vpc.accept_vpc_peering_connection'
    log.debug('Calling `%s()` to accept this VPC peering connection', fun)
    result = __salt__[fun](conn_id=conn_id, name=conn_name, region=region,
                           key=key, keyid=keyid, profile=profile)

    if 'error' in result:
        ret['comment'] = "Failed to accept VPC peering: {0}".format(result['error'])
        ret['result'] = False
        return ret

    ret['changes'].update({'old': '', 'new': result['msg']})
    return ret
def plugin(name, module=''):
    """
    Returns the plugin for the given name.
    By default, the base Builder instance will be returned.

    :param name | <str>
    """
    if module:
        mod = projex.importfile(module)
        if mod:
            return getattr(mod, nstr(name), None)

    return Builder._plugins.get(nstr(name))
def _map(self, from_pos, to_pos, pos, base):
    """Map position between aligned sequences

    Positions in this function are 0-based.
    """
    pos_i = -1
    while pos_i < len(self.cigar_op) and pos >= from_pos[pos_i + 1]:
        pos_i += 1

    if pos_i == -1 or pos_i == len(self.cigar_op):
        raise HGVSInvalidIntervalError("Position is beyond the bounds of transcript record")

    if self.cigar_op[pos_i] in "=MX":
        mapped_pos = to_pos[pos_i] + (pos - from_pos[pos_i])
        mapped_pos_offset = 0
    elif self.cigar_op[pos_i] in "DI":
        if base == "start":
            mapped_pos = to_pos[pos_i] - 1
        elif base == "end":
            mapped_pos = to_pos[pos_i]
        mapped_pos_offset = 0
    elif self.cigar_op[pos_i] == "N":
        if pos - from_pos[pos_i] + 1 <= from_pos[pos_i + 1] - pos:
            mapped_pos = to_pos[pos_i] - 1
            mapped_pos_offset = pos - from_pos[pos_i] + 1
        else:
            mapped_pos = to_pos[pos_i]
            mapped_pos_offset = -(from_pos[pos_i + 1] - pos)

    return mapped_pos, mapped_pos_offset, self.cigar_op[pos_i]
def get_if_addr6(iff):
    """
    Returns the main global unicast address associated with provided
    interface, in human readable form. If no global address is
    found, None is returned.
    """
    return next((x[0] for x in in6_getifaddr()
                 if x[2] == iff and x[1] == IPV6_ADDR_GLOBAL), None)
def parse(self, content):
    """
    Parse the fetched feed content

    The dict returned by feedparser contains a 'bozo' key, which is '1'
    when the feed is malformed.

    Return None if the feed is malformed and 'bozo_accept' is 'False',
    else return the feed content dict.

    If the feed is malformed but 'bozo_accept' is 'True', the feed
    content dict will contain the parsing error exception information
    in 'bozo_exception'.
    """
    if content is None:
        return None

    feed = feedparser.parse(content)

    # When feed is malformed
    if feed['bozo']:
        # keep track of the parsing error exception but as string
        # infos, not an exception object
        exception_content = {
            "exception": str(type(feed['bozo_exception'])),
            "content": str(feed['bozo_exception'].getException()),
            "line": feed['bozo_exception'].getLineNumber(),
            "message": feed['bozo_exception'].getMessage(),
        }
        # Overwrite the bozo content from feedparser
        feed['bozo_exception'] = exception_content
        # bozo feeds are not accepted
        if not self.bozo_accept:
            feed = None

    return feed
def diag(A, k=0):
    """Extract or construct a diagonal polynomial array."""
    if isinstance(A, Poly):
        core, core_new = A.A, {}
        for key in A.keys:
            core_new[key] = numpy.diag(core[key], k)
        return Poly(core_new, A.dim, None, A.dtype)

    return numpy.diag(A, k)
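For plain arrays the Poly branch is skipped and this behaves exactly like numpy.diag; a quick runnable check:

import numpy
print(diag(numpy.arange(9).reshape(3, 3)))  # [0 4 8]
print(diag(numpy.array([1, 2])))            # [[1 0] [0 2]]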
def get_instructions(self, cm, size, insn, idx):
    """
    :param cm: a ClassManager object
    :type cm: :class:`ClassManager` object
    :param size: the total size of the buffer
    :type size: int
    :param insn: a raw buffer where are the instructions
    :type insn: string
    :param idx: a start address in the buffer
    :type idx: int

    :rtype: a generator of :class:`Instruction` objects
    """
    self.odex = cm.get_odex_format()

    max_idx = size * calcsize('=H')
    if max_idx > len(insn):
        max_idx = len(insn)

    # Get instructions
    while idx < max_idx:
        obj = None
        classic_instruction = True

        op_value = insn[idx]

        # payload instructions or extended/optimized instructions
        if (op_value == 0x00 or op_value == 0xff) and ((idx + 2) < max_idx):
            op_value = unpack('=H', insn[idx:idx + 2])[0]

            # payload instructions ?
            if op_value in DALVIK_OPCODES_PAYLOAD:
                try:
                    obj = get_instruction_payload(op_value, insn[idx:])
                    classic_instruction = False
                except struct.error:
                    warning("error while decoding instruction ...")

            elif op_value in DALVIK_OPCODES_EXTENDED_WIDTH:
                try:
                    obj = get_extented_instruction(cm, op_value, insn[idx:])
                    classic_instruction = False
                except struct.error as why:
                    warning("error while decoding instruction ..." + why.__str__())

            # optimized instructions ?
            elif self.odex and (op_value in DALVIK_OPCODES_OPTIMIZED):
                obj = get_optimized_instruction(cm, op_value, insn[idx:])
                classic_instruction = False

        # classical instructions
        if classic_instruction:
            op_value = insn[idx]
            obj = get_instruction(cm, op_value, insn[idx:], self.odex)

        # emit instruction
        yield obj
        idx = idx + obj.get_length()
def lookup(self, hostname):
    """
    Find a hostkey entry for a given hostname or IP. If no entry is found,
    ``None`` is returned. Otherwise a dictionary of keytype to key is
    returned. The keytype will be either ``"ssh-rsa"`` or ``"ssh-dss"``.

    :param str hostname: the hostname (or IP) to lookup
    :return: dict of `str` -> `.PKey` keys associated with this host
        (or ``None``)
    """
    class SubDict(MutableMapping):
        def __init__(self, hostname, entries, hostkeys):
            self._hostname = hostname
            self._entries = entries
            self._hostkeys = hostkeys

        def __iter__(self):
            for k in self.keys():
                yield k

        def __len__(self):
            return len(self.keys())

        def __delitem__(self, key):
            # for/else: only raise if no entry matched the key
            for e in list(self._entries):
                if e.key.get_name() == key:
                    self._entries.remove(e)
                    break
            else:
                raise KeyError(key)

        def __getitem__(self, key):
            for e in self._entries:
                if e.key.get_name() == key:
                    return e.key
            raise KeyError(key)

        def __setitem__(self, key, val):
            for e in self._entries:
                if e.key is None:
                    continue
                if e.key.get_name() == key:
                    # replace
                    e.key = val
                    break
            else:
                # add a new one
                e = HostKeyEntry([hostname], val)
                self._entries.append(e)
                self._hostkeys._entries.append(e)

        def keys(self):
            return [
                e.key.get_name()
                for e in self._entries
                if e.key is not None
            ]

    entries = []
    for e in self._entries:
        if self._hostname_matches(hostname, e):
            entries.append(e)
    if len(entries) == 0:
        return None
    return SubDict(hostname, entries, self)
def create_index_list(self, table_name, attr_names):
    """
    :param str table_name: Table name that contains the attributes.
    :param list attr_names:
        List of attribute names to create indices for.
        Attributes that do not exist in the table are ignored.

    .. seealso:: :py:meth:`.create_index`
    """
    self.validate_access_permission(["w", "a"])

    if typepy.is_empty_sequence(attr_names):
        return

    table_attr_set = set(self.fetch_attr_names(table_name))
    index_attr_set = set(AttrList.sanitize(attr_names))

    for attribute in list(table_attr_set.intersection(index_attr_set)):
        self.create_index(table_name, attribute)
def create_tags(filesystemid, tags, keyid=None, key=None, profile=None,
                region=None, **kwargs):
    '''
    Creates or overwrites tags associated with a file system.
    Each tag is a key-value pair. If a tag key specified in the request
    already exists on the file system, this operation overwrites
    its value with the value provided in the request.

    filesystemid
        (string) - ID of the file system whose tags will be modified.
    tags
        (dict) - The tags to add to the file system

    CLI Example:

    .. code-block:: bash

        salt 'my-minion' boto_efs.create_tags
    '''
    client = _get_conn(key=key, keyid=keyid, profile=profile, region=region)

    new_tags = []
    for k, v in six.iteritems(tags):
        new_tags.append({'Key': k, 'Value': v})

    client.create_tags(FileSystemId=filesystemid, Tags=new_tags)
def prefetch_urls(self, urls):
    """
    Prefetch a list of files; see the docs at
    http://developer.qiniu.com/article/fusion/api/prefetch.html

    Args:
        urls: list of external file URLs to prefetch

    Returns:
        a dict and a ResponseInfo object; see examples/cdn_manager.py
        for reference code
    """
    req = {}
    req.update({"urls": urls})

    body = json.dumps(req)
    url = '{0}/v2/tune/prefetch'.format(self.server)

    return self.__post(url, body)
def get_attrs(cls):
    """ Get all class attributes ordered by definition
    """
    ignore = dir(type('dummy', (object,), {})) + ['__metaclass__']
    attrs = [
        item for item in inspect.getmembers(cls)
        if item[0] not in ignore
        and not isinstance(
            item[1],
            (types.FunctionType, types.MethodType,
             classmethod, staticmethod, property))
    ]
    # sort by idx and use attribute name to break ties
    attrs.sort(key=lambda attr: (getattr(attr[1], 'idx', -1), attr[0]))
    return attrs
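A runnable sketch of the ordering contract (the 'idx' attribute drives the sort, the attribute name breaks ties):

class Field:
    def __init__(self, idx):
        self.idx = idx

class Model:
    b = Field(0)
    a = Field(1)

print([name for name, _ in get_attrs(Model)])  # ['b', 'a']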
def backbone_bond_lengths(self):
    """Dictionary containing backbone bond lengths as lists of floats.

    Returns
    -------
    bond_lengths : dict
        Keys are `n_ca`, `ca_c`, `c_o` and `c_n`, referring to the
        N-CA, CA-C, C=O and C-N bonds respectively. Values are lists
        of floats : the bond lengths in Angstroms.
        The lists of n_ca, ca_c and c_o are of length k for a
        Polypeptide containing k Residues. The list of c_n bonds is of
        length k-1 for a Polypeptide containing k Residues (C-N formed
        between successive `Residue` pairs).
    """
    bond_lengths = dict(
        n_ca=[distance(r['N'], r['CA'])
              for r in self.get_monomers(ligands=False)],
        ca_c=[distance(r['CA'], r['C'])
              for r in self.get_monomers(ligands=False)],
        c_o=[distance(r['C'], r['O'])
             for r in self.get_monomers(ligands=False)],
        c_n=[distance(r1['C'], r2['N'])
             for r1, r2 in [(self[i], self[i + 1])
                            for i in range(len(self) - 1)]],
    )
    return bond_lengths
def get_unresolved_properties_by_inheritance(self, timeperiod):
    """
    Fill full properties with template if needed for the unresolved values
    (example: sunday ETCETC)

    :return: None
    """
    # Ok, I do not have prop, Maybe my templates do?
    # Same story for plus
    for i in timeperiod.templates:
        template = self.templates[i]
        timeperiod.unresolved.extend(template.unresolved)
def get_disease(self, disease_name=None, disease_id=None, definition=None, parent_ids=None, tree_numbers=None,
                parent_tree_numbers=None, slim_mapping=None, synonym=None, alt_disease_id=None, limit=None,
                as_df=False):
    """
    Get diseases

    :param bool as_df: if set to True result returns as `pandas.DataFrame`
    :param int limit: maximum number of results
    :param str disease_name: disease name
    :param str disease_id: disease identifier
    :param str definition: definition of disease
    :param str parent_ids: parent identifiers, delimiter |
    :param str tree_numbers: tree numbers, delimiter |
    :param str parent_tree_numbers: parent tree numbers, delimiter
    :param str slim_mapping: term derived from the MeSH tree structure for the “Diseases” [C] branch, \
        that classifies MEDIC diseases into high-level categories
    :param str synonym: disease synonyms
    :param str alt_disease_id: alternative disease identifiers
    :return: list of :class:`pyctd.manager.models.Disease` object

    .. seealso:: :class:`pyctd.manager.models.Disease`
    .. todo:: normalize parent_ids, tree_numbers and parent_tree_numbers in :class:`pyctd.manager.models.Disease`
    """
    q = self.session.query(models.Disease)
    if disease_name:
        q = q.filter(models.Disease.disease_name.like(disease_name))
    if disease_id:
        q = q.filter(models.Disease.disease_id == disease_id)
    if definition:
        q = q.filter(models.Disease.definition.like(definition))
    if parent_ids:
        q = q.filter(models.Disease.parent_ids.like(parent_ids))
    if tree_numbers:
        q = q.filter(models.Disease.tree_numbers.like(tree_numbers))
    if parent_tree_numbers:
        q = q.filter(models.Disease.parent_tree_numbers.like(parent_tree_numbers))
    if slim_mapping:
        q = q.join(models.DiseaseSlimmapping).filter(models.DiseaseSlimmapping.slim_mapping.like(slim_mapping))
    if synonym:
        q = q.join(models.DiseaseSynonym).filter(models.DiseaseSynonym.synonym.like(synonym))
    if alt_disease_id:
        q = q.join(models.DiseaseAltdiseaseid).filter(models.DiseaseAltdiseaseid.alt_disease_id == alt_disease_id)
    return self._limit_and_df(q, limit, as_df)
Get diseases

:param bool as_df: if set to True result returns as `pandas.DataFrame`
:param int limit: maximum number of results
:param str disease_name: disease name
:param str disease_id: disease identifier
:param str definition: definition of disease
:param str parent_ids: parent identifiers, delimiter |
:param str tree_numbers: tree numbers, delimiter |
:param str parent_tree_numbers: parent tree numbers, delimiter |
:param str slim_mapping: term derived from the MeSH tree structure for the “Diseases” [C] branch, \
that classifies MEDIC diseases into high-level categories
:param str synonym: disease synonyms
:param str alt_disease_id: alternative disease identifiers
:return: list of :class:`pyctd.manager.models.Disease` objects

.. seealso::

    :class:`pyctd.manager.models.Disease`

.. todo::
    normalize parent_ids, tree_numbers and parent_tree_numbers in :class:`pyctd.manager.models.Disease`
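A hedged usage sketch for the query above, assuming `query` is an instance of the manager class that defines `get_disease`, connected to a populated database. The string filters go through SQL LIKE, so '%' wildcards should work; the disease name here is made up:

# Hypothetical call site; '%Parkinson%' matches any disease_name containing it.
diseases = query.get_disease(disease_name='%Parkinson%', limit=10)
for disease in diseases:
    print(disease.disease_id, disease.disease_name)

# The same query returned as a pandas.DataFrame:
df = query.get_disease(disease_name='%Parkinson%', limit=10, as_df=True)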
def spin(self): """Flush any registration notifications and execution results waiting in the ZMQ queue. """ if self._notification_socket: self._flush_notifications() if self._iopub_socket: self._flush_iopub(self._iopub_socket) if self._mux_socket: self._flush_results(self._mux_socket) if self._task_socket: self._flush_results(self._task_socket) if self._control_socket: self._flush_control(self._control_socket) if self._query_socket: self._flush_ignored_hub_replies()
Flush any registration notifications and execution results waiting in the ZMQ queue.
async def iter_all(
    self,
    direction: msg.StreamDirection = msg.StreamDirection.Forward,
    from_position: Optional[Union[msg.Position, msg._PositionSentinel]] = None,
    batch_size: int = 100,
    resolve_links: bool = True,
    require_master: bool = False,
    correlation_id: Optional[uuid.UUID] = None,
):
    """
    Read through all the events in the database.

    Args:
        direction (optional): Controls whether to read forward or backward
            through the events. Defaults to StreamDirection.Forward
        from_position (optional): The position to start reading from.
            Defaults to photonpump.Beginning when direction is Forward,
            photonpump.End when direction is Backward.
        batch_size (optional): The maximum number of events to read at a time.
        resolve_links (optional): True if eventstore should
            automatically resolve Link Events, otherwise False.
        require_master (optional): True if this command must be
            sent direct to the master node, otherwise False.
        correlation_id (optional): A unique identifier for this command.

    Examples:
        Print every event from the database.

        >>> with async.connect() as conn:
        >>>     async for event in conn.iter_all():
        >>>         print(event)

        Print every event from the database in reverse order

        >>> with async.connect() as conn:
        >>>     async for event in conn.iter_all(direction=StreamDirection.Backward):
        >>>         print(event)

        Start reading from a known commit position

        >>> with async.connect() as conn:
        >>>     async for event in conn.iter_all(from_position=Position(12345)):
        >>>         print(event)
    """
    cmd = convo.IterAllEvents(
        msg.Position.for_direction(direction, from_position),
        batch_size,
        resolve_links,
        require_master,
        direction,
        self.credential,
        correlation_id,
    )
    result = await self.dispatcher.start_conversation(cmd)
    iterator = await result

    async for event in iterator:
        yield event
Read through all the events in the database.

Args:
    direction (optional): Controls whether to read forward or backward
        through the events. Defaults to StreamDirection.Forward
    from_position (optional): The position to start reading from.
        Defaults to photonpump.Beginning when direction is Forward,
        photonpump.End when direction is Backward.
    batch_size (optional): The maximum number of events to read at a time.
    resolve_links (optional): True if eventstore should
        automatically resolve Link Events, otherwise False.
    require_master (optional): True if this command must be
        sent direct to the master node, otherwise False.
    correlation_id (optional): A unique identifier for this command.

Examples:
    Print every event from the database.

    >>> with async.connect() as conn:
    >>>     async for event in conn.iter_all():
    >>>         print(event)

    Print every event from the database in reverse order

    >>> with async.connect() as conn:
    >>>     async for event in conn.iter_all(direction=StreamDirection.Backward):
    >>>         print(event)

    Start reading from a known commit position

    >>> with async.connect() as conn:
    >>>     async for event in conn.iter_all(from_position=Position(12345)):
    >>>         print(event)
def __cleanup_breakpoint(self, event, bp): "Auxiliary method." try: process = event.get_process() thread = event.get_thread() bp.disable(process, thread) # clear the debug regs / trap flag except Exception: pass bp.set_condition(True) # break possible circular reference bp.set_action(None)
Auxiliary method.
def word(self): """Property of the DigitWord returning (or setting) the DigitWord as a list of integers (or string representations) of DigitModel. The property is called during instantiation as the property validates the value passed and ensures that all digits are valid.""" if self.wordtype == DigitWord.DIGIT: return self._word else: # Strip out '0x' from the string representation. Note, this could be replaced with the # following code: str(hex(a))[2:] but is more obvious in the code below. return [str(hex(a)).replace('0x', '') for a in self._word]
Property of the DigitWord returning (or setting) the DigitWord as a list of integers (or string representations) of DigitModel. The property is called during instantiation as the property validates the value passed and ensures that all digits are valid.
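The hex conversion in the else-branch can be shown standalone; for single digits it is equivalent to the more idiomatic format(a, 'x'):

values = [0, 9, 10, 15]
print([str(hex(a)).replace('0x', '') for a in values])  # ['0', '9', 'a', 'f']
print([format(a, 'x') for a in values])                 # ['0', '9', 'a', 'f']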
def main(): '''the main entry point for the HelpMe Command line application. Currently, the user can request help or set config values for a particular helper. ''' # Customize parser parser = get_parser() subparsers = get_subparsers(parser) def help(return_code=0): '''print help, including the software version and active client and exit with return code. ''' version = helpme.__version__ bot.custom(message='Command Line Tool v%s' %version, prefix='\n[HelpMe] ', color='CYAN') parser.print_help() sys.exit(return_code) # If the user didn't provide any arguments, show the full help if len(sys.argv) == 1: help() try: args, unknown = parser.parse_known_args() except: sys.exit(0) extras = None if args.command in HELPME_HELPERS and len(unknown) > 0: extras = unknown # if environment logging variable not set, make silent if args.debug is False: os.environ['MESSAGELEVEL'] = "INFO" # Show the version and exit if args.version is True: print(helpme.__version__) sys.exit(0) if args.command == "config": from .config import main if args.command == "list": from .list import main if args.command in HELPME_HELPERS: from .help import main # Pass on to the correct parser return_code = 0 try: main(args, extras) sys.exit(return_code) except UnboundLocalError: return_code = 1 help(return_code)
the main entry point for the HelpMe Command line application. Currently, the user can request help or set config values for a particular helper.
def from_dict(dic):
    """ Recursively convert a dict into a dictobj.

    :param dic:
    :return:
    """
    return ODict((k, ODict.convert_ifdic(v)) for k, v in dic.items())
Recursively convert a dict into a dictobj.

:param dic:
:return:
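A self-contained sketch of the same idea, under the assumption that ODict is a dict subclass with attribute access and that convert_ifdic (not shown above) recurses into nested dicts and, plausibly, lists:

class ODict(dict):
    """Toy dict-with-attribute-access used to illustrate from_dict."""
    __getattr__ = dict.__getitem__

    @staticmethod
    def convert_ifdic(value):
        # Recurse into dicts and lists; leave scalars untouched.
        if isinstance(value, dict):
            return ODict((k, ODict.convert_ifdic(v)) for k, v in value.items())
        if isinstance(value, list):
            return [ODict.convert_ifdic(v) for v in value]
        return value

obj = ODict.convert_ifdic({'server': {'host': 'localhost', 'port': 8080}})
print(obj.server.port)  # 8080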
def save(self, fname): """Save figure to SVG file. Parameters ---------- fname : str Full path to file. """ element = _transform.SVGFigure(self.width, self.height) element.append(self) element.save(os.path.join(CONFIG['figure.save_path'], fname))
Save figure to SVG file. Parameters ---------- fname : str Full path to file.
def specific_gains(string): """Convert string with gains of individual amplification elements to dict""" if not string: return {} gains = {} for gain in string.split(','): amp_name, value = gain.split('=') gains[amp_name.strip()] = float(value.strip()) return gains
Convert string with gains of individual amplification elements to dict
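Given the parsing above, a quick demonstration (the amplifier names are made up; any comma-separated name=value list works):

print(specific_gains('LNA=32, VGA=20, AMP=14'))
# {'LNA': 32.0, 'VGA': 20.0, 'AMP': 14.0}
print(specific_gains(''))  # {} -- empty input short-circuits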
def tempfile_writer(target): '''write cache data to a temporary location. when writing is complete, rename the file to the actual location. delete the temporary file on any error''' tmp = target.parent / ('_%s' % target.name) try: with tmp.open('wb') as fd: yield fd except: tmp.unlink() raise LOG.debug('rename %s -> %s', tmp, target) tmp.rename(target)
write cache data to a temporary location. when writing is complete, rename the file to the actual location. delete the temporary file on any error
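Because the function yields a file object, it is presumably wrapped with contextlib.contextmanager somewhere not shown in the snippet. A runnable sketch under that assumption, with the logging call dropped:

import contextlib
import pathlib

@contextlib.contextmanager
def tempfile_writer(target):
    tmp = target.parent / ('_%s' % target.name)  # write beside the target
    try:
        with tmp.open('wb') as fd:
            yield fd
    except BaseException:
        tmp.unlink()  # discard the partial file on any error
        raise
    tmp.rename(target)  # publish the finished file on success

with tempfile_writer(pathlib.Path('cache.bin')) as fd:
    fd.write(b'payload')  # goes to _cache.bin, renamed on clean exit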
def get_renderer(self, with_layout=True): """ Get the default renderer """ if with_layout and self.is_lti(): return self._default_renderer_lti elif with_layout: return self._default_renderer else: return self._default_renderer_nolayout
Get the default renderer
def get_objective_requisite_assignment_session_for_objective_bank(self, objective_bank_id, proxy, *args, **kwargs):
    """Gets the ``OsidSession`` associated with the objective requisite assignment service for the given objective bank.

    :param objective_bank_id: the ``Id`` of the objective bank
    :type objective_bank_id: ``osid.id.Id``
    :param proxy: a proxy
    :type proxy: ``osid.proxy.Proxy``
    :return: an ``ObjectiveRequisiteAssignmentSession``
    :rtype: ``osid.learning.ObjectiveRequisiteAssignmentSession``
    :raise: ``NotFound`` -- ``objective_bank_id`` not found
    :raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
    :raise: ``OperationFailed`` -- ``unable to complete request``
    :raise: ``Unimplemented`` -- ``supports_objective_requisite_assignment()`` or ``supports_visible_federation()`` is ``false``

    *compliance: optional -- This method must be implemented if ``supports_objective_requisite_assignment()`` and ``supports_visible_federation()`` are ``true``.*

    """
    if not objective_bank_id:
        raise NullArgument
    if not self.supports_objective_requisite_assignment():
        raise Unimplemented()
    try:
        from . import sessions
    except ImportError:
        raise OperationFailed()
    proxy = self._convert_proxy(proxy)
    try:
        session = sessions.ObjectiveRequisiteAssignmentSession(objective_bank_id=objective_bank_id, proxy=proxy, runtime=self._runtime)
    except AttributeError:
        raise OperationFailed()
    return session
Gets the ``OsidSession`` associated with the objective requisite assignment service for the given objective bank.

:param objective_bank_id: the ``Id`` of the objective bank
:type objective_bank_id: ``osid.id.Id``
:param proxy: a proxy
:type proxy: ``osid.proxy.Proxy``
:return: an ``ObjectiveRequisiteAssignmentSession``
:rtype: ``osid.learning.ObjectiveRequisiteAssignmentSession``
:raise: ``NotFound`` -- ``objective_bank_id`` not found
:raise: ``NullArgument`` -- ``objective_bank_id`` or ``proxy`` is ``null``
:raise: ``OperationFailed`` -- ``unable to complete request``
:raise: ``Unimplemented`` -- ``supports_objective_requisite_assignment()`` or ``supports_visible_federation()`` is ``false``

*compliance: optional -- This method must be implemented if ``supports_objective_requisite_assignment()`` and ``supports_visible_federation()`` are ``true``.*
def irreg(self, i):
    """ Return the irregular form for morpho i. excl becomes True if it is exclusive, False otherwise.

    :return: Irregular form for morpho i, exclusivity flag
    :rtype: tuple.<str, bool>
    """
    excl = False
    for ir in self._irregs:
        if i in ir.morphos():
            return ir.grq(), ir.exclusif
    return "", excl
Return the irregular form for morpho i. excl becomes True if it is exclusive, False otherwise.

:return: Irregular form for morpho i, exclusivity flag
:rtype: tuple.<str, bool>
def create_free_space_request_content(): """Creates an XML for requesting of free space on remote WebDAV server. :return: the XML string of request content. """ root = etree.Element('propfind', xmlns='DAV:') prop = etree.SubElement(root, 'prop') etree.SubElement(prop, 'quota-available-bytes') etree.SubElement(prop, 'quota-used-bytes') tree = etree.ElementTree(root) return WebDavXmlUtils.etree_to_string(tree)
Creates an XML for requesting of free space on remote WebDAV server. :return: the XML string of request content.
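Building the same document with the standard library shows the wire format the method produces (the original presumably uses lxml, whose namespace handling differs slightly):

import xml.etree.ElementTree as etree

root = etree.Element('propfind', xmlns='DAV:')
prop = etree.SubElement(root, 'prop')
etree.SubElement(prop, 'quota-available-bytes')
etree.SubElement(prop, 'quota-used-bytes')
print(etree.tostring(root).decode())
# <propfind xmlns="DAV:"><prop><quota-available-bytes /><quota-used-bytes /></prop></propfind>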
def reset_password_view(self, token): """ Verify the password reset token, Prompt for new password, and set the user's password.""" # Verify token if self.call_or_get(current_user.is_authenticated): logout_user() data_items = self.token_manager.verify_token( token, self.USER_RESET_PASSWORD_EXPIRATION) user = None if data_items: # Get User by user ID user_id = data_items[0] user = self.db_manager.get_user_by_id(user_id) # Mark email as confirmed user_or_user_email_object = self.db_manager.get_primary_user_email_object(user) user_or_user_email_object.email_confirmed_at = datetime.utcnow() self.db_manager.save_object(user_or_user_email_object) self.db_manager.commit() if not user: flash(_('Your reset password token is invalid.'), 'error') return redirect(self._endpoint_url('user.login')) # Initialize form form = self.ResetPasswordFormClass(request.form) # Process valid POST if request.method == 'POST' and form.validate(): # Change password password_hash = self.hash_password(form.new_password.data) user.password=password_hash self.db_manager.save_object(user) self.db_manager.commit() # Send 'password_changed' email if self.USER_ENABLE_EMAIL and self.USER_SEND_PASSWORD_CHANGED_EMAIL: self.email_manager.send_password_changed_email(user) # Send reset_password signal signals.user_reset_password.send(current_app._get_current_object(), user=user) # Flash a system message flash(_("Your password has been reset successfully."), 'success') # Auto-login after reset password or redirect to login page safe_next_url = self._get_safe_next_url('next', self.USER_AFTER_RESET_PASSWORD_ENDPOINT) if self.USER_AUTO_LOGIN_AFTER_RESET_PASSWORD: return self._do_login_user(user, safe_next_url) # auto-login else: return redirect(url_for('user.login') + '?next=' + quote(safe_next_url)) # redirect to login page # Render form self.prepare_domain_translations() return render_template(self.USER_RESET_PASSWORD_TEMPLATE, form=form)
Verify the password reset token, Prompt for new password, and set the user's password.
def location_path(self): """ Return the Location-Path of the response. :rtype : String :return: the Location-Path option """ value = [] for option in self.options: if option.number == defines.OptionRegistry.LOCATION_PATH.number: value.append(str(option.value)) return "/".join(value)
Return the Location-Path of the response. :rtype : String :return: the Location-Path option
def _get_translations_multi_paths():
    """Return the correct gettext translations that should be used for this
    request. This will never fail and return a dummy translation object if
    used outside of the request or if a translation cannot be found.
    """
    ctx = _request_ctx_stack.top
    if ctx is None:
        return None

    translations = getattr(ctx, "babel_translations", None)
    if translations is None:
        babel_ext = ctx.app.extensions["babel"]
        translations = None
        trs = None

        # reverse order: thus the application catalog is loaded last, so that
        # translations from libraries can be overridden
        for (dirname, domain) in reversed(babel_ext._translations_paths):
            trs = Translations.load(
                dirname, locales=[flask_babel.get_locale()], domain=domain
            )

            # babel.support.Translations is a subclass of
            # babel.support.NullTranslations, so we test if object has a 'merge'
            # method
            if not trs or not hasattr(trs, "merge"):
                # got None or NullTranslations instance
                continue
            elif translations is not None and hasattr(translations, "merge"):
                translations.merge(trs)
            else:
                translations = trs

        # ensure translations is at least a NullTranslations object
        if translations is None:
            translations = trs

        ctx.babel_translations = translations

    return translations
Return the correct gettext translations that should be used for this request. This will never fail and return a dummy translation object if used outside of the request or if a translation cannot be found.
def map2slim(subjects, slim, **kwargs):
    """
    Maps a set of subjects (e.g. genes) to a set of slims

    Result is a list of unique subject-class pairs, with
    a list of source associations
    """
    logging.info("SLIM SUBJECTS:{} SLIM:{} CAT:{}".format(subjects, slim, kwargs.get('category')))
    searchresult = search_associations(subjects=subjects,
                                       slim=slim,
                                       facet_fields=[],
                                       **kwargs)
    pmap = {}
    for a in searchresult['associations']:
        subj = a['subject']['id']
        slimmed_terms = a['slim']
        #logging.info('SLIM: {} {}'.format(subj, slimmed_terms))
        for t in slimmed_terms:
            k = (subj, t)
            if k not in pmap:
                pmap[k] = []
            pmap[k].append(a)
    results = [{'subject': subj, 'slim': t, 'assocs': assocs}
               for ((subj, t), assocs) in pmap.items()]
    return results
Maps a set of subjects (e.g. genes) to a set of slims

Result is a list of unique subject-class pairs, with
a list of source associations
def append_partition_by_name(self, db_name, tbl_name, part_name): """ Parameters: - db_name - tbl_name - part_name """ self.send_append_partition_by_name(db_name, tbl_name, part_name) return self.recv_append_partition_by_name()
Parameters: - db_name - tbl_name - part_name
def get_many(self, keys): """ Fetch a bunch of keys from the cache. For certain backends (memcached, pgsql) this can be *much* faster when fetching multiple values. Return a dict mapping each key in keys to its value. If the given key is missing, it will be missing from the response dict. """ d = {} for k in keys: val = self.get(k) if val is not None: d[k] = val return d
Fetch a bunch of keys from the cache. For certain backends (memcached, pgsql) this can be *much* faster when fetching multiple values. Return a dict mapping each key in keys to its value. If the given key is missing, it will be missing from the response dict.
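A minimal demonstration with a toy dict-backed cache; only get() is implemented, so get_many() resolves through the fallback loop above:

class DictCache:
    def __init__(self, data):
        self._data = data

    def get(self, key):
        return self._data.get(key)

    def get_many(self, keys):
        # Same fallback loop as above: keys whose value is None are omitted.
        d = {}
        for k in keys:
            val = self.get(k)
            if val is not None:
                d[k] = val
        return d

cache = DictCache({'a': 1, 'b': 2})
print(cache.get_many(['a', 'b', 'missing']))  # {'a': 1, 'b': 2}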
def DoRarExtraction(rarArchive, targetFile, dstDir): """ RAR extraction with exception catching Parameters ---------- rarArchive : RarFile object RarFile object to extract. targetFile : string Target file name. dstDir : string Target directory. Returns ---------- boolean False if rar extraction failed, otherwise True. """ try: rarArchive.extract(targetFile, dstDir) except BaseException as ex: goodlogging.Log.Info("EXTRACT", "Extract failed - Exception: {0}".format(ex)) return False else: return True
RAR extraction with exception catching Parameters ---------- rarArchive : RarFile object RarFile object to extract. targetFile : string Target file name. dstDir : string Target directory. Returns ---------- boolean False if rar extraction failed, otherwise True.
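A hedged call site, assuming the rarfile package (whose RarFile.extract(member, path) this wraps) and the goodlogging dependency are importable; the archive and file names are made up:

import rarfile

archive = rarfile.RarFile('show.rar')
if DoRarExtraction(archive, 'episode01.mkv', '/tmp/extracted'):
    print('extracted OK')
else:
    print('extraction failed; see the EXTRACT log entries')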
def get_meta_regex(schema='mona'): """ Create a dictionary of regex for extracting the meta data for the spectra """ # NOTE: will just ignore cases, to avoid repetition here meta_parse = collections.OrderedDict() if schema == 'mona': meta_parse['collision_energy'] = ['^collision energy(?:=|:)(.*)$'] meta_parse['ms_level'] = ['^ms.*level(?:=|:)\D*(\d*)$', '^ms type(?:=|:)\D*(\d*)$', '^Spectrum_type(?:=|:)\D*(\d*)$'] meta_parse['accession'] = ['^accession(?:=|:)(.*)$', '^DB#(?:=|:)(.*)$'] meta_parse['resolution'] = ['^resolution(?:=|:)(.*)$'] meta_parse['polarity'] = ['^ion.*mode(?:=|:)(.*)$', '^ionization.*mode(?:=|:)(.*)$', '^polarity(?:=|:)(.*)$'] meta_parse['fragmentation_type'] = ['^fragmentation.*mode(?:=|:)(.*)$', '^fragmentation.*type(?:=|:)(.*)$'] meta_parse['precursor_mz'] = ['^precursor m/z(?:=|:)\s*(\d*[.,]?\d*)$', '^precursor.*mz(?:=|:)\s*(\d*[.,]?\d*)$'] meta_parse['precursor_type'] = ['^precursor.*type(?:=|:)(.*)$', '^adduct(?:=|:)(.*)$'] meta_parse['instrument_type'] = ['^instrument.*type(?:=|:)(.*)$'] meta_parse['instrument'] = ['^instrument(?:=|:)(.*)$'] meta_parse['copyright'] = ['^copyright(?:=|:)(.*)$'] # meta_parse['column'] = ['^column(?:=|:)(.*)$'] meta_parse['mass_accuracy'] = ['^mass.*accuracy(?:=|:)\s*(\d*[.,]?\d*)$'] meta_parse['mass_error'] = ['^mass.*error(?:=|:)\s*(\d*[.,]?\d*)$'] meta_parse['origin'] = ['^origin(?:=|:)(.*)$'] meta_parse['name'] = ['^Name(?:=|:)(.*)$'] meta_parse['splash'] = ['^splash:(.*)$'] meta_parse['retention_time'] = ['^retention.*time(?:=|:)\s*(\d*[.,]?\d*)$'] meta_parse['retention_index'] = ['^retention.*index(?:=|:)\s*(\d*[.,]?\d*)$'] elif schema == 'massbank': meta_parse['collision_energy'] = ['^AC\$MASS_SPECTROMETRY:\s+COLLISION_ENERGY\s+(.*)$'] meta_parse['ms_level'] = ['^AC\$MASS_SPECTROMETRY:\s+MS_TYPE\s+\D*(\d*)$'] meta_parse['accession'] = ['^ACCESSION:(.*)$'] meta_parse['resolution'] = ['^AC\$MASS_SPECTROMETRY:\s+RESOLUTION\s+(.*)$'] meta_parse['polarity'] = ['^AC\$MASS_SPECTROMETRY:\s+ION_MODE\s+(.*)$'] meta_parse['fragmentation_type'] = ['^AC\$MASS_SPECTROMETRY:\s+FRAGMENTATION_MODE\s+(.*)$'] meta_parse['precursor_mz'] = ['^MS\$FOCUSED_ION:\s+PRECURSOR_M/Z\s+(\d*[.,]?\d*)$'] meta_parse['precursor_type'] = ['^MS\$FOCUSED_ION:\s+PRECURSOR_TYPE\s+(.*)$'] meta_parse['instrument_type'] = ['^AC\$INSTRUMENT_TYPE:\s+(.*)$'] meta_parse['instrument'] = ['^AC\$INSTRUMENT:\s+(.*)$'] meta_parse['copyright'] = ['^COPYRIGHT:\s+(.*)'] # meta_parse['column'] = ['^column(?:=|:)(.*)$'] meta_parse['mass_accuracy'] = ['^AC\$MASS_SPECTROMETRY:\s+ACCURACY\s+(.*)$'] # need to check meta_parse['mass_error'] = ['^AC\$MASS_SPECTROMETRY:\s+ERROR\s+(.*)$'] # need to check meta_parse['splash'] = ['^PK\$SPLASH:\s+(.*)$'] meta_parse['origin'] = ['^origin(?:=|:)(.*)$'] meta_parse['name'] = ['^RECORD_TITLE:\s+(.*)$'] meta_parse['retention_time'] = ['^AC\$CHROMATOGRAPHY:\s+RETENTION.*TIME\s+(\d*[.,]?\d*)$'] meta_parse['retention_index'] = ['^AC\$CHROMATOGRAPHY:\s+RETENTION.*INDEX\s+(\d*[.,]?\d*)$'] return meta_parse
Create a dictionary of regex for extracting the meta data for the spectra
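Applying one of the returned pattern lists to a raw record line, case-insensitively per the note in the function. (As an aside, the patterns would ordinarily be written as raw strings to avoid invalid-escape warnings.)

import re

meta_parse = get_meta_regex('massbank')
line = 'MS$FOCUSED_ION: PRECURSOR_M/Z 303.0501'
for pattern in meta_parse['precursor_mz']:
    m = re.search(pattern, line, re.IGNORECASE)
    if m:
        print(m.group(1))  # 303.0501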
def in_dir( config_dir=os.path.expanduser('~/.tmuxp'), extensions=['.yml', '.yaml', '.json'] ): """ Return a list of configs in ``config_dir``. Parameters ---------- config_dir : str directory to search extensions : list filetypes to check (e.g. ``['.yaml', '.json']``). Returns ------- list """ configs = [] for filename in os.listdir(config_dir): if is_config_file(filename, extensions) and not filename.startswith('.'): configs.append(filename) return configs
Return a list of configs in ``config_dir``. Parameters ---------- config_dir : str directory to search extensions : list filetypes to check (e.g. ``['.yaml', '.json']``). Returns ------- list
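The is_config_file helper is not shown; a plausible sketch consistent with the extensions argument:

def is_config_file(filename, extensions=('.yml', '.yaml', '.json')):
    """True if filename ends with one of the given extensions."""
    return any(filename.endswith(ext) for ext in extensions)

print(is_config_file('session.yaml'))  # True
print(is_config_file('notes.txt'))     # False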
def __realized_bbox(self, requested_bbox): """ The requested bbox might not be aligned to the underlying chunk grid or even outside the bounds of the dataset. Convert the request into a bbox representing something that can be actually downloaded. Returns: Bbox """ realized_bbox = requested_bbox.expand_to_chunk_size(self.underlying, offset=self.voxel_offset) return Bbox.clamp(realized_bbox, self.bounds)
The requested bbox might not be aligned to the underlying chunk grid or even outside the bounds of the dataset. Convert the request into a bbox representing something that can be actually downloaded. Returns: Bbox
def _energy_minimize_openmm(
        self, tmp_dir, forcefield_files=None, forcefield_name=None,
        steps=1000, scale_bonds=1, scale_angles=1,
        scale_torsions=1, scale_nonbonded=1):
    """ Perform energy minimization using OpenMM

    Converts an mBuild Compound to a Parmed Structure,
    applies a forcefield using Foyer, and creates an OpenMM System.

    Parameters
    ----------
    forcefield_files : str or list of str, optional, default=None
        Forcefield files to load
    forcefield_name : str, optional, default=None
        Apply a named forcefield to the output file using the `foyer`
        package, e.g. 'oplsaa'. Forcefields listed here:
        https://github.com/mosdef-hub/foyer/tree/master/foyer/forcefields
    steps : int, optional, default=1000
        Number of energy minimization iterations
    scale_bonds : float, optional, default=1
        Scales the bond force constant (1 is completely on)
    scale_angles : float, optional, default=1
        Scales the angle force constant (1 is completely on)
    scale_torsions : float, optional, default=1
        Scales the torsional force constants (1 is completely on)
    scale_nonbonded : float, optional, default=1
        Scales epsilon (1 is completely on)

    Notes
    -----
    Assumes a particular organization for the force groups
    (HarmonicBondForce, HarmonicAngleForce, RBTorsionForce, NonbondedForce)

    References
    ----------
    .. [1] P. Eastman, M. S. Friedrichs, J. D. Chodera, R. J. Radmer,
       C. M. Bruns, J. P. Ku, K. A. Beauchamp, T. J. Lane, L.-P. Wang,
       D. Shukla, T. Tye, M. Houston, T. Stich, C. Klein, M. R. Shirts,
       and V. S. Pande. "OpenMM 4: A Reusable, Extensible, Hardware
       Independent Library for High Performance Molecular Simulation."
       J. Chem. Theor. Comput. 9(1): 461-469. (2013).
    """
    foyer = import_('foyer')
    to_parmed = self.to_parmed()
    ff = foyer.Forcefield(forcefield_files=forcefield_files, name=forcefield_name)
    to_parmed = ff.apply(to_parmed)

    from simtk.openmm.app.simulation import Simulation
    from simtk.openmm.app.pdbreporter import PDBReporter
    from simtk.openmm.openmm import LangevinIntegrator
    import simtk.unit as u

    system = to_parmed.createSystem()
    integrator = LangevinIntegrator(298 * u.kelvin, 1 / u.picosecond,
                                    0.002 * u.picoseconds)
    simulation = Simulation(to_parmed.topology, system, integrator)

    for force in system.getForces():
        if type(force).__name__ == "HarmonicBondForce":
            for bond_index in range(force.getNumBonds()):
                atom1, atom2, r0, k = force.getBondParameters(bond_index)
                force.setBondParameters(bond_index, atom1, atom2, r0,
                                        k * scale_bonds)
            force.updateParametersInContext(simulation.context)

        elif type(force).__name__ == "HarmonicAngleForce":
            for angle_index in range(force.getNumAngles()):
                atom1, atom2, atom3, r0, k = force.getAngleParameters(
                    angle_index)
                force.setAngleParameters(angle_index, atom1, atom2, atom3,
                                         r0, k * scale_angles)
            force.updateParametersInContext(simulation.context)

        elif type(force).__name__ == "RBTorsionForce":
            for torsion_index in range(force.getNumTorsions()):
                atom1, atom2, atom3, atom4, c0, c1, c2, c3, c4, c5 = force.getTorsionParameters(
                    torsion_index)
                force.setTorsionParameters(
                    torsion_index, atom1, atom2, atom3, atom4,
                    c0 * scale_torsions, c1 * scale_torsions,
                    c2 * scale_torsions, c3 * scale_torsions,
                    c4 * scale_torsions, c5 * scale_torsions)
            force.updateParametersInContext(simulation.context)

        elif type(force).__name__ == "NonbondedForce":
            for nb_index in range(force.getNumParticles()):
                charge, sigma, epsilon = force.getParticleParameters(
                    nb_index)
                force.setParticleParameters(nb_index, charge, sigma,
                                            epsilon * scale_nonbonded)
            force.updateParametersInContext(simulation.context)

        elif type(force).__name__ == "CMMotionRemover":
            pass

        else:
            warn(
                'OpenMM Force {} is '
                'not currently supported in _energy_minimize_openmm. '
                'This Force will not be updated!'.format(
                    type(force).__name__))

    simulation.context.setPositions(to_parmed.positions)
    simulation.minimizeEnergy(maxIterations=steps)
    reporter = PDBReporter(os.path.join(tmp_dir, 'minimized.pdb'), 1)
    reporter.report(
        simulation,
        simulation.context.getState(getPositions=True))
Perform energy minimization using OpenMM

Converts an mBuild Compound to a Parmed Structure,
applies a forcefield using Foyer, and creates an OpenMM System.

Parameters
----------
forcefield_files : str or list of str, optional, default=None
    Forcefield files to load
forcefield_name : str, optional, default=None
    Apply a named forcefield to the output file using the `foyer`
    package, e.g. 'oplsaa'. Forcefields listed here:
    https://github.com/mosdef-hub/foyer/tree/master/foyer/forcefields
steps : int, optional, default=1000
    Number of energy minimization iterations
scale_bonds : float, optional, default=1
    Scales the bond force constant (1 is completely on)
scale_angles : float, optional, default=1
    Scales the angle force constant (1 is completely on)
scale_torsions : float, optional, default=1
    Scales the torsional force constants (1 is completely on)
scale_nonbonded : float, optional, default=1
    Scales epsilon (1 is completely on)

Notes
-----
Assumes a particular organization for the force groups
(HarmonicBondForce, HarmonicAngleForce, RBTorsionForce, NonbondedForce)

References
----------
.. [1] P. Eastman, M. S. Friedrichs, J. D. Chodera, R. J. Radmer,
   C. M. Bruns, J. P. Ku, K. A. Beauchamp, T. J. Lane, L.-P. Wang,
   D. Shukla, T. Tye, M. Houston, T. Stich, C. Klein, M. R. Shirts,
   and V. S. Pande. "OpenMM 4: A Reusable, Extensible, Hardware
   Independent Library for High Performance Molecular Simulation."
   J. Chem. Theor. Comput. 9(1): 461-469. (2013).
def add_search_path(*path_tokens): """ Adds a new search path from where modules can be loaded. This function is provided for test applications to add locations to the search path, so any required functionality can be loaded. It helps keeping the step implementation modules simple by placing the bulk of the implementation in separate utility libraries. This function can also be used to add the application being tested to the path, so its functionality can be made available for testing. :param arglist path_tokens: Variable list of path tokens that is joined to create the full, absolute path to be added. """ full_path = os.path.join(*path_tokens) if full_path not in sys.path: sys.path.insert(0, os.path.abspath(full_path))
Adds a new search path from where modules can be loaded. This function is provided for test applications to add locations to the search path, so any required functionality can be loaded. It helps keeping the step implementation modules simple by placing the bulk of the implementation in separate utility libraries. This function can also be used to add the application being tested to the path, so its functionality can be made available for testing. :param arglist path_tokens: Variable list of path tokens that is joined to create the full, absolute path to be added.
def uint32_gt(a: int, b: int) -> bool: """ Return a > b. """ half_mod = 0x80000000 return (((a < b) and ((b - a) > half_mod)) or ((a > b) and ((a - b) < half_mod)))
Return a > b.
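The half-range test is wrap-aware comparison in the style of RFC 1982 serial number arithmetic: a counter that has wrapped past 0xFFFFFFFF still compares as greater than values just below the wrap point.

print(uint32_gt(5, 3))           # True  -- ordinary case
print(uint32_gt(1, 0xFFFFFFFF))  # True  -- 1 is 'after' the wrap
print(uint32_gt(0xFFFFFFFF, 1))  # False
print(uint32_gt(0x80000001, 0))  # False -- more than half the range apart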
def transform(self, v3): """ Calculates the vector transformed by this quaternion :param v3: Vector3 to be transformed :returns: transformed vector """ if isinstance(v3, Vector3): t = super(Quaternion, self).transform([v3.x, v3.y, v3.z]) return Vector3(t[0], t[1], t[2]) elif len(v3) == 3: return super(Quaternion, self).transform(v3) else: raise TypeError("param v3 is not a vector type")
Calculates the vector transformed by this quaternion :param v3: Vector3 to be transformed :returns: transformed vector
def set_widths(self, estimation, widths):
    """Set estimation on widths

    Parameters
    ----------

    estimation : 1D array
        Either prior or posterior estimation

    widths : 2D array, in shape [K, 1]
        Estimation on widths

    """
    estimation[self.map_offset[1]:self.map_offset[2]] = widths.ravel()
Set estimation on widths

Parameters
----------

estimation : 1D array
    Either prior or posterior estimation

widths : 2D array, in shape [K, 1]
    Estimation on widths
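A standalone illustration of the slice assignment: a (K, 1) widths array is flattened into the [map_offset[1], map_offset[2]) window of a 1D estimation vector. The offsets here are invented for the demo:

import numpy as np

estimation = np.zeros(6)
map_offset = [0, 3, 5]             # hypothetical layout: widths live at [3, 5)
widths = np.array([[1.5], [2.5]])  # K = 2
estimation[map_offset[1]:map_offset[2]] = widths.ravel()
print(estimation)  # [0.  0.  0.  1.5 2.5 0. ]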
async def check_passwd(self, identity: str, passwd: str) -> SessionIdentity:
    """ Verify an identity by password """

    assert identity

    value, _ = await self._client.get(f"{self._prefix_identity}/{identity}")
    if value is None:
        logger.debug(f'Not found identity: {identity}')
        raise Unauthorized(f"No such login identity: '{identity}'")

    profile = json.loads(value.decode('utf-8'))
    user_id = profile['user_id']
    identity = profile['identity']
    hashed = profile['hashed']
    if sha256_crypt.verify(passwd, hashed):
        return SessionIdentity(user_id=user_id, identity=identity)
    else:
        raise Unauthorized(f"Authentication failed for login identity '{identity}'")
Verify an identity by password
def cleanup(self): """ Do cleanup (stop and remove watchdogs that are no longer needed) :return: None """ for task in self.__done_registry: task.stop() self.__done_registry.clear() self.cleanup_event().clear()
Do cleanup (stop and remove watchdogs that are no longer needed) :return: None
def postadressen(self): ''' Returns the postadressen for this Perceel. Will only take the huisnummers with status `inGebruik` into account. :rtype: list ''' return [h.postadres for h in self.huisnummers if h.status.id == '3']
Returns the postadressen for this Perceel. Will only take the huisnummers with status `inGebruik` into account. :rtype: list
def active_trail_nodes(self, variables, observed=None): """ Returns a dictionary with the given variables as keys and all the nodes reachable from that respective variable as values. Parameters ---------- variables: str or array like variables whose active trails are to be found. observed : List of nodes (optional) If given the active trails would be computed assuming these nodes to be observed. Examples -------- >>> from pgmpy.base import DAG >>> student = DAG() >>> student.add_nodes_from(['diff', 'intel', 'grades']) >>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades')]) >>> student.active_trail_nodes('diff') {'diff': {'diff', 'grades'}} >>> student.active_trail_nodes(['diff', 'intel'], observed='grades') {'diff': {'diff', 'intel'}, 'intel': {'diff', 'intel'}} References ---------- Details of the algorithm can be found in 'Probabilistic Graphical Model Principles and Techniques' - Koller and Friedman Page 75 Algorithm 3.1 """ if observed: observed_list = observed if isinstance(observed, (list, tuple)) else [observed] else: observed_list = [] ancestors_list = self._get_ancestors_of(observed_list) # Direction of flow of information # up -> from parent to child # down -> from child to parent active_trails = {} for start in variables if isinstance(variables, (list, tuple)) else [variables]: visit_list = set() visit_list.add((start, 'up')) traversed_list = set() active_nodes = set() while visit_list: node, direction = visit_list.pop() if (node, direction) not in traversed_list: if node not in observed_list: active_nodes.add(node) traversed_list.add((node, direction)) if direction == 'up' and node not in observed_list: for parent in self.predecessors(node): visit_list.add((parent, 'up')) for child in self.successors(node): visit_list.add((child, 'down')) elif direction == 'down': if node not in observed_list: for child in self.successors(node): visit_list.add((child, 'down')) if node in ancestors_list: for parent in self.predecessors(node): visit_list.add((parent, 'up')) active_trails[start] = active_nodes return active_trails
Returns a dictionary with the given variables as keys and all the nodes reachable from that respective variable as values. Parameters ---------- variables: str or array like variables whose active trails are to be found. observed : List of nodes (optional) If given the active trails would be computed assuming these nodes to be observed. Examples -------- >>> from pgmpy.base import DAG >>> student = DAG() >>> student.add_nodes_from(['diff', 'intel', 'grades']) >>> student.add_edges_from([('diff', 'grades'), ('intel', 'grades')]) >>> student.active_trail_nodes('diff') {'diff': {'diff', 'grades'}} >>> student.active_trail_nodes(['diff', 'intel'], observed='grades') {'diff': {'diff', 'intel'}, 'intel': {'diff', 'intel'}} References ---------- Details of the algorithm can be found in 'Probabilistic Graphical Model Principles and Techniques' - Koller and Friedman Page 75 Algorithm 3.1
def set_feature_flag_courses(self, feature, course_id, state=None): """ Set feature flag. Set a feature flag for a given Account, Course, or User. This call will fail if a parent account sets a feature flag for the same feature in any state other than "allowed". """ path = {} data = {} params = {} # REQUIRED - PATH - course_id """ID""" path["course_id"] = course_id # REQUIRED - PATH - feature """ID""" path["feature"] = feature # OPTIONAL - state """"off":: The feature is not available for the course, user, or account and sub-accounts. "allowed":: (valid only on accounts) The feature is off in the account, but may be enabled in sub-accounts and courses by setting a feature flag on the sub-account or course. "on":: The feature is turned on unconditionally for the user, course, or account and sub-accounts.""" if state is not None: self._validate_enum(state, ["off", "allowed", "on"]) data["state"] = state self.logger.debug("PUT /api/v1/courses/{course_id}/features/flags/{feature} with query params: {params} and form data: {data}".format(params=params, data=data, **path)) return self.generic_request("PUT", "/api/v1/courses/{course_id}/features/flags/{feature}".format(**path), data=data, params=params, single_item=True)
Set feature flag. Set a feature flag for a given Account, Course, or User. This call will fail if a parent account sets a feature flag for the same feature in any state other than "allowed".
def _cromwell_move_outputs(metadata, final_dir): """Move Cromwell outputs to the final upload directory. """ sample_key = [k for k in metadata["outputs"].keys() if k.endswith(("rgnames__sample", "rgnames__sample_out"))][0] project_dir = utils.safe_makedir(os.path.join(final_dir, "project")) samples = metadata["outputs"][sample_key] def _copy_with_secondary(f, dirname): if len(f["secondaryFiles"]) > 1: dirname = utils.safe_makedir(os.path.join(dirname, os.path.basename(os.path.dirname(f["location"])))) if not objectstore.is_remote(f["location"]): finalf = os.path.join(dirname, os.path.basename(f["location"])) if not utils.file_uptodate(finalf, f["location"]): shutil.copy(f["location"], dirname) [_copy_with_secondary(sf, dirname) for sf in f["secondaryFiles"]] def _write_to_dir(val, dirname): if isinstance(val, (list, tuple)): [_write_to_dir(v, dirname) for v in val] else: _copy_with_secondary(val, dirname) for k, vals in metadata["outputs"].items(): if k != sample_key: if k.endswith(("summary__multiqc")): vs = [v for v in vals if v] assert len(vs) == 1 _write_to_dir(vs[0], project_dir) elif len(vals) == len(samples): for s, v in zip(samples, vals): if v: _write_to_dir(v, utils.safe_makedir(os.path.join(final_dir, s))) elif len(vals) == 1: _write_to_dir(vals[0], project_dir) elif len(vals) > 0: raise ValueError("Unexpected sample and outputs: %s %s %s" % (k, samples, vals))
Move Cromwell outputs to the final upload directory.