code: string (lengths 75 to 104k)
docstring: string (lengths 1 to 46.9k)
def _save_translations(sender, instance, *args, **kwargs): """ This signal saves model translations. """ # If we are in a site with one language there is no need of saving translations if site_is_monolingual(): return False cls = sender # If its class has no "translatable_fields" then there are no translations if not hasattr(cls._meta, "translatable_fields"): return False # For each translatable field, get its value, computes its md5 and for each language creates its # empty translation. for field in cls._meta.translatable_fields: value = getattr(instance,field) if not value is None: md5_value = checksum(value) setattr( instance, u"md5"+field, md5_value ) for lang in settings.LANGUAGES: lang = lang[0] # print "({0}!={1}) = {2}".format(lang, settings.LANGUAGE_CODE,lang!=settings.LANGUAGE_CODE) if lang != settings.LANGUAGE_CODE: context = u"Updating from object" if hasattr(instance, "trans_context"): context = getattr(instance, "trans_context") trans = FieldTranslation.update(instance, field, lang, context)
This signal saves model translations.
def consume(self, seq): '''Counts all k-mers in sequence.''' for kmer in iter_kmers(seq, self.k, canonical=self.canonical): self._incr(kmer)
Counts all k-mers in sequence.
def _find_home_or_away(self, row): """ Determine whether the player is on the home or away team. Next to every player is their school's name. This name can be matched with the previously parsed home team's name to determine if the player is a member of the home or away team. Parameters ---------- row : PyQuery object A PyQuery object representing a single row in a boxscore table for a single player. Returns ------- str Returns a ``string`` constant denoting whether the team plays for the home or away team. """ name = row('a:last').text() if name == self._home_name.text(): return HOME else: return AWAY
Determine whether the player is on the home or away team. Next to every player is their school's name. This name can be matched with the previously parsed home team's name to determine if the player is a member of the home or away team. Parameters ---------- row : PyQuery object A PyQuery object representing a single row in a boxscore table for a single player. Returns ------- str Returns a ``string`` constant denoting whether the team plays for the home or away team.
def remote_server_command(command, environment, user_profile, **kwargs): """ Wraps web_command function with docker bindings needed to connect to a remote server (such as datacats.com) and run commands there (for example, when you want to copy your catalog to that server). The files binded to the docker image include the user's ssh credentials: ssh_config file, rsa and rsa.pub user keys known_hosts whith public keys of the remote server (if known) The **kwargs (keyword arguments) are passed on to the web_command call intact, see the web_command's doc string for details """ if environment.remote_server_key: temp = tempfile.NamedTemporaryFile(mode="wb") temp.write(environment.remote_server_key) temp.seek(0) known_hosts = temp.name else: known_hosts = get_script_path('known_hosts') binds = { user_profile.profiledir + '/id_rsa': '/root/.ssh/id_rsa', known_hosts: '/root/.ssh/known_hosts', get_script_path('ssh_config'): '/etc/ssh/ssh_config' } if kwargs.get("include_project_dir", None): binds[environment.target] = '/project' del kwargs["include_project_dir"] kwargs["ro"] = binds try: web_command(command, **kwargs) except WebCommandError as e: e.user_description = 'Sending a command to remote server failed' raise e
Wraps the web_command function with the docker bindings needed to connect to a remote server (such as datacats.com) and run commands there (for example, when you want to copy your catalog to that server). The files bound to the docker image include the user's ssh credentials: the ssh_config file, the rsa and rsa.pub user keys, and known_hosts with the public keys of the remote server (if known). The **kwargs (keyword arguments) are passed on to the web_command call intact; see web_command's docstring for details.
def generate_gap_bed(fname, outname): """ Generate a BED file with gap locations. Parameters ---------- fname : str Filename of input FASTA file. outname : str Filename of output BED file. """ f = Fasta(fname) with open(outname, "w") as bed: for chrom in f.keys(): for m in re.finditer(r'N+', f[chrom][:].seq): bed.write("{}\t{}\t{}\n".format(chrom, m.start(0), m.end(0)))
Generate a BED file with gap locations. Parameters ---------- fname : str Filename of input FASTA file. outname : str Filename of output BED file.
def pkgdb(opts): ''' Return modules for SPM's package database .. versionadded:: 2015.8.0 ''' return LazyLoader( _module_dirs( opts, 'pkgdb', base_path=os.path.join(SALT_BASE_PATH, 'spm') ), opts, tag='pkgdb' )
Return modules for SPM's package database .. versionadded:: 2015.8.0
def uninstall(self, pkgname, *args, **kwargs): """A context manager which allows uninstallation of packages from the environment :param str pkgname: The name of a package to uninstall >>> env = Environment("/path/to/env/root") >>> with env.uninstall("pytz", auto_confirm=True, verbose=False) as uninstaller: cleaned = uninstaller.paths >>> if cleaned: print("uninstalled packages: %s" % cleaned) """ auto_confirm = kwargs.pop("auto_confirm", True) verbose = kwargs.pop("verbose", False) with self.activated(): monkey_patch = next(iter( dist for dist in self.base_working_set if dist.project_name == "recursive-monkey-patch" ), None) if monkey_patch: monkey_patch.activate() pip_shims = self.safe_import("pip_shims") pathset_base = pip_shims.UninstallPathSet pathset_base._permitted = PatchedUninstaller._permitted dist = next( iter(filter(lambda d: d.project_name == pkgname, self.get_working_set())), None ) pathset = pathset_base.from_dist(dist) if pathset is not None: pathset.remove(auto_confirm=auto_confirm, verbose=verbose) try: yield pathset except Exception as e: if pathset is not None: pathset.rollback() else: if pathset is not None: pathset.commit() if pathset is None: return
A context manager which allows uninstallation of packages from the environment :param str pkgname: The name of a package to uninstall >>> env = Environment("/path/to/env/root") >>> with env.uninstall("pytz", auto_confirm=True, verbose=False) as uninstaller: cleaned = uninstaller.paths >>> if cleaned: print("uninstalled packages: %s" % cleaned)
def registration_form_received(self, stanza): """Handle registration form received. [client only] Call self.registration_callback with the registration form received as the argument. Use the value returned by the callback will be a filled-in form. :Parameters: - `stanza`: the stanza received. :Types: - `stanza`: `pyxmpp.iq.Iq`""" self.lock.acquire() try: self.__register = Register(stanza.get_query()) self.registration_callback(stanza, self.__register.get_form()) finally: self.lock.release()
Handle a received registration form. [client only] Call self.registration_callback with the received registration form as the argument. The value returned by the callback will be used as the filled-in form. :Parameters: - `stanza`: the stanza received. :Types: - `stanza`: `pyxmpp.iq.Iq`
def JZ(cpu, target): """ Jumps short if zero. :param cpu: current CPU. :param target: destination operand. """ cpu.PC = Operators.ITEBV(cpu.address_bit_size, cpu.ZF, target.read(), cpu.PC)
Jumps short if zero. :param cpu: current CPU. :param target: destination operand.
def load(self, fileobj): '''Load the dict from the file object''' # try formats from most restrictive to least restrictive for loader in (pickle.load, json.load, csv.reader): fileobj.seek(0) try: return self.initial_update(loader(fileobj)) except Exception as e: pass raise ValueError('File not in a supported format')
Load the dict from the file object
def get_stroke_glide_indices(A_g_hf, fs_a, J, t_max): '''Get stroke and glide indices from high-pass accelerometer data Args ---- A_g_hf: 1-D ndarray Animal frame triaxial accelerometer matrix at sampling rate fs_a. fs_a: int Number of accelerometer samples per second J: float Frequency threshold for detecting a fluke stroke in m/s^2. If J is not given, fluke strokes will not be located but the rotations signal (pry) will be computed. t_max: int Maximum duration allowable for a fluke stroke in seconds. A fluke stroke is counted whenever there is a cyclic variation in the pitch deviation with peak-to-peak magnitude greater than +/-J and consistent with a fluke stroke duration of less than t_max seconds, e.g., for Mesoplodon choose t_max=4. Returns ------- GL: 1-D ndarray Matrix containing the start time (first column) and end time (2nd column) of any glides (i.e., no zero crossings in t_max or more seconds). Times are in seconds. Note ---- If no J or t_max is given, J=[], or t_max=[], GL returned as None ''' import numpy from . import dsp # Check if input array is 1-D if A_g_hf.ndim > 1: raise IndexError('A_g_hf multidimensional: Glide index determination ' 'requires 1-D acceleration array as input') # Convert t_max to number of samples n_max = t_max * fs_a # Find zero-crossing start/stops in pry(:,n), rotations around n axis. zc = dsp.findzc(A_g_hf, J, n_max/2) # find glides - any interval between zeros crossings greater than `t_max` ind = numpy.where(zc[1:, 0] - zc[0:-1, 1] > n_max)[0] gl_ind = numpy.vstack([zc[ind, 0] - 1, zc[ind + 1, 1] + 1]).T # Compute mean index position of glide, Only include sections with jerk < J gl_mean_idx = numpy.round(numpy.mean(gl_ind, 1)).astype(int) gl_ind = numpy.round(gl_ind).astype(int) for i in range(len(gl_mean_idx)): col = range(gl_mean_idx[i], gl_ind[i, 0], - 1) test = numpy.where(numpy.isnan(A_g_hf[col]))[0] if test.size != 0: gl_mean_idx[i] = numpy.nan gl_ind[i,0] = numpy.nan gl_ind[i,1] = numpy.nan else: over_J1 = numpy.where(abs(A_g_hf[col]) >= J)[0][0] gl_ind[i,0] = gl_mean_idx[i] - over_J1 + 1 col = range(gl_mean_idx[i], gl_ind[i, 1]) over_J2 = numpy.where(abs(A_g_hf[col]) >= J)[0][0] gl_ind[i,1] = gl_mean_idx[i] + over_J2 - 1 GL = gl_ind GL = GL[numpy.where(GL[:, 1] - GL[:, 0] > n_max / 2)[0], :] return GL
Get stroke and glide indices from high-pass accelerometer data Args ---- A_g_hf: 1-D ndarray Animal frame triaxial accelerometer matrix at sampling rate fs_a. fs_a: int Number of accelerometer samples per second J: float Frequency threshold for detecting a fluke stroke in m/s^2. If J is not given, fluke strokes will not be located but the rotations signal (pry) will be computed. t_max: int Maximum duration allowable for a fluke stroke in seconds. A fluke stroke is counted whenever there is a cyclic variation in the pitch deviation with peak-to-peak magnitude greater than +/-J and consistent with a fluke stroke duration of less than t_max seconds, e.g., for Mesoplodon choose t_max=4. Returns ------- GL: 1-D ndarray Matrix containing the start time (first column) and end time (2nd column) of any glides (i.e., no zero crossings in t_max or more seconds). Times are in seconds. Note ---- If no J or t_max is given, J=[], or t_max=[], GL returned as None
def AddArguments(cls, argument_group): """Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group. """ argument_group.add_argument( '--append', dest='append', action='store_true', default=False, required=cls._DEFAULT_APPEND, help=( 'Defines whether the intention is to append to an already ' 'existing database or overwrite it. Defaults to overwrite.')) argument_group.add_argument( '--evidence', dest='evidence', type=str, default=cls._DEFAULT_EVIDENCE, action='store', required=False, help='Set the evidence field to a specific value, defaults to empty.') argument_group.add_argument( '--fields', dest='fields', type=str, action='store', default=cls._DEFAULT_FIELDS, help=( 'Defines which fields should be indexed in the database.')) argument_group.add_argument( '--additional_fields', dest='additional_fields', type=str, action='store', default='', help=( 'Defines extra fields to be included in the output, in addition to' ' the default fields, which are {0:s}.'.format( cls._DEFAULT_FIELDS)))
Adds command line arguments the helper supports to an argument group. This function takes an argument parser or an argument group object and adds to it all the command line arguments this helper supports. Args: argument_group (argparse._ArgumentGroup|argparse.ArgumentParser): argparse group.
def config_dict(config): """ Given a Sphinx config object, return a dictionary of config values. """ return dict( (key, getattr(config, key)) for key in config.values )
Given a Sphinx config object, return a dictionary of config values.
def retry(tries, delay=0, back_off=1, raise_msg=''): """Retries a function or method until it got True. - ``delay`` sets the initial delay in seconds - ``back_off`` sets the factor by which - ``raise_msg`` if not '', it'll raise an Exception """ if back_off < 1: raise ValueError('back_off must be 1 or greater') tries = math.floor(tries) if tries < 0: raise ValueError('tries must be 0 or greater') if delay < 0: raise ValueError('delay must be 0 or greater') def deco_retry(f): def f_retry(*args, **kwargs): max_tries, max_delay = tries, delay # make mutable while max_tries > 0: rv = f(*args, **kwargs) # first attempt if rv: # Done on success return rv max_tries -= 1 # consume an attempt time.sleep(max_delay) # wait... max_delay *= back_off # make future wait longer else: if raise_msg: raise Exception(raise_msg) return return f_retry # true decorator -> decorated function return deco_retry
Retries a function or method until it returns a truthy value. - ``tries`` sets the maximum number of attempts - ``delay`` sets the initial delay in seconds - ``back_off`` sets the factor by which the delay grows after each failed attempt - ``raise_msg`` if not '', an Exception with this message is raised once all tries are exhausted
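A minimal usage sketch of the decorator above; the check_service probe and its ping_service call are hypothetical placeholders, and the decorator is assumed to be importable from wherever this module lives. With these arguments it waits 0.2 s, 0.4 s, 0.8 s and 1.6 s between attempts and raises once the five tries are exhausted.

@retry(tries=5, delay=0.2, back_off=2, raise_msg='service never became ready')
def check_service():
    # Hypothetical probe: truthy once the service answers, falsy otherwise.
    return ping_service()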
def bootstrap(ns_var_name: str = NS_VAR_NAME, core_ns_name: str = CORE_NS) -> None: """Bootstrap the environment with functions that are are difficult to express with the very minimal lisp environment.""" core_ns_sym = sym.symbol(core_ns_name) ns_var_sym = sym.symbol(ns_var_name, ns=core_ns_name) __NS = Maybe(Var.find(ns_var_sym)).or_else_raise( lambda: RuntimeException(f"Dynamic Var {ns_var_sym} not bound!") ) def in_ns(s: sym.Symbol): ns = Namespace.get_or_create(s) __NS.value = ns return ns Var.intern_unbound(core_ns_sym, sym.symbol("unquote")) Var.intern_unbound(core_ns_sym, sym.symbol("unquote-splicing")) Var.intern( core_ns_sym, sym.symbol("in-ns"), in_ns, meta=lmap.map({_REDEF_META_KEY: True}) ) Var.intern( core_ns_sym, sym.symbol(_PRINT_GENERATED_PY_VAR_NAME), False, dynamic=True, meta=lmap.map({_PRIVATE_META_KEY: True}), ) Var.intern( core_ns_sym, sym.symbol(_GENERATED_PYTHON_VAR_NAME), "", dynamic=True, meta=lmap.map({_PRIVATE_META_KEY: True}), ) # Dynamic Vars for controlling printing Var.intern( core_ns_sym, sym.symbol(_PRINT_DUP_VAR_NAME), lobj.PRINT_DUP, dynamic=True ) Var.intern( core_ns_sym, sym.symbol(_PRINT_LENGTH_VAR_NAME), lobj.PRINT_LENGTH, dynamic=True ) Var.intern( core_ns_sym, sym.symbol(_PRINT_LEVEL_VAR_NAME), lobj.PRINT_LEVEL, dynamic=True ) Var.intern( core_ns_sym, sym.symbol(_PRINT_META_VAR_NAME), lobj.PRINT_META, dynamic=True ) Var.intern( core_ns_sym, sym.symbol(_PRINT_READABLY_VAR_NAME), lobj.PRINT_READABLY, dynamic=True, )
Bootstrap the environment with functions that are difficult to express with the very minimal lisp environment.
def _storage_list_keys(bucket, pattern): """ List all storage keys in a specified bucket that match a pattern. """ data = [{'Name': item.metadata.name, 'Type': item.metadata.content_type, 'Size': item.metadata.size, 'Updated': item.metadata.updated_on} for item in _storage_get_keys(bucket, pattern)] return datalab.utils.commands.render_dictionary(data, ['Name', 'Type', 'Size', 'Updated'])
List all storage keys in a specified bucket that match a pattern.
def build_slabs(self): """ Builds the reconstructed slab by: (1) Obtaining the unreconstructed slab using the specified parameters for the SlabGenerator. (2) Applying the appropriate lattice transformation in the a and b lattice vectors. (3) Remove any specified sites from both surfaces. (4) Add any specified sites to both surfaces. Returns: (Slab): The reconstructed slab. """ slabs = self.get_unreconstructed_slabs() recon_slabs = [] for slab in slabs: d = get_d(slab) top_site = sorted(slab, key=lambda site: site.frac_coords[2])[-1].coords # Remove any specified sites if "points_to_remove" in self.reconstruction_json.keys(): pts_to_rm = copy.deepcopy(self.reconstruction_json["points_to_remove"]) for p in pts_to_rm: p[2] = slab.lattice.get_fractional_coords([top_site[0], top_site[1], top_site[2]+p[2]*d])[2] cart_point = slab.lattice.get_cartesian_coords(p) dist = [site.distance_from_point(cart_point) for site in slab] site1 = dist.index(min(dist)) slab.symmetrically_remove_atoms([site1]) # Add any specified sites if "points_to_add" in self.reconstruction_json.keys(): pts_to_add = copy.deepcopy(self.reconstruction_json["points_to_add"]) for p in pts_to_add: p[2] = slab.lattice.get_fractional_coords([top_site[0], top_site[1], top_site[2]+p[2]*d])[2] slab.symmetrically_add_atom(slab[0].specie, p) slab.reconstruction = self.name setattr(slab, "recon_trans_matrix", self.trans_matrix) # Get the oriented_unit_cell with the same axb area. ouc = slab.oriented_unit_cell.copy() ouc.make_supercell(self.trans_matrix) slab.oriented_unit_cell = ouc recon_slabs.append(slab) return recon_slabs
Builds the reconstructed slab by: (1) Obtaining the unreconstructed slab using the specified parameters for the SlabGenerator. (2) Applying the appropriate lattice transformation in the a and b lattice vectors. (3) Remove any specified sites from both surfaces. (4) Add any specified sites to both surfaces. Returns: (Slab): The reconstructed slab.
def cmServicePrompt(): """CM SERVICE PROMPT Section 9.2.5a""" a = TpPd(pd=0x5) b = MessageType(mesType=0x25) # 00100101 c = PdAndSapi() packet = a / b / c return packet
CM SERVICE PROMPT Section 9.2.5a
def on_song_changed(self, song): """bind song changed signal with this""" if song is None or song.lyric is None: self._lyric = None self._pos_s_map = {} else: self._lyric = song.lyric.content self._pos_s_map = parse(self._lyric) self._pos_list = sorted(list(self._pos_s_map.keys())) self._pos = None self.current_sentence = ''
Bind the song changed signal to this handler.
def has_path(nodes, A, B): r"""Test if nodes from a breadth_first_order search lead from A to B. Parameters ---------- nodes : array_like Nodes from breadth_first_oder_seatch A : array_like The set of educt states B : array_like The set of product states Returns ------- has_path : boolean True if there exists a path, else False """ x1 = np.intersect1d(nodes, A).size > 0 x2 = np.intersect1d(nodes, B).size > 0 return x1 and x2
r"""Test if nodes from a breadth_first_order search lead from A to B. Parameters ---------- nodes : array_like Nodes from breadth_first_oder_seatch A : array_like The set of educt states B : array_like The set of product states Returns ------- has_path : boolean True if there exists a path, else False
def add_string_label(self, str_): """ Maps ("folds") the given string, returning an unique label ID. This allows several constant labels to be initialized to the same address thus saving memory space. :param str_: the string to map :return: the unique label ID """ if self.STRING_LABELS.get(str_, None) is None: self.STRING_LABELS[str_] = backend.tmp_label() return self.STRING_LABELS[str_]
Maps ("folds") the given string, returning an unique label ID. This allows several constant labels to be initialized to the same address thus saving memory space. :param str_: the string to map :return: the unique label ID
def rank_loss(sentence_emb, image_emb, margin=0.2): """Experimental rank loss, thanks to kkurach@ for the code.""" with tf.name_scope("rank_loss"): # Normalize first as this is assumed in cosine similarity later. sentence_emb = tf.nn.l2_normalize(sentence_emb, 1) image_emb = tf.nn.l2_normalize(image_emb, 1) # Both sentence_emb and image_emb have size [batch, depth]. scores = tf.matmul(image_emb, tf.transpose(sentence_emb)) # [batch, batch] diagonal = tf.diag_part(scores) # [batch] cost_s = tf.maximum(0.0, margin - diagonal + scores) # [batch, batch] cost_im = tf.maximum( 0.0, margin - tf.reshape(diagonal, [-1, 1]) + scores) # [batch, batch] # Clear diagonals. batch_size = tf.shape(sentence_emb)[0] empty_diagonal_mat = tf.ones_like(cost_s) - tf.eye(batch_size) cost_s *= empty_diagonal_mat cost_im *= empty_diagonal_mat return tf.reduce_mean(cost_s) + tf.reduce_mean(cost_im)
Experimental rank loss, thanks to kkurach@ for the code.
def setwinsize(self, r, c): """This sets the terminal window size of the child tty. This will cause a SIGWINCH signal to be sent to the child. This does not change the physical window size. It changes the size reported to TTY-aware applications like vi or curses -- applications that respond to the SIGWINCH signal. """ # Check for buggy platforms. Some Python versions on some platforms # (notably OSF1 Alpha and RedHat 7.1) truncate the value for # termios.TIOCSWINSZ. It is not clear why this happens. # These platforms don't seem to handle the signed int very well; # yet other platforms like OpenBSD have a large negative value for # TIOCSWINSZ and they don't have a truncate problem. # Newer versions of Linux have totally different values for TIOCSWINSZ. # Note that this fix is a hack. TIOCSWINSZ = getattr(termios, 'TIOCSWINSZ', -2146929561) if TIOCSWINSZ == 2148037735: # The same bits interpreted as an unsigned int. TIOCSWINSZ = -2146929561 # Same bits, but with sign. # Note, assume ws_xpixel and ws_ypixel are zero. s = struct.pack('HHHH', r, c, 0, 0) fcntl.ioctl(self.fileno(), TIOCSWINSZ, s)
This sets the terminal window size of the child tty. This will cause a SIGWINCH signal to be sent to the child. This does not change the physical window size. It changes the size reported to TTY-aware applications like vi or curses -- applications that respond to the SIGWINCH signal.
def estimate_pos_and_err_parabolic(tsvals): """Solve for the position and uncertainty of source in one dimension assuming that you are near the maximum and the errors are parabolic Parameters ---------- tsvals : `~numpy.ndarray` The TS values at the maximum TS, and for each pixel on either side Returns ------- The position and uncertainty of the source, in pixel units w.r.t. the center of the maximum pixel """ a = tsvals[2] - tsvals[0] bc = 2. * tsvals[1] - tsvals[0] - tsvals[2] s = a / (2 * bc) err = np.sqrt(2 / bc) return s, err
Solve for the position and uncertainty of source in one dimension assuming that you are near the maximum and the errors are parabolic Parameters ---------- tsvals : `~numpy.ndarray` The TS values at the maximum TS, and for each pixel on either side Returns ------- The position and uncertainty of the source, in pixel units w.r.t. the center of the maximum pixel
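A quick numerical check of the parabolic formulas implemented above, using made-up TS values rather than output from any real fit:

import numpy as np

tsvals = np.array([24.0, 25.0, 24.5])        # hypothetical TS at pixel-1, peak pixel, pixel+1
a = tsvals[2] - tsvals[0]                    # 0.5
bc = 2. * tsvals[1] - tsvals[0] - tsvals[2]  # 1.5
pos = a / (2 * bc)                           # ~0.17 pixels from the centre of the peak pixel
err = np.sqrt(2 / bc)                        # ~1.15 pixels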
def _effectinit_raise_col_padding_on_focus(self, name, **kwargs): """Init the column padding on focus effect. Keyword arguments can contain enlarge_time and padding. """ self._effects[name] = kwargs if "enlarge_time" not in kwargs: kwargs['enlarge_time'] = 0.5 if "padding" not in kwargs: kwargs['padding'] = 10 kwargs['padding_pps'] = kwargs['padding'] / kwargs['enlarge_time'] for option in self.options: option['padding_col'] = 0.0
Init the column padding on focus effect. Keyword arguments can contain enlarge_time and padding.
def eval(self, expr, n, extra_constraints=(), solver=None, model_callback=None): """ This function returns up to `n` possible solutions for expression `expr`. :param expr: expression (an AST) to evaluate :param n: number of results to return :param solver: a solver object, native to the backend, to assist in the evaluation (for example, a z3.Solver) :param extra_constraints: extra constraints (as ASTs) to add to the solver for this solve :param model_callback: a function that will be executed with recovered models (if any) :return: A sequence of up to n results (backend objects) """ if self._solver_required and solver is None: raise BackendError("%s requires a solver for evaluation" % self.__class__.__name__) results = self._eval( self.convert(expr), n, extra_constraints=self.convert_list(extra_constraints), solver=solver, model_callback=model_callback ) results = list(results) if type(expr) is not BV: return results size = expr.length for i in range(len(results)): results[i] &= (1 << size) - 1 # convert it back to unsigned # solver is done, terminate process solver.terminate() return results
This function returns up to `n` possible solutions for expression `expr`. :param expr: expression (an AST) to evaluate :param n: number of results to return :param solver: a solver object, native to the backend, to assist in the evaluation (for example, a z3.Solver) :param extra_constraints: extra constraints (as ASTs) to add to the solver for this solve :param model_callback: a function that will be executed with recovered models (if any) :return: A sequence of up to n results (backend objects)
def _is_reference(bpe): """Return True if the element is an entity reference.""" if isinstance(bpe, _bp('ProteinReference')) or \ isinstance(bpe, _bpimpl('ProteinReference')) or \ isinstance(bpe, _bp('SmallMoleculeReference')) or \ isinstance(bpe, _bpimpl('SmallMoleculeReference')) or \ isinstance(bpe, _bp('RnaReference')) or \ isinstance(bpe, _bpimpl('RnaReference')) or \ isinstance(bpe, _bp('EntityReference')) or \ isinstance(bpe, _bpimpl('EntityReference')): return True else: return False
Return True if the element is an entity reference.
def close(self): '''close the graph''' self.close_graph.set() if self.is_alive(): self.child.join(2)
close the graph
def get_api_client(): """Gets the reference to the API cient (singleton).""" with _api_lock: global _api_client if not _api_client: conf_file = os.path.join(os.environ.get("HOME"), ".python-grid5000.yaml") _api_client = Client.from_yaml(conf_file) return _api_client
Gets the reference to the API client (singleton).
def list_teams(profile="github", ignore_cache=False): ''' Lists all teams with the organization. profile The name of the profile configuration to use. Defaults to ``github``. ignore_cache Bypasses the use of cached teams. CLI Example: .. code-block:: bash salt myminion github.list_teams .. versionadded:: 2016.11.0 ''' key = 'github.{0}:teams'.format( _get_config_value(profile, 'org_name') ) if key not in __context__ or ignore_cache: client = _get_client(profile) organization = client.get_organization( _get_config_value(profile, 'org_name') ) teams_data = organization.get_teams() teams = {} for team in teams_data: # Note that _rawData is used to access some properties here as they # are not exposed in older versions of PyGithub. It's VERY important # to use team._rawData instead of team.raw_data, as the latter forces # an API call to retrieve team details again. teams[team.name] = { 'id': team.id, 'slug': team.slug, 'description': team._rawData['description'], 'permission': team.permission, 'privacy': team._rawData['privacy'] } __context__[key] = teams return __context__[key]
Lists all teams within the organization. profile The name of the profile configuration to use. Defaults to ``github``. ignore_cache Bypasses the use of cached teams. CLI Example: .. code-block:: bash salt myminion github.list_teams .. versionadded:: 2016.11.0
def __advice_stack_frame_protection(self, frame): """ Overriding of this is only permitted if and only if your name is Megumin and you have a pet/familiar named Chomusuke. """ if frame is None: logger.debug( 'currentframe() returned None; frame protection disabled') return f_back = frame.f_back while f_back: if f_back.f_code is self.handle.__code__: raise RuntimeError( "indirect invocation of '%s' by 'handle' is forbidden" % frame.f_code.co_name, ) f_back = f_back.f_back
Overriding of this is only permitted if and only if your name is Megumin and you have a pet/familiar named Chomusuke.
def describe_policy(policyName, region=None, key=None, keyid=None, profile=None): ''' Given a policy name describe its properties. Returns a dictionary of interesting properties. CLI Example: .. code-block:: bash salt myminion boto_iot.describe_policy mypolicy ''' try: conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile) policy = conn.get_policy(policyName=policyName) if policy: keys = ('policyName', 'policyArn', 'policyDocument', 'defaultVersionId') return {'policy': dict([(k, policy.get(k)) for k in keys])} else: return {'policy': None} except ClientError as e: err = __utils__['boto3.get_error'](e) if e.response.get('Error', {}).get('Code') == 'ResourceNotFoundException': return {'policy': None} return {'error': __utils__['boto3.get_error'](e)}
Given a policy name describe its properties. Returns a dictionary of interesting properties. CLI Example: .. code-block:: bash salt myminion boto_iot.describe_policy mypolicy
def write(self, fptr): """Write a UUID box to file. """ length = 4 + 4 + 16 + len(self.raw_data) write_buffer = struct.pack('>I4s', length, b'uuid') fptr.write(write_buffer) fptr.write(self.uuid.bytes) fptr.write(self.raw_data)
Write a UUID box to file.
def _infer(self, request): """Returns JSON for the `vz-line-chart`s for a feature. Args: request: A request that should contain 'inference_address', 'model_name', 'model_type, 'model_version', 'model_signature' and 'label_vocab_path'. Returns: A list of JSON objects, one for each chart. """ label_vocab = inference_utils.get_label_vocab( request.args.get('label_vocab_path')) try: if request.method != 'GET': logger.error('%s requests are forbidden.', request.method) return http_util.Respond(request, {'error': 'invalid non-GET request'}, 'application/json', code=405) (inference_addresses, model_names, model_versions, model_signatures) = self._parse_request_arguments(request) indices_to_infer = sorted(self.updated_example_indices) examples_to_infer = [self.examples[index] for index in indices_to_infer] infer_objs = [] for model_num in xrange(len(inference_addresses)): serving_bundle = inference_utils.ServingBundle( inference_addresses[model_num], model_names[model_num], request.args.get('model_type'), model_versions[model_num], model_signatures[model_num], request.args.get('use_predict') == 'true', request.args.get('predict_input_tensor'), request.args.get('predict_output_tensor')) infer_objs.append(inference_utils.run_inference_for_inference_results( examples_to_infer, serving_bundle)) resp = {'indices': indices_to_infer, 'results': infer_objs} self.updated_example_indices = set() return http_util.Respond(request, {'inferences': json.dumps(resp), 'vocab': json.dumps(label_vocab)}, 'application/json') except common_utils.InvalidUserInputError as e: return http_util.Respond(request, {'error': e.message}, 'application/json', code=400) except AbortionError as e: return http_util.Respond(request, {'error': e.details}, 'application/json', code=400)
Returns JSON for the `vz-line-chart`s for a feature. Args: request: A request that should contain 'inference_address', 'model_name', 'model_type, 'model_version', 'model_signature' and 'label_vocab_path'. Returns: A list of JSON objects, one for each chart.
def compare_profiles(profile1, profile2): """ Given two profiles, determine the ratio of similarity, i.e. the hamming distance between the strings. Args: profile1/2 (str): profile string Returns: similarity_ratio (float): the ratio of similiarity (0-1) """ length = len(profile1) profile1 = np.array(list(profile1)) profile2 = np.array(list(profile2)) similarity_array = profile1 == profile2 matches = np.sum(similarity_array) similarity_ratio = matches/length return similarity_ratio
Given two profiles, determine the ratio of similarity, i.e. the fraction of positions at which the two strings match (one minus the normalized Hamming distance). Args: profile1/2 (str): profile string Returns: similarity_ratio (float): the ratio of similarity (0-1)
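An illustrative call with two made-up five-character profiles; four of the five positions agree, so the returned ratio is 0.8:

ratio = compare_profiles("AACGT", "AAGGT")
print(ratio)  # 0.8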
def insert_first(self, val): """Insert in head :param val: Object to insert :return: True iff insertion completed successfully """ self.head = Node(val, next_node=self.head) return True
Insert in head :param val: Object to insert :return: True iff insertion completed successfully
def cleanup_lines( lines, **kwargs ): ''' Cleans up annotation after syntactic pre-processing and processing: -- Removes embedded clause boundaries "<{>" and "<}>"; -- Removes CLBC markings from analysis; -- Removes additional information between < and > from analysis; -- Removes additional information between " and " from analysis; -- If remove_caps==True , removes 'cap' annotations from analysis; -- If remove_clo==True , removes CLO CLC CLB markings from analysis; -- If double_quotes=='esc' then " will be overwritten with \\"; and if double_quotes=='unesc' then \\" will be overwritten with "; -- If fix_sent_tags=True, then sentence tags (<s> and </s>) will be checked for mistakenly added analysis, and found analysis will be removed; Returns the input list, which has been cleaned from additional information; ''' if not isinstance( lines, list ): raise Exception('(!) Unexpected type of input argument! Expected a list of strings.') remove_caps = False remove_clo = False double_quotes = None fix_sent_tags = False for argName, argVal in kwargs.items() : if argName in ['remove_caps', 'remove_cap']: remove_caps = bool(argVal) if argName == 'remove_clo': remove_clo = bool(argVal) if argName == 'fix_sent_tags': fix_sent_tags = bool(argVal) if argName in ['double_quotes', 'quotes'] and argVal and \ argVal.lower() in ['esc', 'escape', 'unesc', 'unescape']: double_quotes = argVal.lower() pat_token_line = re.compile('^"<(.+)>"\s*$') pat_analysis_start = re.compile('^(\s+)"(.+)"(\s[LZT].*)$') i = 0 to_delete = [] while ( i < len(lines) ): line = lines[i] isAnalysisLine = line.startswith(' ') or line.startswith('\t') if not isAnalysisLine: removeCurrentTokenAndAnalysis = False # 1) Remove embedded clause boundaries "<{>" and "<}>" if line.startswith('"<{>"'): if i+1 == len(lines) or (i+1 < len(lines) and not '"{"' in lines[i+1]): removeCurrentTokenAndAnalysis = True if line.startswith('"<}>"'): if i+1 == len(lines) or (i+1 < len(lines) and not '"}"' in lines[i+1]): removeCurrentTokenAndAnalysis = True if removeCurrentTokenAndAnalysis: # Remove the current token and all the subsequent analyses del lines[i] j=i while ( j < len(lines) ): line2 = lines[j] if line2.startswith(' ') or line2.startswith('\t'): del lines[j] else: break continue # 2) Convert double quotes (if required) if double_quotes: # '^"<(.+)>"\s*$' if pat_token_line.match( lines[i] ): token_cleaned = (pat_token_line.match(lines[i])).group(1) # Escape or unescape double quotes if double_quotes in ['esc', 'escape']: token_cleaned = token_cleaned.replace('"', '\\"') lines[i] = '"<'+token_cleaned+'>"' elif double_quotes in ['unesc', 'unescape']: token_cleaned = token_cleaned.replace('\\"', '"') lines[i] = '"<'+token_cleaned+'>"' else: # Normalize analysis line lines[i] = re.sub('^\s{4,}', '\t', lines[i]) # Remove clause boundary markings lines[i] = re.sub('(.*)" ([LZT].*) CLBC (.*)', '\\1" \\2 \\3', lines[i]) # Remove additional information that was added during the analysis lines[i] = re.sub('(.*)" L([^"<]*) ["<]([^@]*) (@.*)', '\\1" L\\2 \\4', lines[i]) # Remove 'cap' tags if remove_caps: lines[i] = lines[i].replace(' cap ', ' ') # Convert double quotes (if required) if double_quotes and double_quotes in ['unesc', 'unescape']: lines[i] = lines[i].replace('\\"', '"') elif double_quotes and double_quotes in ['esc', 'escape']: m = pat_analysis_start.match( lines[i] ) if m: # '^(\s+)"(.+)"(\s[LZT].*)$' start = m.group(1) content = m.group(2) end = m.group(3) content = content.replace('"', '\\"') lines[i] = ''.join([start, '"', content, '"', end]) # Remove CLO CLC CLB markings if remove_clo and 'CL' in lines[i]: lines[i] = re.sub('\sCL[OCB]', ' ', lines[i]) lines[i] = re.sub('\s{2,}', ' ', lines[i]) # Fix sentence tags that mistakenly could have analysis (in EDT corpus) if fix_sent_tags: if i-1 > -1 and ('"</s>"' in lines[i-1] or '"<s>"' in lines[i-1]): lines[i] = '' i += 1 return lines
Cleans up annotation after syntactic pre-processing and processing: -- Removes embedded clause boundaries "<{>" and "<}>"; -- Removes CLBC markings from analysis; -- Removes additional information between < and > from analysis; -- Removes additional information between " and " from analysis; -- If remove_caps==True , removes 'cap' annotations from analysis; -- If remove_clo==True , removes CLO CLC CLB markings from analysis; -- If double_quotes=='esc' then " will be overwritten with \\"; and if double_quotes=='unesc' then \\" will be overwritten with "; -- If fix_sent_tags=True, then sentence tags (<s> and </s>) will be checked for mistakenly added analysis, and found analysis will be removed; Returns the input list, which has been cleaned from additional information;
def config(self): """Get a listing of mobile client configuration settings.""" response = self._call( mc_calls.Config ) config_list = response.body.get('data', {}).get('entries', []) return config_list
Get a listing of mobile client configuration settings.
def change_puk(ctx, puk, new_puk): """ Change the PUK code. If the PIN is lost or blocked it can be reset using a PUK. The PUK must be between 6 and 8 characters long, and supports any type of alphanumeric characters. """ controller = ctx.obj['controller'] if not puk: puk = _prompt_pin(ctx, prompt='Enter your current PUK') if not new_puk: new_puk = click.prompt( 'Enter your new PUK', default='', hide_input=True, show_default=False, confirmation_prompt=True, err=True) if not _valid_pin_length(puk): ctx.fail('Current PUK must be between 6 and 8 characters long.') if not _valid_pin_length(new_puk): ctx.fail('New PUK must be between 6 and 8 characters long.') try: controller.change_puk(puk, new_puk) click.echo('New PUK set.') except AuthenticationBlocked as e: logger.debug('PUK is blocked.', exc_info=e) ctx.fail('PUK is blocked.') except WrongPuk as e: logger.debug( 'Failed to change PUK, %d tries left', e.tries_left, exc_info=e) ctx.fail('PUK change failed - %d tries left.' % e.tries_left)
Change the PUK code. If the PIN is lost or blocked it can be reset using a PUK. The PUK must be between 6 and 8 characters long, and supports any type of alphanumeric characters.
def apply_substitutions(monomial, monomial_substitutions, pure=False): """Helper function to remove monomials from the basis.""" if is_number_type(monomial): return monomial original_monomial = monomial changed = True if not pure: substitutions = monomial_substitutions else: substitutions = {} for lhs, rhs in monomial_substitutions.items(): irrelevant = False for atom in lhs.atoms(): if atom.is_Number: continue if not monomial.has(atom): irrelevant = True break if not irrelevant: substitutions[lhs] = rhs while changed: for lhs, rhs in substitutions.items(): monomial = fast_substitute(monomial, lhs, rhs) if original_monomial == monomial: changed = False original_monomial = monomial return monomial
Helper function to remove monomials from the basis.
def add_router_interface(self, context, router_info): """Adds an interface to a router created on Arista HW router. This deals with both IPv6 and IPv4 configurations. """ if router_info: self._select_dicts(router_info['ip_version']) cidr = router_info['cidr'] subnet_mask = cidr.split('/')[1] router_name = self._arista_router_name(router_info['id'], router_info['name']) if self._mlag_configured: # For MLAG, we send a specific IP address as opposed to cidr # For now, we are using x.x.x.253 and x.x.x.254 as virtual IP mlag_peer_failed = False for i, server in enumerate(self._servers): # Get appropriate virtual IP address for this router router_ip = self._get_router_ip(cidr, i, router_info['ip_version']) try: self.add_interface_to_router(router_info['seg_id'], router_name, router_info['gip'], router_ip, subnet_mask, server) mlag_peer_failed = False except Exception: if not mlag_peer_failed: mlag_peer_failed = True else: msg = (_('Failed to add interface to router ' '%s on EOS') % router_name) LOG.exception(msg) raise arista_exc.AristaServicePluginRpcError( msg=msg) else: for s in self._servers: self.add_interface_to_router(router_info['seg_id'], router_name, router_info['gip'], None, subnet_mask, s)
Adds an interface to a router created on Arista HW router. This deals with both IPv6 and IPv4 configurations.
def initiate_tasks(self): """ Loads all tasks using `TaskLoader` from respective configuration option """ self.tasks_classes = TaskLoader().load_tasks( paths=self.configuration[Configuration.ALGORITHM][Configuration.TASKS][Configuration.PATHS])
Loads all tasks using `TaskLoader` from respective configuration option
def create_snapshot(self, datacenter_id, volume_id, name=None, description=None): """ Creates a snapshot of the specified volume. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param volume_id: The unique ID of the volume. :type volume_id: ``str`` :param name: The name given to the volume. :type name: ``str`` :param description: The description given to the volume. :type description: ``str`` """ data = {'name': name, 'description': description} response = self._perform_request( '/datacenters/%s/volumes/%s/create-snapshot' % ( datacenter_id, volume_id), method='POST-ACTION-JSON', data=urlencode(data)) return response
Creates a snapshot of the specified volume. :param datacenter_id: The unique ID of the data center. :type datacenter_id: ``str`` :param volume_id: The unique ID of the volume. :type volume_id: ``str`` :param name: The name given to the volume. :type name: ``str`` :param description: The description given to the volume. :type description: ``str``
def substitute_minor_for_major(progression, substitute_index, ignore_suffix=False): """Substitute minor chords for its major equivalent. 'm' and 'm7' suffixes recognized, and ['II', 'III', 'VI'] if there is no suffix. Examples: >>> substitute_minor_for_major(['VI'], 0) ['I'] >>> substitute_minor_for_major(['Vm'], 0) ['bVIIM'] >>> substitute_minor_for_major(['VIm7'], 0) ['IM7'] """ (roman, acc, suff) = parse_string(progression[substitute_index]) res = [] # Minor to major substitution if suff == 'm' or suff == 'm7' or suff == '' and roman in ['II', 'III', 'VI' ] or ignore_suffix: n = skip(roman, 2) a = interval_diff(roman, n, 3) + acc if suff == 'm' or ignore_suffix: res.append(tuple_to_string((n, a, 'M'))) elif suff == 'm7' or ignore_suffix: res.append(tuple_to_string((n, a, 'M7'))) elif suff == '' or ignore_suffix: res.append(tuple_to_string((n, a, ''))) return res
Substitute minor chords for its major equivalent. 'm' and 'm7' suffixes recognized, and ['II', 'III', 'VI'] if there is no suffix. Examples: >>> substitute_minor_for_major(['VI'], 0) ['I'] >>> substitute_minor_for_major(['Vm'], 0) ['bVIIM'] >>> substitute_minor_for_major(['VIm7'], 0) ['IM7']
def run(path, code, params=None, ignore=None, select=None, **meta): """Pylint code checking. :return list: List of errors. """ logger.debug('Start pylint') clear_cache = params.pop('clear_cache', False) if clear_cache: MANAGER.astroid_cache.clear() class Reporter(BaseReporter): def __init__(self): self.errors = [] super(Reporter, self).__init__() def _display(self, layout): pass def handle_message(self, msg): self.errors.append(dict( lnum=msg.line, col=msg.column, text="%s %s" % (msg.msg_id, msg.msg), type=msg.msg_id[0] )) params = _Params(ignore=ignore, select=select, params=params) logger.debug(params) reporter = Reporter() try: Run([path] + params.to_attrs(), reporter=reporter, do_exit=False) except TypeError: # support pylint<2.0 # see https://github.com/PyCQA/pylint/commit/4210ef9b8c5d9e7b33ff0542683f18b8031193fa import pylint if pylint.__version__.split('.')[0] != '1': raise Run([path] + params.to_attrs(), reporter=reporter, exit=False) return reporter.errors
Pylint code checking. :return list: List of errors.
def update_redirect(self): """ Call it on your own endpoint's to update the back history navigation. If you bypass it, the next submit or back will go over it. """ page_history = Stack(session.get("page_history", [])) page_history.push(request.url) session["page_history"] = page_history.to_json()
Call it on your own endpoints to update the back-history navigation. If you bypass it, the next submit or back will go over it.
def get_response(self, environ=None): """Get a list of headers.""" response = super(SameContentException, self).get_response( environ=environ ) if self.etag is not None: response.set_etag(self.etag) if self.last_modified is not None: response.headers['Last-Modified'] = http_date(self.last_modified) return response
Get a list of headers.
def get_catfact(): """Get a cat fact from catfact.ninja and return it as a string. Functions for Soundhound, Google, IBM Watson, or other APIs can be added to create the desired functionality into this bot. """ response = requests.get(CAT_FACTS_URL, verify=False) response.raise_for_status() json_data = response.json() return json_data['fact']
Get a cat fact from catfact.ninja and return it as a string. Functions for Soundhound, Google, IBM Watson, or other APIs can be added to create the desired functionality into this bot.
def compute_transformed(context): """Compute transformed key for opening database""" key_composite = compute_key_composite( password=context._._.password, keyfile=context._._.keyfile ) kdf_parameters = context._.header.value.dynamic_header.kdf_parameters.data.dict if context._._.transformed_key is not None: transformed_key = context._._.transformed_key elif kdf_parameters['$UUID'].value == kdf_uuids['argon2']: transformed_key = argon2.low_level.hash_secret_raw( secret=key_composite, salt=kdf_parameters['S'].value, hash_len=32, type=argon2.low_level.Type.D, time_cost=kdf_parameters['I'].value, memory_cost=kdf_parameters['M'].value // 1024, parallelism=kdf_parameters['P'].value, version=kdf_parameters['V'].value ) elif kdf_parameters['$UUID'].value == kdf_uuids['aeskdf']: transformed_key = aes_kdf( kdf_parameters['S'].value, kdf_parameters['R'].value, key_composite ) else: raise Exception('Unsupported key derivation method') return transformed_key
Compute transformed key for opening database
def as_base_units(self): """ Converts all units to base SI units, including derived units. Returns: (base_units_dict, scaling factor). base_units_dict will not contain any constants, which are gathered in the scaling factor. """ b = collections.defaultdict(int) factor = 1 for k, v in self.items(): derived = False for d in DERIVED_UNITS.values(): if k in d: for k2, v2 in d[k].items(): if isinstance(k2, Number): factor *= k2 ** (v2 * v) else: b[k2] += v2 * v derived = True break if not derived: si, f = _get_si_unit(k) b[si] += v factor *= f ** v return {k: v for k, v in b.items() if v != 0}, factor
Converts all units to base SI units, including derived units. Returns: (base_units_dict, scaling factor). base_units_dict will not contain any constants, which are gathered in the scaling factor.
def stream(self, sha): """For now, all lookup is done by git itself""" hexsha, typename, size, stream = self._git.stream_object_data(bin_to_hex(sha)) return OStream(hex_to_bin(hexsha), typename, size, stream)
For now, all lookup is done by git itself
def _enable_lock(func): """ The decorator for ensuring thread-safe when current cache instance is concurrent status. """ @functools.wraps(func) def wrapper(*args, **kwargs): self = args[0] if self.is_concurrent: only_read = kwargs.get('only_read') if only_read is None or only_read: with self._rwlock: return func(*args, **kwargs) else: self._rwlock.acquire_writer() try: return func(*args, **kwargs) finally: self._rwlock.release() else: return func(*args, **kwargs) return wrapper
The decorator for ensuring thread-safe when current cache instance is concurrent status.
def save_post(self, title, text, user_id, tags, draft=False, post_date=None, last_modified_date=None, meta_data=None, post_id=None): """ Persist the blog post data. If ``post_id`` is ``None`` or ``post_id`` is invalid, the post must be inserted into the storage. If ``post_id`` is a valid id, then the data must be updated. :param title: The title of the blog post :type title: str :param text: The text of the blog post :type text: str :param user_id: The user identifier :type user_id: str :param tags: A list of tags :type tags: list :param draft: If the post is a draft of if needs to be published. :type draft: bool :param post_date: (Optional) The date the blog was posted (default datetime.datetime.utcnow()) :type post_date: datetime.datetime :param last_modified_date: (Optional) The date when blog was last modified (default datetime.datetime.utcnow()) :type last_modified_date: datetime.datetime :param meta_data: The meta data for the blog post :type meta_data: dict :param post_id: The post identifier. This should be ``None`` for an insert call, and a valid value for update. :type post_id: int :return: The post_id value, in case of a successful insert or update. Return ``None`` if there were errors. """ raise NotImplementedError("This method needs to be implemented by " "the inheriting class")
Persist the blog post data. If ``post_id`` is ``None`` or ``post_id`` is invalid, the post must be inserted into the storage. If ``post_id`` is a valid id, then the data must be updated. :param title: The title of the blog post :type title: str :param text: The text of the blog post :type text: str :param user_id: The user identifier :type user_id: str :param tags: A list of tags :type tags: list :param draft: If the post is a draft or if it needs to be published. :type draft: bool :param post_date: (Optional) The date the blog was posted (default datetime.datetime.utcnow()) :type post_date: datetime.datetime :param last_modified_date: (Optional) The date when the blog was last modified (default datetime.datetime.utcnow()) :type last_modified_date: datetime.datetime :param meta_data: The meta data for the blog post :type meta_data: dict :param post_id: The post identifier. This should be ``None`` for an insert call, and a valid value for update. :type post_id: int :return: The post_id value, in case of a successful insert or update. Return ``None`` if there were errors.
def _get_pretty_table(self, indent: int = 0, align: int = ALIGN_CENTER, border: bool = False) -> PrettyTable: """ Returns the table format of the scheme, i.e.: <table name> +----------------+---------------- | <field1> | <field2>... +----------------+---------------- | value1(field1) | value1(field2) | value2(field1) | value2(field2) | value3(field1) | value3(field2) +----------------+---------------- """ rows = self.rows columns = self.columns # Add the column color. if self._headers_color != Printer.NORMAL and len(rows) > 0 and len(columns) > 0: # We need to copy the lists so that we wont insert colors in the original ones. rows[0] = rows[0][:] columns = columns[:] columns[0] = self._headers_color + columns[0] # Write the table itself in NORMAL color. rows[0][0] = Printer.NORMAL + str(rows[0][0]) table = PrettyTable(columns, border=border, max_width=get_console_width() - indent) table.align = self._ALIGN_DICTIONARY[align] for row in rows: table.add_row(row) # Set the max width according to the columns size dict, or by default size limit when columns were not provided. for column, max_width in self._column_size_map.items(): table.max_width[column] = max_width return table
Returns the table format of the scheme, i.e.: <table name> +----------------+---------------- | <field1> | <field2>... +----------------+---------------- | value1(field1) | value1(field2) | value2(field1) | value2(field2) | value3(field1) | value3(field2) +----------------+----------------
def wait(self): """Waits for all submitted jobs to complete.""" logging.info("waiting for {} jobs to complete".format(len(self.submissions))) while not self.shutdown: time.sleep(1)
Waits for all submitted jobs to complete.
def get_logistic_regression_coefs_l2(self, category, clf=RidgeClassifierCV()): ''' Computes l2-penalized logistic regression score. Parameters ---------- category : str category name to score category : str category name to score Returns ------- (coefficient array, accuracy, majority class baseline accuracy) ''' try: from sklearn.cross_validation import cross_val_predict except: from sklearn.model_selection import cross_val_predict y = self._get_mask_from_category(category) X = TfidfTransformer().fit_transform(self._X) clf.fit(X, y) y_hat = cross_val_predict(clf, X, y) acc, baseline = self._get_accuracy_and_baseline_accuracy(y, y_hat) return clf.coef_[0], acc, baseline
Computes l2-penalized logistic regression score. Parameters ---------- category : str category name to score Returns ------- (coefficient array, accuracy, majority class baseline accuracy)
def _add_err(self, exinfo): """ Sets the error on this MultiResult. Will be ignored if an error is already set. :param exinfo: Return value from ``sys.exc_info()`` """ if self._err: return self._err = exinfo self.all_ok = False
Sets the error on this MultiResult. Will be ignored if an error is already set. :param exinfo: Return value from ``sys.exc_info()``
def continuous_binary_search(f, lo, hi, gap=1e-4): """Binary search for a function :param f: boolean monotone function with f(hi) = True :param int lo: :param int hi: with hi >= lo :param float gap: :returns: first value x in [lo,hi] such that f(x), x is computed up to some precision :complexity: `O(log((hi-lo)/gap))` """ while hi - lo > gap: # in other languages you can force floating division by using 2.0 mid = (lo + hi) / 2. if f(mid): hi = mid else: lo = mid return lo
Binary search for a function :param f: boolean monotone function with f(hi) = True :param int lo: :param int hi: with hi >= lo :param float gap: precision of the result :returns: first value x in [lo, hi] such that f(x), computed to within precision gap :complexity: `O(log((hi-lo)/gap))`
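For example, the square root of 2 can be approximated with the monotone predicate f(x) = x*x >= 2 over [0, 2]; the call below is a usage sketch of the function above:

root = continuous_binary_search(lambda x: x * x >= 2, 0, 2, gap=1e-6)
print(root)  # ~1.414213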
def watch(self, flag): """Whether or not the Template is being watched.""" lib.EnvSetDeftemplateWatch(self._env, int(flag), self._tpl)
Whether or not the Template is being watched.
def _multi_take(self, tup): """ Create the indexers for the passed tuple of keys, and execute the take operation. This allows the take operation to be executed all at once - rather than once for each dimension - improving efficiency. Parameters ---------- tup : tuple Tuple of indexers, one per axis Returns ------- values: same type as the object being indexed """ # GH 836 o = self.obj d = {axis: self._get_listlike_indexer(key, axis) for (key, axis) in zip(tup, o._AXIS_ORDERS)} return o._reindex_with_indexers(d, copy=True, allow_dups=True)
Create the indexers for the passed tuple of keys, and execute the take operation. This allows the take operation to be executed all at once - rather than once for each dimension - improving efficiency. Parameters ---------- tup : tuple Tuple of indexers, one per axis Returns ------- values: same type as the object being indexed
def write_Bar(file, bar, bpm=120, repeat=0, verbose=False): """Write a mingus.Bar to a MIDI file. Both the key and the meter are written to the file as well. """ m = MidiFile() t = MidiTrack(bpm) m.tracks = [t] while repeat >= 0: t.play_Bar(bar) repeat -= 1 return m.write_file(file, verbose)
Write a mingus.Bar to a MIDI file. Both the key and the meter are written to the file as well.
def score(infile, outfile, classifier, xgb_autotune, apply_weights, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test): """ Conduct semi-supervised learning and error-rate estimation for MS1, MS2 and transition-level data. """ if outfile is None: outfile = infile else: outfile = outfile # Prepare XGBoost-specific parameters xgb_hyperparams = {'autotune': xgb_autotune, 'autotune_num_rounds': 10, 'num_boost_round': 100, 'early_stopping_rounds': 10, 'test_size': 0.33} xgb_params = {'eta': 0.3, 'gamma': 0, 'max_depth': 6, 'min_child_weight': 1, 'subsample': 1, 'colsample_bytree': 1, 'colsample_bylevel': 1, 'colsample_bynode': 1, 'lambda': 1, 'alpha': 0, 'scale_pos_weight': 1, 'silent': 1, 'objective': 'binary:logitraw', 'nthread': 1, 'eval_metric': 'auc'} xgb_params_space = {'eta': hp.uniform('eta', 0.0, 0.3), 'gamma': hp.uniform('gamma', 0.0, 0.5), 'max_depth': hp.quniform('max_depth', 2, 8, 1), 'min_child_weight': hp.quniform('min_child_weight', 1, 5, 1), 'subsample': 1, 'colsample_bytree': 1, 'colsample_bylevel': 1, 'colsample_bynode': 1, 'lambda': hp.uniform('lambda', 0.0, 1.0), 'alpha': hp.uniform('alpha', 0.0, 1.0), 'scale_pos_weight': 1.0, 'silent': 1, 'objective': 'binary:logitraw', 'nthread': 1, 'eval_metric': 'auc'} if not apply_weights: PyProphetLearner(infile, outfile, classifier, xgb_hyperparams, xgb_params, xgb_params_space, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test).run() else: PyProphetWeightApplier(infile, outfile, classifier, xgb_hyperparams, xgb_params, xgb_params_space, xeval_fraction, xeval_num_iter, ss_initial_fdr, ss_iteration_fdr, ss_num_iter, ss_main_score, group_id, parametric, pfdr, pi0_lambda, pi0_method, pi0_smooth_df, pi0_smooth_log_pi0, lfdr_truncate, lfdr_monotone, lfdr_transformation, lfdr_adj, lfdr_eps, level, ipf_max_peakgroup_rank, ipf_max_peakgroup_pep, ipf_max_transition_isotope_overlap, ipf_min_transition_sn, tric_chromprob, threads, test, apply_weights).run()
Conduct semi-supervised learning and error-rate estimation for MS1, MS2 and transition-level data.
def get_container_metadata(self, container, prefix=None): """ Returns a dictionary containing the metadata for the container. """ return self._manager.get_metadata(container, prefix=prefix)
Returns a dictionary containing the metadata for the container.
def print_topics(self, Nwords=10): """ Print the top ``Nwords`` words for each topic. """ print('Topic\tTop %i words' % Nwords) for k, words in self.list_topics(Nwords): print(unicode(k).ljust(3) + '\t' + ' '.join(list(zip(*words))[0]))
Print the top ``Nwords`` words for each topic.
def absstart(self): """Returns the absolute start of the element by including docstrings outside of the element definition if applicable.""" if hasattr(self, "docstart") and self.docstart > 0: return self.docstart else: return self.start
Returns the absolute start of the element by including docstrings outside of the element definition if applicable.
def main(mash_output, hash_cutoff, sample_id, assembly_file):
    """
    Main function that dumps a mash dist txt file to a json file

    Parameters
    ----------
    mash_output: str
        A string with the input file.
    hash_cutoff: str
        the percentage cutoff for the percentage of shared hashes between
        query and plasmid in database that is allowed for the plasmid to be
        reported to the results outputs
    sample_id: str
        The name of the sample.
    assembly_file: str
        The path to the assembly file, passed through to the results output.

    """

    input_f = open(mash_output, "r")

    master_dict = {}
    for line in input_f:
        tab_split = line.split("\t")
        current_seq = tab_split[1].strip()
        ref_accession = "_".join(tab_split[0].strip().split("_")[0:3])
        mash_dist = tab_split[2].strip()
        hashes_list = tab_split[-1].strip().split("/")

        # creates a percentage of the shared hashes between the sample and the
        # reference
        perc_hashes = float(hashes_list[0]) / float(hashes_list[1])

        # if ref_accession already in dict, i.e., if the same accession number
        # matches more than one contig.
        if ref_accession in master_dict.keys():
            current_seq += ", {}".format(master_dict[ref_accession][-1])

        # assures that only the hashes with a given shared percentage are
        # reported to json file
        if perc_hashes > float(hash_cutoff):
            master_dict[ref_accession] = [
                round(1 - float(mash_dist), 2),
                round(perc_hashes, 2),
                current_seq
            ]

    # close the input file before writing the results
    input_f.close()
    send_to_output(master_dict, mash_output, sample_id, assembly_file)
Main function that dumps a mash dist txt file to a json file

Parameters
----------
mash_output: str
    A string with the input file.
hash_cutoff: str
    the percentage cutoff for the percentage of shared hashes between
    query and plasmid in database that is allowed for the plasmid to be
    reported to the results outputs
sample_id: str
    The name of the sample.
assembly_file: str
    The path to the assembly file, passed through to the results output.
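For reference, the fields parsed above follow the tab-separated layout of mash dist output (reference, query, distance, p-value, shared-hashes). A minimal standalone sketch of the same extraction; the example line and the 0.8 cutoff are invented for illustration.

# Sketch of the mash dist field extraction used above; the line and cutoff are invented.
line = "NC_019152.1_Escherichia_plasmid\tcontig_7\t0.0123\t1e-50\t850/1000\n"
tab_split = line.split("\t")
ref_accession = "_".join(tab_split[0].strip().split("_")[0:3])  # "NC_019152.1_Escherichia"
mash_dist = float(tab_split[2].strip())
shared, total = tab_split[-1].strip().split("/")
perc_hashes = float(shared) / float(total)  # 0.85 of the hashes are shared
if perc_hashes > 0.8:  # stands in for float(hash_cutoff)
    print(ref_accession, [round(1 - mash_dist, 2), round(perc_hashes, 2), tab_split[1].strip()])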
def crossover_with(self, other, points=2): """Perform 2-point crossover on this bit condition and another of the same length, returning the two resulting children. Usage: offspring1, offspring2 = condition1.crossover_with(condition2) Arguments: other: A second BitCondition of the same length as this one. points: An int, the number of crossover points of the crossover operation. Return: A tuple (condition1, condition2) of BitConditions, where the value at each position of this BitCondition and the other is preserved in one or the other of the two resulting conditions. """ assert isinstance(other, BitCondition) assert len(self) == len(other) template = BitString.crossover_template(len(self), points) inv_template = ~template bits1 = (self._bits & template) | (other._bits & inv_template) mask1 = (self._mask & template) | (other._mask & inv_template) bits2 = (self._bits & inv_template) | (other._bits & template) mask2 = (self._mask & inv_template) | (other._mask & template) # Convert the modified sequences back into BitConditions return type(self)(bits1, mask1), type(self)(bits2, mask2)
Perform 2-point crossover on this bit condition and another of the same length, returning the two resulting children. Usage: offspring1, offspring2 = condition1.crossover_with(condition2) Arguments: other: A second BitCondition of the same length as this one. points: An int, the number of crossover points of the crossover operation. Return: A tuple (condition1, condition2) of BitConditions, where the value at each position of this BitCondition and the other is preserved in one or the other of the two resulting conditions.
def clear(zpool, device=None): ''' Clears device errors in a pool. .. warning:: The device must not be part of an active pool configuration. zpool : string name of storage pool device : string (optional) specific device to clear .. versionadded:: 2018.3.1 CLI Example: .. code-block:: bash salt '*' zpool.clear mypool salt '*' zpool.clear mypool /path/to/dev ''' ## Configure pool # NOTE: initialize the defaults target = [] # NOTE: append the pool name and specifications target.append(zpool) target.append(device) ## clear storage pool errors res = __salt__['cmd.run_all']( __utils__['zfs.zpool_command']( command='clear', target=target, ), python_shell=False, ) return __utils__['zfs.parse_command_result'](res, 'cleared')
Clears device errors in a pool. .. warning:: The device must not be part of an active pool configuration. zpool : string name of storage pool device : string (optional) specific device to clear .. versionadded:: 2018.3.1 CLI Example: .. code-block:: bash salt '*' zpool.clear mypool salt '*' zpool.clear mypool /path/to/dev
def _get_seqprop_to_seqprop_alignment(self, seqprop1, seqprop2): """Return the alignment stored in self.sequence_alignments given a seqprop + another seqprop""" if isinstance(seqprop1, str): seqprop1_id = seqprop1 else: seqprop1_id = seqprop1.id if isinstance(seqprop2, str): seqprop2_id = seqprop2 else: seqprop2_id = seqprop2.id aln_id = '{}_{}'.format(seqprop1_id, seqprop2_id) if self.sequence_alignments.has_id(aln_id): alignment = self.sequence_alignments.get_by_id(aln_id) return alignment else: raise ValueError('{}: sequence alignment not found, please run the alignment first'.format(aln_id))
Return the alignment stored in self.sequence_alignments given a seqprop + another seqprop
def clip_image(image, clip_min, clip_max): """ Clip an image, or an image batch, with upper and lower threshold. """ return np.minimum(np.maximum(clip_min, image), clip_max)
Clip an image, or an image batch, with upper and lower threshold.
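A quick usage sketch with NumPy (the array values are illustrative):

import numpy as np

batch = np.array([[-0.5, 0.3], [1.2, 0.7]])
# Element-wise clip of an image batch to [0, 1], exactly as clip_image does.
clipped = np.minimum(np.maximum(0.0, batch), 1.0)
print(clipped)  # [[0.  0.3]
                #  [1.  0.7]]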
def last_in_date_group(df, data_query_cutoff_times, assets, reindex=True, have_sids=True, extra_groupers=None): """ Determine the last piece of information known on each date in the date index for each group. Input df MUST be sorted such that the correct last item is chosen from each group. Parameters ---------- df : pd.DataFrame The DataFrame containing the data to be grouped. Must be sorted so that the correct last item is chosen from each group. data_query_cutoff_times : pd.DatetimeIndex The dates to use for grouping and reindexing. assets : pd.Int64Index The assets that should be included in the column multiindex. reindex : bool Whether or not the DataFrame should be reindexed against the date index. This will add back any dates to the index that were grouped away. have_sids : bool Whether or not the DataFrame has sids. If it does, they will be used in the groupby. extra_groupers : list of str Any extra field names that should be included in the groupby. Returns ------- last_in_group : pd.DataFrame A DataFrame with dates as the index and fields used in the groupby as levels of a multiindex of columns. """ idx = [data_query_cutoff_times[data_query_cutoff_times.searchsorted( df[TS_FIELD_NAME].values, )]] if have_sids: idx += [SID_FIELD_NAME] if extra_groupers is None: extra_groupers = [] idx += extra_groupers last_in_group = df.drop(TS_FIELD_NAME, axis=1).groupby( idx, sort=False, ).last() # For the number of things that we're grouping by (except TS), unstack # the df. Done this way because of an unresolved pandas bug whereby # passing a list of levels with mixed dtypes to unstack causes the # resulting DataFrame to have all object-type columns. for _ in range(len(idx) - 1): last_in_group = last_in_group.unstack(-1) if reindex: if have_sids: cols = last_in_group.columns last_in_group = last_in_group.reindex( index=data_query_cutoff_times, columns=pd.MultiIndex.from_product( tuple(cols.levels[0:len(extra_groupers) + 1]) + (assets,), names=cols.names, ), ) else: last_in_group = last_in_group.reindex(data_query_cutoff_times) return last_in_group
Determine the last piece of information known on each date in the date index for each group. Input df MUST be sorted such that the correct last item is chosen from each group. Parameters ---------- df : pd.DataFrame The DataFrame containing the data to be grouped. Must be sorted so that the correct last item is chosen from each group. data_query_cutoff_times : pd.DatetimeIndex The dates to use for grouping and reindexing. assets : pd.Int64Index The assets that should be included in the column multiindex. reindex : bool Whether or not the DataFrame should be reindexed against the date index. This will add back any dates to the index that were grouped away. have_sids : bool Whether or not the DataFrame has sids. If it does, they will be used in the groupby. extra_groupers : list of str Any extra field names that should be included in the groupby. Returns ------- last_in_group : pd.DataFrame A DataFrame with dates as the index and fields used in the groupby as levels of a multiindex of columns.
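The core trick above is searchsorted: each row's knowledge timestamp is mapped to the first data-query cutoff at or after it, and the last row per (cutoff, sid) group wins. A small self-contained illustration of that mapping (column names and values are invented):

import pandas as pd

cutoffs = pd.DatetimeIndex(["2024-01-02", "2024-01-03", "2024-01-04"])
ts = pd.to_datetime(["2024-01-01 10:00", "2024-01-02 09:00", "2024-01-03 18:00"]).values

# Each timestamp is assigned the first cutoff time that is >= the timestamp.
assigned = cutoffs[cutoffs.searchsorted(ts)]
df = pd.DataFrame({"cutoff": assigned, "value": [1, 2, 3]})
print(df.groupby("cutoff", sort=False).last())  # last known value per cutoff date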
def get_name_init(self, name): """Get initial name of symbol. """ self._register_name(name) return self._var_name_mappers[name].get_init()
Get initial name of symbol.
def get_composition_lookup_session_for_repository(self, repository_id, proxy): """Gets the OsidSession associated with the composition lookup service for the given repository. arg: repository_id (osid.id.Id): the Id of the repository arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionLookupSession) - the new CompositionLookupSession raise: NotFound - repository_id not found raise: NullArgument - repository_id is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_lookup() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_composition_lookup() and supports_visible_federation() are true. """ if repository_id is None: raise NullArgument() if not self.supports_composition_lookup(): raise Unimplemented() try: from . import sessions except ImportError: raise # OperationFailed() proxy = self._convert_proxy(proxy) try: session = sessions.CompositionLookupSession(repository_id, proxy, runtime=self._runtime) except AttributeError: raise # OperationFailed() return session
Gets the OsidSession associated with the composition lookup service for the given repository. arg: repository_id (osid.id.Id): the Id of the repository arg proxy (osid.proxy.Proxy): a proxy return: (osid.repository.CompositionLookupSession) - the new CompositionLookupSession raise: NotFound - repository_id not found raise: NullArgument - repository_id is null raise: OperationFailed - unable to complete request raise: Unimplemented - supports_composition_lookup() or supports_visible_federation() is false compliance: optional - This method must be implemented if supports_composition_lookup() and supports_visible_federation() are true.
def _open(self, mode='r'):
    """Open the password file in the specified mode
    """
    open_file = None
    writeable = 'w' in mode or 'a' in mode or '+' in mode
    try:
        # NOTE: currently the MemOpener does not split off any filename
        #       which causes errors on close()
        #       so we add a dummy name and open it separately
        if (self.filename.startswith('mem://') or
                self.filename.startswith('ram://')):
            open_file = fs.opener.fsopendir(self.filename).open('kr.cfg', mode)
        else:
            if not hasattr(self, '_pyfs'):
                # reuse the pyfilesystem and path
                self._pyfs, self._path = fs.opener.opener.parse(
                    self.filename, writeable=writeable)
                # cache if permitted
                if self._cache_timeout is not None:
                    self._pyfs = fs.remote.CacheFS(
                        self._pyfs, cache_timeout=self._cache_timeout)
            open_file = self._pyfs.open(self._path, mode)
    except fs.errors.ResourceNotFoundError:
        if self._can_create:
            segments = fs.opener.opener.split_segments(self.filename)
            if segments:
                # this seems broken, but pyfilesystem uses it, so we must
                fs_name, credentials, url1, url2, path = segments.groups()
                assert fs_name, 'Should be a remote filesystem'
                host = ''
                # allow for domain:port
                if ':' in url2:
                    split_url2 = url2.split('/', 1)
                    if len(split_url2) > 1:
                        url2 = split_url2[1]
                    else:
                        url2 = ''
                    host = split_url2[0]
                pyfs = fs.opener.opener.opendir(
                    '%s://%s' % (fs_name, host))
                # cache if permitted
                if self._cache_timeout is not None:
                    pyfs = fs.remote.CacheFS(
                        pyfs, cache_timeout=self._cache_timeout)
                # NOTE: fs.path.split does not function in the same
                #       way as os.path.split... at least under windows
                url2_path, url2_filename = os.path.split(url2)
                if url2_path and not pyfs.exists(url2_path):
                    pyfs.makedir(url2_path, recursive=True)
            else:
                # assume local filesystem
                full_url = fs.opener._expand_syspath(self.filename)
                # NOTE: fs.path.split does not function in the same
                #       way as os.path.split... at least under windows
                url2_path, url2 = os.path.split(full_url)
                pyfs = fs.osfs.OSFS(url2_path)

            try:
                # reuse the pyfilesystem and path
                self._pyfs = pyfs
                self._path = url2
                return pyfs.open(url2, mode)
            except fs.errors.ResourceNotFoundError:
                if writeable:
                    raise
                else:
                    pass
        # NOTE: ignore read errors as the underlying caller can fail safely
        if writeable:
            raise
        else:
            pass
    return open_file
Open the password file in the specified mode
def _heartbeat_manager(self): """ Heartbeat DAG file processor and start it if it is not alive. :return: """ if self._process and not self._process.is_alive() and not self.done: self.start()
Heartbeat DAG file processor and start it if it is not alive. :return:
def _make_readline_peeker(self): """Make a readline-like function which peeks into the source.""" counter = itertools.count(0) def readline(): try: return self._peek_buffer(next(counter)) except StopIteration: return '' return readline
Make a readline-like function which peeks into the source.
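The same peeking pattern can be sketched standalone: every call to the returned readline looks one line further ahead, returning '' once the backing buffer is exhausted (a plain list stands in for the _peek_buffer used above):

import itertools

def make_readline_peeker(lines):
    """Return a readline-like callable that peeks successive lines, '' when exhausted."""
    counter = itertools.count(0)

    def readline():
        try:
            return lines[next(counter)]
        except IndexError:
            return ''
    return readline

peek = make_readline_peeker(["a = 1\n", "b = 2\n"])
print(peek(), peek(), repr(peek()))  # prints the two lines, then ''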
def parse_data(self, data, msg_signature=None, timestamp=None, nonce=None):
    """
    Parse the data sent by the WeChat server and store it in this class.

    :param data: body data of the HTTP request
    :param msg_signature: msg_signature for the EncodingAESKey mode
    :param timestamp: timestamp used with the EncodingAESKey mode
    :param nonce: nonce (random number) used with the EncodingAESKey mode
    :raises ParseError: failed to parse the WeChat server data; the data is invalid
    """
    result = {}
    if isinstance(data, six.text_type):
        # unicode to str(PY2), str to bytes(PY3)
        data = data.encode('utf-8')

    if self.conf.encrypt_mode == 'safe':
        if not (msg_signature and timestamp and nonce):
            raise ParseError('must provide msg_signature/timestamp/nonce in safe encrypt mode')
        data = self.conf.crypto.decrypt_message(
            msg=data,
            msg_signature=msg_signature,
            timestamp=timestamp,
            nonce=nonce,
        )
    try:
        xml = XMLStore(xmlstring=data)
    except Exception:
        raise ParseError()

    result = xml.xml2dict
    result['raw'] = data
    result['type'] = result.pop('MsgType').lower()

    message_type = MESSAGE_TYPES.get(result['type'], UnknownMessage)
    self.__message = message_type(result)
    self.__is_parse = True
Parse the data sent by the WeChat server and store it in this class.

:param data: body data of the HTTP request
:param msg_signature: msg_signature for the EncodingAESKey mode
:param timestamp: timestamp used with the EncodingAESKey mode
:param nonce: nonce (random number) used with the EncodingAESKey mode
:raises ParseError: failed to parse the WeChat server data; the data is invalid
def _map_type_to_dict(self, type_name):
    """ Maps an instance type representation string (e.g. 'RESULT') to the corresponding dictionary in root. """
    root = self._root_instance
    if type_name == RESULT:
        return root._results
    elif type_name == PARAMETER:
        return root._parameters
    elif type_name == DERIVED_PARAMETER:
        return root._derived_parameters
    elif type_name == CONFIG:
        return root._config
    elif type_name == LEAF:
        return root._other_leaves
    else:
        raise RuntimeError('You shall not pass!')
Maps an instance type representation string (e.g. 'RESULT') to the corresponding dictionary in root.
def unix_ts(dtval):
    '''Convert datetime into a unix timestamp.

    This is equivalent to Python 3's int(datetime.timestamp()).

    :param dtval: datetime to convert
    '''
    epoch = datetime(1970, 1, 1, 0, 0, tzinfo=tzutc())
    delta = (dtval - epoch)
    return delta.days * 24 * 3600 + delta.seconds
Convert datetime into a unix timestamp.

This is equivalent to Python 3's int(datetime.timestamp()).

:param dtval: datetime to convert
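For example, an aware UTC datetime maps to the expected POSIX timestamp (dateutil's tzutc is used, matching the function above):

from datetime import datetime
from dateutil.tz import tzutc

dt = datetime(2020, 1, 1, 0, 0, tzinfo=tzutc())
epoch = datetime(1970, 1, 1, 0, 0, tzinfo=tzutc())
delta = dt - epoch
print(delta.days * 24 * 3600 + delta.seconds)  # 1577836800, matches int(dt.timestamp()) on Python 3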
def Lexicon(**rules): """Create a dictionary mapping symbols to alternative words. >>> Lexicon(Art = "the | a | an") {'Art': ['the', 'a', 'an']} """ for (lhs, rhs) in rules.items(): rules[lhs] = [word.strip() for word in rhs.split('|')] return rules
Create a dictionary mapping symbols to alternative words. >>> Lexicon(Art = "the | a | an") {'Art': ['the', 'a', 'an']}
def erfcc(x):
    """
    Returns the complementary error function erfc(x) with fractional
    error everywhere less than 1.2e-7.  Adapted from Numerical Recipes.

    Usage:   erfcc(x)
    """
    z = abs(x)
    t = 1.0 / (1.0 + 0.5 * z)
    ans = t * math.exp(-z * z - 1.26551223 + t * (1.00002368 + t * (0.37409196 + t * (0.09678418 + t * (-0.18628806 + t * (0.27886807 + t * (-1.13520398 + t * (1.48851587 + t * (-0.82215223 + t * 0.17087277)))))))))
    if x >= 0:
        return ans
    else:
        return 2.0 - ans
Returns the complementary error function erfc(x) with fractional
error everywhere less than 1.2e-7.  Adapted from Numerical Recipes.

Usage:   erfcc(x)
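A quick sanity check of the rational approximation against the standard library (math.erfc is available since Python 3.2); the tolerance below is a loose absolute bound rather than the exact fractional bound quoted in the docstring:

import math

for x in (-2.0, -0.5, 0.0, 0.5, 2.0):
    z = abs(x)
    t = 1.0 / (1.0 + 0.5 * z)
    ans = t * math.exp(-z * z - 1.26551223 + t * (1.00002368 + t * (0.37409196 +
          t * (0.09678418 + t * (-0.18628806 + t * (0.27886807 + t * (-1.13520398 +
          t * (1.48851587 + t * (-0.82215223 + t * 0.17087277)))))))))
    approx = ans if x >= 0 else 2.0 - ans
    assert abs(approx - math.erfc(x)) < 1e-6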
async def _wrap_gen(self, ID: str):
    """Wrap an async iterator.

    Parameters:
        ID (str): - task ID

    Yield:
        (Any): - result fetched from the async iterator's result queue

    Raise:
        (StopAsyncIteration): - raised when the async iterator terminates

    """
    while True:
        result = await self._gens_queue[ID].get()
        if isinstance(result, StopAsyncIteration):
            del self._gens_queue[ID]
            break
        else:
            yield result
Wrap an async iterator.

Parameters:
    ID (str): - task ID

Yield:
    (Any): - result fetched from the async iterator's result queue

Raise:
    (StopAsyncIteration): - raised when the async iterator terminates
def showMenu(self, point=None): """ Displays the menu for this view widget. :param point | <QPoint> """ menu = self.createMenu(self) menu.exec_(QtGui.QCursor.pos()) menu.deleteLater()
Displays the menu for this view widget. :param point | <QPoint>
def cpp_best_split_full_model(X, Uy, C, S, U, noderange, delta, save_memory=False):
    """Wrapper calling the C++ splitting function."""
    return CSP.best_split_full_model(X, Uy, C, S, U, noderange, delta)
Wrapper calling the C++ splitting function.
def split_leading_indent(line, max_indents=None): """Split line into leading indent and main.""" indent = "" while ( (max_indents is None or max_indents > 0) and line.startswith((openindent, closeindent)) ) or line.lstrip() != line: if max_indents is not None and line.startswith((openindent, closeindent)): max_indents -= 1 indent += line[0] line = line[1:] return indent, line
Split line into leading indent and main.
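Outside the compiler the same split can be sketched for plain whitespace indentation (the openindent/closeindent sentinels handled above are Coconut-internal markers and are left out here):

def split_ws_indent(line):
    """Split a line into (leading whitespace, rest) -- simplified sketch."""
    rest = line.lstrip()
    return line[:len(line) - len(rest)], rest

print(split_ws_indent("    x = 1\n"))  # ('    ', 'x = 1\n')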
def closure(self): """ Returns a new `Independencies()`-object that additionally contains those `IndependenceAssertions` that are implied by the the current independencies (using with the `semi-graphoid axioms <https://en.wikipedia.org/w/index.php?title=Conditional_independence&oldid=708760689#Rules_of_conditional_independence>`_; see (Pearl, 1989, `Conditional Independence and its representations <http://www.cs.technion.ac.il/~dang/journal_papers/pearl1989conditional.pdf>`_)). Might be very slow if more than six variables are involved. Examples -------- >>> from pgmpy.independencies import Independencies >>> ind1 = Independencies(('A', ['B', 'C'], 'D')) >>> ind1.closure() (A _|_ B | D, C) (A _|_ B, C | D) (A _|_ B | D) (A _|_ C | D, B) (A _|_ C | D) >>> ind2 = Independencies(('W', ['X', 'Y', 'Z'])) >>> ind2.closure() (W _|_ Y) (W _|_ Y | X) (W _|_ Z | Y) (W _|_ Z, X, Y) (W _|_ Z) (W _|_ Z, X) (W _|_ X, Y) (W _|_ Z | X) (W _|_ Z, Y | X) [..] """ def single_var(var): "Checks if var represents a single variable" if not hasattr(var, '__iter__'): return True else: return len(var) == 1 def sg0(ind): "Symmetry rule: 'X ⟂ Y | Z' -> 'Y ⟂ X | Z'" return IndependenceAssertion(ind.event2, ind.event1, ind.event3) # since X⟂Y|Z == Y⟂X|Z in pgmpy, sg0 (symmetry) is not used as an axiom/rule. # instead we use a decorator for the other axioms to apply them on both sides def apply_left_and_right(func): def symmetric_func(*args): if len(args) == 1: return func(args[0]) + func(sg0(args[0])) if len(args) == 2: return (func(*args) + func(args[0], sg0(args[1])) + func(sg0(args[0]), args[1]) + func(sg0(args[0]), sg0(args[1]))) return symmetric_func @apply_left_and_right def sg1(ind): "Decomposition rule: 'X ⟂ Y,W | Z' -> 'X ⟂ Y | Z', 'X ⟂ W | Z'" if single_var(ind.event2): return [] else: return [IndependenceAssertion(ind.event1, ind.event2 - {elem}, ind.event3) for elem in ind.event2] @apply_left_and_right def sg2(ind): "Weak Union rule: 'X ⟂ Y,W | Z' -> 'X ⟂ Y | W,Z', 'X ⟂ W | Y,Z' " if single_var(ind.event2): return [] else: return [IndependenceAssertion(ind.event1, ind.event2 - {elem}, {elem} | ind.event3) for elem in ind.event2] @apply_left_and_right def sg3(ind1, ind2): "Contraction rule: 'X ⟂ W | Y,Z' & 'X ⟂ Y | Z' -> 'X ⟂ W,Y | Z'" if ind1.event1 != ind2.event1: return [] Y = ind2.event2 Z = ind2.event3 Y_Z = ind1.event3 if Y < Y_Z and Z < Y_Z and Y.isdisjoint(Z): return [IndependenceAssertion(ind1.event1, ind1.event2 | Y, Z)] else: return [] # apply semi-graphoid axioms as long as new independencies are found. all_independencies = set() new_inds = set(self.independencies) while new_inds: new_pairs = (set(itertools.permutations(new_inds, 2)) | set(itertools.product(new_inds, all_independencies)) | set(itertools.product(all_independencies, new_inds))) all_independencies |= new_inds new_inds = set(sum([sg1(ind) for ind in new_inds] + [sg2(ind) for ind in new_inds] + [sg3(*inds) for inds in new_pairs], [])) new_inds -= all_independencies return Independencies(*list(all_independencies))
Returns a new `Independencies()`-object that additionally contains those `IndependenceAssertions`
that are implied by the current independencies (using the `semi-graphoid axioms
<https://en.wikipedia.org/w/index.php?title=Conditional_independence&oldid=708760689#Rules_of_conditional_independence>`_;
see (Pearl, 1989, `Conditional Independence and its representations
<http://www.cs.technion.ac.il/~dang/journal_papers/pearl1989conditional.pdf>`_)).

Might be very slow if more than six variables are involved.

Examples
--------
>>> from pgmpy.independencies import Independencies
>>> ind1 = Independencies(('A', ['B', 'C'], 'D'))
>>> ind1.closure()
(A _|_ B | D, C)
(A _|_ B, C | D)
(A _|_ B | D)
(A _|_ C | D, B)
(A _|_ C | D)

>>> ind2 = Independencies(('W', ['X', 'Y', 'Z']))
>>> ind2.closure()
(W _|_ Y)
(W _|_ Y | X)
(W _|_ Z | Y)
(W _|_ Z, X, Y)
(W _|_ Z)
(W _|_ Z, X)
(W _|_ X, Y)
(W _|_ Z | X)
(W _|_ Z, Y | X)
[..]
def cut_gmail_quote(html_message): ''' Cuts the outermost block element with class gmail_quote. ''' gmail_quote = cssselect('div.gmail_quote', html_message) if gmail_quote and (gmail_quote[0].text is None or not RE_FWD.match(gmail_quote[0].text)): gmail_quote[0].getparent().remove(gmail_quote[0]) return True
Cuts the outermost block element with class gmail_quote.
def http_basic_auth_get_user(request): """Inspect the given request to find a logged user. If not found, the header HTTP_AUTHORIZATION is read for 'Basic Auth' login and password, and try to authenticate against default UserModel. Always return a User instance (possibly anonymous, meaning authentication failed)""" try: # If standard auth middleware already authenticated a user, use it if user_is_authenticated(request.user): return request.user except AttributeError: pass # This was grabbed from https://www.djangosnippets.org/snippets/243/ # Thanks to http://stackoverflow.com/a/1087736/1887976 if 'HTTP_AUTHORIZATION' in request.META: auth_data = request.META['HTTP_AUTHORIZATION'].split() if len(auth_data) == 2 and auth_data[0].lower() == "basic": uname, passwd = base64.b64decode(auth_data[1]).decode('utf-8').split(':') django_user = authenticate(username=uname, password=passwd) if django_user is not None: login(request, django_user) # In all cases, return the current request's user (may be anonymous user if no login succeed) try: return request.user except AttributeError: return AnonymousUser()
Inspect the given request to find a logged user. If not found, the header HTTP_AUTHORIZATION is read for 'Basic Auth' login and password, and try to authenticate against default UserModel. Always return a User instance (possibly anonymous, meaning authentication failed)
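The header it parses is the standard Basic scheme: base64 of username:password. A small sketch of building and decoding such a header (the credentials are illustrative):

import base64

# Build the header value a client would send.
header = "Basic " + base64.b64encode(b"alice:s3cret").decode("ascii")

# Decode it the same way the view above does.
auth_data = header.split()
assert len(auth_data) == 2 and auth_data[0].lower() == "basic"
uname, passwd = base64.b64decode(auth_data[1]).decode("utf-8").split(":")
print(uname, passwd)  # alice s3cret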
def html2groff(data, name): """Convert HTML text from cplusplus.com to Groff-formatted text.""" # Remove sidebar try: data = data[data.index('<div class="C_doc">'):] except ValueError: pass # Pre replace all for rp in pre_rps: data = re.compile(rp[0], rp[2]).sub(rp[1], data) for table in re.findall(r'<table.*?>.*?</table>', data, re.S): tbl = parse_table(escape_pre_section(table)) # Escape column with '.' as prefix tbl = re.compile(r'T{\n(\..*?)\nT}', re.S).sub(r'T{\n\\E \1\nT}', tbl) data = data.replace(table, tbl) # Replace all for rp in rps: data = re.compile(rp[0], rp[2]).sub(rp[1], data) # Upper case all section headers for st in re.findall(r'.SH .*\n', data): data = data.replace(st, st.upper()) # Add tags to member/inherited member functions # e.g. insert -> vector::insert # # .SE is a pseudo macro I created which means 'SECTION END' # The reason I use it is because I need a marker to know where section # ends. # re.findall find patterns which does not overlap, which means if I do # this: secs = re.findall(r'\n\.SH "(.+?)"(.+?)\.SH', data, re.S) # re.findall will skip the later .SH tag and thus skip the later section. # To fix this, '.SE' is used to mark the end of the section so the next # '.SH' can be find by re.findall page_type = re.search(r'\n\.SH "TYPE"\n(.+?)\n', data) if page_type and 'class' in page_type.group(1): class_name = re.search(r'\n\.SH "NAME"\n(?:.*::)?(.+?) ', data).group(1) secs = re.findall(r'\n\.SH "(.+?)"(.+?)\.SE', data, re.S) for sec, content in secs: # Member functions if ('MEMBER' in sec and 'NON-MEMBER' not in sec and 'INHERITED' not in sec and sec != 'MEMBER TYPES'): content2 = re.sub(r'\n\.IP "([^:]+?)"', r'\n.IP "%s::\1"' % class_name, content) # Replace (constructor) (destructor) content2 = re.sub(r'\(constructor\)', r'%s' % class_name, content2) content2 = re.sub(r'\(destructor\)', r'~%s' % class_name, content2) data = data.replace(content, content2) # Inherited member functions elif 'MEMBER' in sec and 'INHERITED' in sec: inherit = re.search(r'.+?INHERITED FROM (.+)', sec).group(1).lower() content2 = re.sub(r'\n\.IP "(.+)"', r'\n.IP "%s::\1"' % inherit, content) data = data.replace(content, content2) # Remove pseudo macro '.SE' data = data.replace('\n.SE', '') return data
Convert HTML text from cplusplus.com to Groff-formatted text.
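The .SE end marker exists because re.findall returns non-overlapping matches: a pattern terminated by the next .SH consumes that header and skips every other section. A minimal demonstration with invented section text:

import re

text = '\n.SH "ONE"\nbody1\n.SE\n.SH "TWO"\nbody2\n.SE'

# Terminating on the next .SH loses every other section:
print(re.findall(r'\n\.SH "(.+?)"(.+?)\.SH', text, re.S))  # [('ONE', '\nbody1\n.SE\n')]

# Terminating on the explicit .SE marker finds them all:
print(re.findall(r'\n\.SH "(.+?)"(.+?)\.SE', text, re.S))  # [('ONE', '\nbody1\n'), ('TWO', '\nbody2\n')]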
def write_Composition(composition, filename, zip=False): """Create an XML file (or MXL if compressed) for a given composition.""" text = from_Composition(composition) if not zip: f = open(filename + '.xml', 'w') f.write(text) f.close() else: import zipfile import os zf = zipfile.ZipFile(filename + '.mxl', mode='w', compression=zipfile.ZIP_DEFLATED) zi = zipfile.ZipInfo('META-INF' + os.sep + 'container.xml') zi.external_attr = 0660 << 16L zf.writestr(zi, "<?xml version='1.0' encoding='UTF-8'?>" "<container><rootfiles><rootfile full-path='{0}.xml'/>" "</rootfiles></container>".format(filename)) zi = zipfile.ZipInfo(filename + '.xml') zi.external_attr = 0660 << 16L zf.writestr(zi, text) zf.close()
Create an XML file (or MXL if compressed) for a given composition.
def run(self):
    """
    This AI simply moves the characters towards the opposite edges of
    the grid for 3 steps or until an event halts the simulation
    """
    x, y = 1, 0  # set the direction
    num_steps = 0
    while self.s.get_state() != 'Halted':
        self.s.command({'name': 'walk', 'type': 'move', 'direction': [x, y]}, self.a1)
        self.s.command({'name': 'walk', 'type': 'run', 'direction': [x, y + 1]}, self.a2)
        num_steps += 1
        if num_steps >= 3:
            break
    for a in self.s.agents:
        print(a.name, 'finished at position ', a.coords['x'], a.coords['y'])
This AI simply moves the characters towards the opposite edges of
the grid for 3 steps or until an event halts the simulation
def do_edit_settings(fake): """Opens legit settings in editor.""" path = resources.user.open('config.ini').name click.echo('Legit Settings:\n') for (option, _, description) in legit_settings.config_defaults: click.echo(columns([crayons.yellow(option), 25], [description, None])) click.echo("") # separate settings info from os output if fake: click.echo(crayons.red('Faked! >>> edit {}'.format(path))) else: click.edit(path)
Opens legit settings in editor.
def stop(self): """Stop stream.""" if self.stream and self.stream.session.state != STATE_STOPPED: self.stream.stop()
Stop stream.
def _to_bytes(self, data, key='', expired=None, noc=0, ncalls=0): """Serialize (and encrypt if `key` is provided) the data and represent it as string. **Parameters** :param data: any python serializable (pickable) object :param key: If the key is provided and `pycrypto` is installed, cached data will be encrypted (If `pycrypto` is not installed, this #TODO: pycrypto or something else?! parameter will be ignored). Empty string by default. :param expired: exact date when the cache will be expired; It is `None` by default :param noc: the number of allowed calls; TODO: Clarify what does it mean, exactly?!!!! :param ncalls: What is it; I don't understand!!! TODO: clarify this!!!! :type key: str :type expired: `datetime` or `None` :type noc: int :type ncalls: int :returns: serialized data :rtype: str """ data_tuple = (data, expired, noc, ncalls) if not can_encrypt and key: # TODO: Probably not only Pycrypto will be using for encryption!!! # Clarification needed warnings.warn("Pycrypto is not installed. The data will not be encrypted", UserWarning) result = encode_safely(data_tuple) elif can_encrypt and key: if PY3: cipher = AESCipher(key.encode(settings.DEFAULT_ENCODING)) else: cipher = AESCipher(key) result = cipher.encrypt(encode_safely(data_tuple)) else: result = encode_safely(data_tuple) return result
Serialize (and encrypt if `key` is provided) the data and represent it as string. **Parameters** :param data: any python serializable (pickable) object :param key: If the key is provided and `pycrypto` is installed, cached data will be encrypted (If `pycrypto` is not installed, this #TODO: pycrypto or something else?! parameter will be ignored). Empty string by default. :param expired: exact date when the cache will be expired; It is `None` by default :param noc: the number of allowed calls; TODO: Clarify what does it mean, exactly?!!!! :param ncalls: What is it; I don't understand!!! TODO: clarify this!!!! :type key: str :type expired: `datetime` or `None` :type noc: int :type ncalls: int :returns: serialized data :rtype: str
def cprint(color, prefix, message): """ prints a message in a given color :param color: the color as defined in the theme :param prefix: the prefix (a string) :param message: the message :return: """ message = message or "" prefix = prefix or "" print((Console.theme[color] + prefix + message + Console.theme['ENDC']))
prints a message in a given color :param color: the color as defined in the theme :param prefix: the prefix (a string) :param message: the message :return:
def info(name): ''' Return information about a certificate .. note:: Will output tls.cert_info if that's available, or OpenSSL text if not :param name: CommonName of cert CLI example: .. code-block:: bash salt 'gitlab.example.com' acme.info dev.example.com ''' cert_file = _cert_file(name, 'cert') # Use the salt module if available if 'tls.cert_info' in __salt__: cert_info = __salt__['tls.cert_info'](cert_file) # Strip out the extensions object contents; # these trip over our poor state output # and they serve no real purpose here anyway cert_info['extensions'] = cert_info['extensions'].keys() return cert_info # Cobble it together using the openssl binary openssl_cmd = 'openssl x509 -in {0} -noout -text'.format(cert_file) return __salt__['cmd.run'](openssl_cmd, output_loglevel='quiet')
Return information about a certificate .. note:: Will output tls.cert_info if that's available, or OpenSSL text if not :param name: CommonName of cert CLI example: .. code-block:: bash salt 'gitlab.example.com' acme.info dev.example.com
def nodeListGetString(self, list, inLine): """Build the string equivalent to the text contained in the Node list made of TEXTs and ENTITY_REFs """ if list is None: list__o = None else: list__o = list._o ret = libxml2mod.xmlNodeListGetString(self._o, list__o, inLine) return ret
Build the string equivalent to the text contained in the Node list made of TEXTs and ENTITY_REFs
def heat_process(body, message):
    """
    This function deals with heat notifications.

    First, look for a process in customer_process (exact event types, no wildcards).
    If none is found there, look for a process in customer_process_wildcard.
    If none is found there either, use the ternya default process.

    :param body: dict of openstack notification.
    :param message: kombu Message class
    :return:
    """
    event_type = body['event_type']
    process = heat_customer_process.get(event_type)
    if process is not None:
        process(body, message)
    else:
        matched = False
        process_wildcard = None
        for pattern in heat_customer_process_wildcard.keys():
            if pattern.match(event_type):
                process_wildcard = heat_customer_process_wildcard.get(pattern)
                matched = True
                break
        if matched:
            process_wildcard(body, message)
        else:
            default_process(body, message)

    message.ack()
This function deals with heat notifications.

First, look for a process in customer_process (exact event types, no wildcards).
If none is found there, look for a process in customer_process_wildcard.
If none is found there either, use the ternya default process.

:param body: dict of openstack notification.
:param message: kombu Message class
:return:
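The dispatch order amounts to: exact-match handlers first, regex "wildcard" handlers second, a default last. A self-contained sketch of that lookup (the event types and handlers are invented):

import re

exact = {"orchestration.stack.create.end": lambda body: "exact handler"}
wildcard = {re.compile(r"orchestration\.stack\..*"): lambda body: "wildcard handler"}
default = lambda body: "default handler"

def dispatch(event_type, body):
    handler = exact.get(event_type)
    if handler is None:
        handler = next((h for p, h in wildcard.items() if p.match(event_type)), default)
    return handler(body)

print(dispatch("orchestration.stack.create.end", {}))  # exact handler
print(dispatch("orchestration.stack.delete.end", {}))  # wildcard handler
print(dispatch("compute.instance.create.end", {}))     # default handler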
def walk_train_dirs(root_dir: str) -> Iterable[Tuple[str, Iterable[str]]]: """ Modify os.walk with the following: - return only root_dir and sub-dirs - return only training sub-dirs - stop recursion at training dirs :param root_dir: root dir to be walked :return: generator of (root_dir, training sub-dirs) pairs """ if is_train_dir(root_dir): yield '', [root_dir] return for dir_, subdirs, _ in os.walk(root_dir, topdown=True): # filter train sub-dirs train_subdirs = [subdir for subdir in subdirs if is_train_dir(path.join(dir_, subdir))] # stop the recursion at the train sub-dirs for subdir in train_subdirs: subdirs.remove(subdir) yield dir_, train_subdirs
Modify os.walk with the following: - return only root_dir and sub-dirs - return only training sub-dirs - stop recursion at training dirs :param root_dir: root dir to be walked :return: generator of (root_dir, training sub-dirs) pairs
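The pruning relies on os.walk's topdown=True contract: removing entries from the subdirs list in place stops the walk from descending into them. A standalone sketch of the same pattern; the is_train_dir predicate here is only an assumed stand-in that checks for a marker file:

import os
from os import path

def is_train_dir(dir_):  # assumed stand-in: a training dir contains a "trainer.yaml" marker
    return path.isfile(path.join(dir_, "trainer.yaml"))

for dir_, subdirs, _ in os.walk(".", topdown=True):
    train_subdirs = [s for s in subdirs if is_train_dir(path.join(dir_, s))]
    for s in train_subdirs:
        subdirs.remove(s)  # in-place removal prunes the walk below each training dir
    if train_subdirs:
        print(dir_, train_subdirs)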
def _remove_germline_filter(rec, name): """Check if germline based on STATUS/SS and REJECT flag. Handles VarDict, FreeBayes, MuTect, MuTect2 and VarScan. """ if _is_germline(rec): if rec.FILTER and name in rec.FILTER: return vcfutils.cyvcf_remove_filter(rec, name) elif not _is_somatic(rec): if rec.FILTER and name in rec.FILTER: return vcfutils.cyvcf_remove_filter(rec, name) return rec
Check if germline based on STATUS/SS and REJECT flag. Handles VarDict, FreeBayes, MuTect, MuTect2 and VarScan.