code
stringlengths
51
2.38k
docstring
stringlengths
4
15.2k
def _run_nucmer(self, ref, qry, outfile):
    """Run nucmer of new assembly vs original assembly.

    :param ref: reference sequence filename (presumably FASTA -- confirm with caller)
    :param qry: query sequence filename
    :param outfile: output filename handed to pymummer's nucmer runner
    """
    # All tuning options come from instance attributes configured elsewhere;
    # maxmatch/simplify are fixed for this use case.
    n = pymummer.nucmer.Runner( ref, qry, outfile, min_id=self.nucmer_min_id, min_length=self.nucmer_min_length, diagdiff=self.nucmer_diagdiff, maxmatch=True, breaklen=self.nucmer_breaklen, simplify=True, verbose=self.verbose )
    n.run()
Run nucmer of new assembly vs original assembly
def remove_listener(self, registration_id):
    """Removes a lifecycle listener.

    :param registration_id: (str), the id of the listener to be removed.
    :return: (bool), ``true`` if the listener is removed successfully,
        ``false`` otherwise.
    """
    if registration_id in self._listeners:
        del self._listeners[registration_id]
        return True
    return False
Removes a lifecycle listener. :param registration_id: (str), the id of the listener to be removed. :return: (bool), ``true`` if the listener is removed successfully, ``false`` otherwise.
def first_order_score(y, mean, scale, shape, skewness):
    """GAS Laplace update term using the gradient only.

    Parameters
    ----------
    y : float
        datapoint for the time series
    mean : float
        location parameter for the Laplace distribution
    scale : float
        scale parameter for the Laplace distribution
    shape : float
        tail thickness parameter (not used by this score)
    skewness : float
        skewness parameter (not used by this score)

    Returns
    -------
    float
        Score of the Laplace family, equal to sign(y - mean) / scale.
        (Division by zero occurs when y == mean, as in the original.)
    """
    residual = y - mean
    return residual / float(scale * np.abs(residual))
GAS Laplace Update term using gradient only - native Python function Parameters ---------- y : float datapoint for the time series mean : float location parameter for the Laplace distribution scale : float scale parameter for the Laplace distribution shape : float tail thickness parameter for the Laplace distribution skewness : float skewness parameter for the Laplace distribution Returns ---------- - Score of the Laplace family
def load(self, filename):
    """Load counters from a pickle file previously written by the saving
    counterpart.

    :param filename: path of the pickle file to read.
    :return: True on success, False if the file could not be read or
        unpickled.
    """
    try:
        with open(filename, 'rb') as fp:
            self.counters = cPickle.load(fp)
    except Exception:
        # Narrowed from a bare ``except:`` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any I/O or unpickling error still means
        # "could not load".
        logging.debug("can't load counter from file: %s", filename)
        return False
    return True
Load counters from a file; returns True on success, False otherwise
def get_port_bindings(container_config, client_config):
    """Generates the input dictionary contents for the ``port_bindings`` argument.

    :param container_config: Container configuration.
    :type container_config: dockermap.map.config.container.ContainerConfiguration
    :param client_config: Client configuration.
    :type client_config: dockermap.map.config.client.ClientConfiguration
    :return: Dictionary of ports with mapped port, and if applicable, with bind address
    :rtype: dict[unicode | str, list[unicode | str | int | tuple]]
    """
    port_bindings = {}
    if_ipv4 = client_config.interfaces
    if_ipv6 = client_config.interfaces_ipv6
    # groupby requires entries pre-sorted by the same key (the exposed port).
    for exposed_port, ex_port_bindings in itertools.groupby( sorted(container_config.exposes, key=_get_ex_port), _get_ex_port):
        bind_list = list(_get_port_bindings(ex_port_bindings, if_ipv4, if_ipv6))
        # Ports without any concrete binding are omitted from the result.
        if bind_list:
            port_bindings[exposed_port] = bind_list
    return port_bindings
Generates the input dictionary contents for the ``port_bindings`` argument. :param container_config: Container configuration. :type container_config: dockermap.map.config.container.ContainerConfiguration :param client_config: Client configuration. :type client_config: dockermap.map.config.client.ClientConfiguration :return: Dictionary of ports with mapped port, and if applicable, with bind address :rtype: dict[unicode | str, list[unicode | str | int | tuple]]
def get(self, transform=None):
    """Return the JSON defined at the S3 location given to the constructor.

    The cached copy is served until the TTL expires, after which the S3
    object is reloaded.

    :param transform: optional transform forwarded to the cache refresh.
    :return: ``(value, refreshed)`` where ``refreshed`` is True when the
        value was (re)fetched rather than served from cache.
    """
    # De Morgan of the original condition; evaluation order preserved.
    if self.has_expired() or self._cached_copy is None:
        return self._refresh_cache(transform), True
    return self._cached_copy, False
Return the JSON defined at the S3 location in the constructor. The get method will reload the S3 object after the TTL has expired. Fetch the JSON object from cache or S3 if necessary
def _rle(self, a): ia = np.asarray(a) n = len(ia) y = np.array(ia[1:] != ia[:-1]) i = np.append(np.where(y), n - 1) z = np.diff(np.append(-1, i)) p = np.cumsum(np.append(0, z))[:-1] return (z, p, ia[i])
rle implementation credit to Thomas Browne from his SOF post Sept 2015 Parameters ---------- a : array, shape[n,] input vector Returns ------- z : array, shape[nt,] run lengths p : array, shape[nt,] start positions of each run ar : array, shape[nt,] values for each run
def rand_bivar(X, rho):
    """Transform two unrelated random variables into correlated bivariate data.

    X : ndarray
        two univariate random variables with N observations as <N x 2> matrix
    rho : float
        The Pearson correlation coefficient, a number in [-1, +1]
    """
    import numpy as np
    correlated = np.empty(X.shape)
    correlated[:, 0] = X[:, 0]
    # Standard Cholesky-style mixing of the two columns.
    correlated[:, 1] = X[:, 0] * rho + X[:, 1] * np.sqrt(1.0 - rho ** 2)
    return correlated
Transform two unrelated random variables into correlated bivariate data X : ndarray two univariate random variables with N observations as <N x 2> matrix rho : float The Pearson correlations coefficient as number between [-1, +1]
def highlight(str1, str2):
    """Highlight occurrences of ``str2[0]`` inside ``str1`` with ``<b>`` tags.

    :param str1: text to highlight in.
    :param str2: sequence whose first element is the substring to highlight
        (presumably a list of matches -- confirm with caller); an empty
        sequence disables highlighting.
    :return: ``str1`` with matches wrapped in ``<b>...</b>``, or ``str1``
        unchanged when there is nothing to highlight.
    """
    # Fix: removed a stray debugging ``print('---...')`` left in the function.
    try:
        str2 = str2[0]
    except IndexError:
        str2 = None
    if str1 and str2:
        return str1.replace(str2, "<b>{}</b>".format(str2))
    else:
        return str1
Highlight str1 with the contents of str2.
def validate(self, value):
    """Check the validity of ``value`` against the list of validators.

    Each registered validator's ``validate`` is invoked in order; any of
    them may raise to reject the value.

    :return: True when every validator accepted the value.
    """
    for validator in self.validators:
        validator.validate(value)
    return True
checks the validity of 'value' given the list of validators
def get_type_name(type_name, sub_type=None):
    """Return the Go type corresponding to a spec type.

    :param type_name: spec type name (e.g. "string", "list", "integer").
    :param sub_type: element spec type for "list"; defaults to interface{}.
    :return: the Go type as a string; unknown types map to "interface{}".
    """
    if type_name == "list":
        if sub_type:
            element = get_type_name(type_name=sub_type, sub_type=None)
        else:
            element = "interface{}"
        return "[]%s" % element
    scalar_types = {
        "string": "string",
        "enum": "string",
        "float": "float64",
        "boolean": "bool",
        "integer": "int",
        "time": "float64",
    }
    return scalar_types.get(type_name, "interface{}")
Returns a go type according to a spec type
def medium_integer(self, column, auto_increment=False, unsigned=False):
    """Create a new medium integer column on the table.

    :param column: The column
    :type column: str
    :type auto_increment: bool
    :type unsigned: bool
    :rtype: Fluent
    """
    return self._add_column(
        'medium_integer',
        column,
        auto_increment=auto_increment,
        unsigned=unsigned,
    )
Create a new medium integer column on the table. :param column: The column :type column: str :type auto_increment: bool :type unsigned: bool :rtype: Fluent
def closest_pair(arr, give="indicies"):
    """Find the pair of indices corresponding to the closest elements in an
    array.

    If multiple pairs are equally close, all such pairs are returned.
    Optionally returns the closest distance itself.

    Parameters
    ----------
    arr : numpy.ndarray
        The array to search.
    give : {'indicies', 'distance'} (optional)
        Toggle return behavior. If 'distance', returns a single float - the
        closest distance itself. Default is indicies.

    Returns
    -------
    list of lists of two tuples
        Each inner list holds the two index-tuples of a nearest pair.

    >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1])
    >>> closest_pair(arr)
    [[(1,), (8,)], [(3,), (4,)]]
    """
    idxs = list(np.ndindex(arr.shape))
    outs = []
    min_dist = arr.max() - arr.min()
    # Fix: iterate each unordered pair once (the old code visited every
    # ordered pair twice and filtered duplicates with a list search).
    for i, idxa in enumerate(idxs):
        for idxb in idxs[i + 1:]:
            dist = abs(arr[idxa] - arr[idxb])
            if dist == min_dist:
                if [idxa, idxb] not in outs:
                    outs.append([idxa, idxb])
            elif dist < min_dist:
                min_dist = dist
                outs = [[idxa, idxb]]
    if give == "indicies":
        return outs
    elif give == "distance":
        return min_dist
    else:
        # KeyError kept for backward compatibility with existing callers.
        raise KeyError("give not recognized in closest_pair")
Find the pair of indices corresponding to the closest elements in an array. If multiple pairs are equally close, both pairs of indicies are returned. Optionally returns the closest distance itself. I am sure that this could be written as a cheaper operation. I wrote this as a quick and dirty method because I need it now to use on some relatively small arrays. Feel free to refactor if you need this operation done as fast as possible. - Blaise 2016-02-07 Parameters ---------- arr : numpy.ndarray The array to search. give : {'indicies', 'distance'} (optional) Toggle return behavior. If 'distance', returns a single float - the closest distance itself. Default is indicies. Returns ------- list of lists of two tuples List containing lists of two tuples: indicies the nearest pair in the array. >>> arr = np.array([0, 1, 2, 3, 3, 4, 5, 6, 1]) >>> closest_pair(arr) [[(1,), (8,)], [(3,), (4,)]]
def get_app_guid(self, app_name):
    """Returns the GUID for the app instance with the given name.

    Returns None when no app in the space summary matches.
    """
    summary = self.space.get_space_summary()
    matches = (
        app['guid'] for app in summary['apps'] if app['name'] == app_name
    )
    return next(matches, None)
Returns the GUID for the app instance with the given name.
def bcc( self, bcc_emails, global_substitutions=None, is_multiple=False, p=0):
    """Adds Bcc objects to the Personalization object.

    :param bcc_emails: A Bcc object, an email string, an (email, name)
        tuple, or a list of any mix of those.
    :type bcc_emails: Bcc, list(Bcc), tuple
    :param global_substitutions: A dict of substitutions for all recipients
    :type global_substitutions: dict
    :param is_multiple: Create a new personalization for each recipient
    :type is_multiple: bool
    :param p: p is the Personalization object or Personalization object index
    :type p: Personalization, integer, optional
    """
    if isinstance(bcc_emails, list):
        for email in bcc_emails:
            # Normalise plain strings / tuples into Bcc objects.
            if isinstance(email, str):
                email = Bcc(email, None)
            if isinstance(email, tuple):
                email = Bcc(email[0], email[1])
            self.add_bcc(email, global_substitutions, is_multiple, p)
    else:
        if isinstance(bcc_emails, str):
            bcc_emails = Bcc(bcc_emails, None)
        if isinstance(bcc_emails, tuple):
            bcc_emails = Bcc(bcc_emails[0], bcc_emails[1])
        self.add_bcc(bcc_emails, global_substitutions, is_multiple, p)
Adds Bcc objects to the Personalization object :param bcc_emails: A Bcc or list of Bcc objects :type bcc_emails: Bcc, list(Bcc), tuple :param global_substitutions: A dict of substitutions for all recipients :type global_substitutions: dict :param is_multiple: Create a new personalization for each recipient :type is_multiple: bool :param p: p is the Personalization object or Personalization object index :type p: Personalization, integer, optional
def is_python_interpreter(filename):
    """Evaluate whether a file is a python interpreter or not.

    A candidate must exist and have an interpreter-like name; pythonw
    launchers get platform-specific handling, and plain text files are
    rejected before falling back to a live ``--help`` probe.
    """
    real_filename = os.path.realpath(filename)
    if (not osp.isfile(real_filename) or not is_python_interpreter_valid_name(filename)):
        return False
    elif is_pythonw(filename):
        if os.name == 'nt':
            # On Windows pythonw is a binary launcher; a text file here
            # would be a script masquerading as the interpreter.
            if not encoding.is_text_file(real_filename):
                return True
            else:
                return False
        elif sys.platform == 'darwin':
            # Anaconda on macOS ships pythonw as a text wrapper script.
            if is_anaconda() and encoding.is_text_file(real_filename):
                return True
            elif not encoding.is_text_file(real_filename):
                return True
            else:
                return False
        else:
            # pythonw is not a thing on other platforms.
            return False
    elif encoding.is_text_file(real_filename):
        # A text file named like python cannot be the interpreter binary.
        return False
    else:
        return check_python_help(filename)
Evaluate whether a file is a python interpreter or not.
def read_option_value_from_nibble(nibble, pos, values):
    """Calculates the value used in the extended option fields.

    :param nibble: the 4-bit option header value.
    :param pos: current read position inside ``values``.
    :param values: raw option bytes (bytes or a sequence of ints).
    :return: tuple of (decoded value, updated position).
    :raises AttributeError: for the reserved nibble value 15.
    """
    if nibble <= 12:
        # Values 0-12 are encoded directly in the nibble.
        return nibble, pos
    elif nibble == 13:
        # One extended byte, offset by 13.
        tmp = struct.unpack("!B", bytes(values[pos:pos + 1]))[0] + 13
        pos += 1
        return tmp, pos
    elif nibble == 14:
        # Two extended bytes (big-endian), offset by 269.
        # Fix: the old code called ``.to_bytes`` on ``values[pos:]`` -- a
        # slice, which has no such method -- so this branch always raised.
        tmp = struct.unpack("!H", bytes(values[pos:pos + 2]))[0] + 269
        pos += 2
        return tmp, pos
    else:
        raise AttributeError("Unsupported option nibble " + str(nibble))
Calculates the value used in the extended option fields. :param nibble: the 4-bit option header value. :return: the value calculated from the nibble and the extended option value.
def _ReadPropertySet(self, property_set):
    """Reads properties from a property set.

    Args:
      property_set (pyolecf.property_set): OLECF property set.
    """
    # Only sections matching this plugin's class identifier are processed.
    for property_section in property_set.sections:
        if property_section.class_identifier != self._CLASS_IDENTIFIER:
            continue
        for property_value in property_section.properties:
            property_name = self._PROPERTY_NAMES.get( property_value.identifier, None)
            if not property_name:
                # NOTE(review): '0x{0:04}' renders the identifier as
                # zero-padded *decimal* after the 0x prefix; '{0:04x}'
                # would be hexadecimal -- confirm intent.
                property_name = '0x{0:04}'.format(property_value.identifier)
            value = self._GetValueAsObject(property_value)
            # Optional per-property value post-processing callbacks.
            if self._PROPERTY_VALUE_MAPPINGS:
                value_callback_name = self._PROPERTY_VALUE_MAPPINGS.get( property_name, None)
                if value_callback_name:
                    value_callback_method = getattr(self, value_callback_name, None)
                    if value_callback_method:
                        value = value_callback_method(value)
            if property_name in self._DATE_TIME_PROPERTIES:
                properties_dict = self.date_time_properties
                value = dfdatetime_filetime.Filetime(timestamp=value)
            else:
                properties_dict = self._properties
            # First occurrence wins; duplicate names are ignored.
            if property_name not in properties_dict:
                properties_dict[property_name] = value
Reads properties from a property set. Args: property_set (pyolecf.property_set): OLECF property set.
def login(username, password, **kwargs):
    """Login a user, returning a (user_id, session_id) tuple.

    Performs the DB credential check, then creates a file-backed session so
    that subsequent app requests do not need to log in again.

    args:
        username (string): The user's username
        password (string): The user's password (unencrypted)
    returns:
        (user_id, session_id) tuple
    raises:
        HydraError if the login fails
    """
    user_id = util.hdb.login_user(username, password)
    # Cookie/session settings fall back to built-in defaults when the
    # COOKIES config section does not provide them.
    hydra_session = session.Session({}, validate_key=config.get('COOKIES', 'VALIDATE_KEY', 'YxaDbzUUSo08b+'), type='file', cookie_expires=True, data_dir=config.get('COOKIES', 'DATA_DIR', '/tmp'), file_dir=config.get('COOKIES', 'FILE_DIR', '/tmp/auth'))
    hydra_session['user_id'] = user_id
    hydra_session['username'] = username
    hydra_session.save()
    return (user_id, hydra_session.id)
Login a user, returning a tuple containing their user_id and session_id This does the DB login to check the credentials, and then creates a session so that requests from apps do not need to perform a login args: username (string): The user's username password(string): The user's password (unencrypted) returns: A (user_id, session_id) tuple raises: HydraError if the login fails
def count(data, axis=None):
    """Count the number of non-NA values in this array along the given axis
    or axes (all axes when ``axis`` is None).
    """
    non_missing = np.logical_not(isnull(data))
    return np.sum(non_missing, axis=axis)
Count the number of non-NA in this array along the given axis or axes
def get_el(el):
    """Get value of given `el` tag element.

    Args:
        el (obj): Element reference to the input you want to read.

    Returns:
        str: Value of the element.

    Raises:
        ValueError: for tag types that have no getter implemented.
    """
    tag_name = el.elt.tagName.lower()
    # Only plain form controls expose their value directly.
    if tag_name in {"input", "textarea", "select"}:
        return el.value
    else:
        raise ValueError( "Getter for %s (%s) not implemented!" % (tag_name, el.id) )
Get value of given `el` tag element. Automatically choose proper method to set the `value` based on the type of the `el`. Args: el (obj): Element reference to the input you want to convert to typeahead. Returns: str: Value of the object.
def dK_dr_via_X(self, X, X2):
    """Compute the derivative of K wrt X, going through the scaled distance
    between X and X2.
    """
    r = self._scaled_dist(X, X2)
    return self.dK_dr(r)
compute the derivative of K wrt X going through X
def plot(self, title=LABEL_DEFAULT, xlabel=LABEL_DEFAULT, ylabel=LABEL_DEFAULT):
    """Create a Plot object representing the SArray.

    The plot renders inline in a Jupyter Notebook or in a native GUI
    window depending on ``turicreate.visualization.set_target``.

    Parameters
    ----------
    title : str
        Plot title; None omits the title entirely.
    xlabel : str
        X axis label; None omits it.
    ylabel : str
        Y axis label; None omits it.

    Returns
    -------
    out : Plot
        The visualization of the SArray.
    """
    def _normalize(label):
        # Empty string means "show a blank label"; None means "omit it".
        if label == "":
            return " "
        if label is None:
            return ""
        return label

    title = _normalize(title)
    xlabel = _normalize(xlabel)
    ylabel = _normalize(ylabel)
    return Plot(self.__proxy__.plot(title, xlabel, ylabel))
Create a Plot object representing the SArray. Notes ----- - The plot will render either inline in a Jupyter Notebook, or in a native GUI window, depending on the value provided in `turicreate.visualization.set_target` (defaults to 'auto'). Parameters ---------- title : str The plot title to show for the resulting visualization. If the title is None, the title will be omitted. xlabel : str The X axis label to show for the resulting visualization. If the xlabel is None, the X axis label will be omitted. ylabel : str The Y axis label to show for the resulting visualization. If the ylabel is None, the Y axis label will be omitted. Returns ------- out : Plot A :class: Plot object that is the visualization of the SArray. Examples -------- Suppose 'sa' is an SArray, we can create a plot of it using: >>> plt = sa.plot() To override the default plot title and axis labels: >>> plt = sa.plot(title="My Plot Title", xlabel="My X Axis", ylabel="My Y Axis") We can then visualize the plot using: >>> plt.show()
def delete_thumbnails(relative_source_path, root=None, basedir=None, subdir=None, prefix=None):
    """Delete all thumbnails for a source image.

    :param relative_source_path: source image path relative to ``root``.
    :return: result of ``_delete_using_thumbs_list`` -- presumably a count
        of deleted files; confirm against that helper.
    """
    thumbs = thumbnails_for_file(relative_source_path, root, basedir, subdir, prefix)
    return _delete_using_thumbs_list(thumbs)
Delete all thumbnails for a source image.
def where(cmd, path=None):
    """Locate ``cmd`` on the search path (a wrapper around ``shutil.which``).

    :param cmd: command name (or path) to look up.
    :param path: optional search path; defaults to the environment PATH.
    :return: absolute path to the executable.
    :raises ValueError: when the command cannot be found.
    """
    found = shutil.which(cmd, os.X_OK, path)
    if not found:
        raise ValueError("Could not find '{}' in the path".format(cmd))
    return os.path.abspath(found)
A function to wrap shutil.which for universal usage
def mean_curve(values, weights=None):
    """Compute the (weighted) mean along the first axis via numpy.average.

    :param values: sequence or ndarray of curves.
    :param weights: optional per-curve weights; defaults to equal weights.
    """
    if weights is None:
        n = len(values)
        weights = [1.0 / n] * n
    if not isinstance(values, numpy.ndarray):
        values = numpy.array(values)
    return numpy.average(values, axis=0, weights=weights)
Compute the mean by using numpy.average on the first axis.
def urlsafe_b64decode(data):
    """urlsafe_b64decode that tolerates missing trailing padding.

    :param data: URL-safe base64 ``bytes``, possibly without '=' padding.
    :return: the decoded bytes.
    """
    # Fix: compute the exact number of missing pad bytes. The old formula
    # ``4 - (len & 3)`` appended four bogus '=' to already-aligned input,
    # which only worked because binascii silently ignores them.
    pad = b'=' * (-len(data) % 4)
    return base64.urlsafe_b64decode(data + pad)
urlsafe_b64decode without padding
def calculate_descriptors(self,mol):
    """Calculates descriptors such as logP, charges and MR and saves them in
    the per-atom ``ligand_atoms`` dictionary (keyed by atom index).

    :param mol: RDKit-style molecule -- presumably matching the noH ligand
        atom ordering from the topology; confirm with caller.
    """
    self.ligand_atoms = {index:{"name":x.name} for index,x in enumerate(self.topology_data.universe.ligand_noH.atoms)}
    contribs = self.calculate_logP(mol)
    # Side effect: annotates mol atoms with _GasteigerCharge properties.
    self.calculate_Gasteiger_charges(mol)
    fcharges = self.calculate_formal_charge(mol)
    for atom in self.ligand_atoms.keys():
        # contribs holds a (logP, MR) pair per atom index.
        self.ligand_atoms[atom]["logP"]=contribs[atom][0]
        self.ligand_atoms[atom]["MR"]=contribs[atom][1]
        self.ligand_atoms[atom]["Gasteiger_ch"]=mol.GetAtomWithIdx(atom).GetProp("_GasteigerCharge")
        self.ligand_atoms[atom]["Formal charges"]=fcharges[atom]
    self.rot_bonds=self.get_rotatable_bonds(mol)
Calculates descriptors such as logP, charges and MR and saves that in a dictionary.
def _writeFeatures(self, i, image): basename = 'features-%d.txt' % i filename = '%s/%s' % (self._outputDir, basename) featureList = image['graphInfo']['features'] with open(filename, 'w') as fp: for feature in featureList: fp.write('%s\n\n' % feature.feature) return basename
Write a text file containing the features as a table. @param i: The number of the image in self._images. @param image: A member of self._images. @return: The C{str} features file name - just the base name, not including the path to the file.
def parse_pseudo_open(self, sel, name, has_selector, iselector, index):
    """Parse pseudo with opening bracket.

    Sets NOT/RELATIVE flags for ':not' / ':has' respectively, parses the
    enclosed sub-selectors onto ``sel`` and reports that a selector was
    consumed.
    """
    flags = FLG_PSEUDO | FLG_OPEN
    if name == ':not':
        flags |= FLG_NOT
    elif name == ':has':
        flags |= FLG_RELATIVE
    sel.selectors.append(self.parse_selectors(iselector, index, flags))
    return True
Parse pseudo with opening bracket.
def list_processed_parameter_groups(self):
    """Returns the existing parameter groups.

    :rtype: ~collections.Iterable[str]
    """
    path = '/archive/{}/parameter-groups'.format(self._instance)
    response = self._client.get_proto(path=path)
    # The response body is a serialized ParameterGroupInfo protobuf.
    message = archive_pb2.ParameterGroupInfo()
    message.ParseFromString(response.content)
    groups = getattr(message, 'group')
    return iter(groups)
Returns the existing parameter groups. :rtype: ~collections.Iterable[str]
def rename(store, src_path, dst_path):
    """Rename all items under the given path.

    If `store` provides a `rename` method, this will be called, otherwise
    the items are moved key-by-key via the `MutableMapping` interface.
    """
    src = normalize_storage_path(src_path)
    dst = normalize_storage_path(dst_path)
    if hasattr(store, 'rename'):
        store.rename(src, dst)
    else:
        _rename_from_keys(store, src, dst)
Rename all items under the given path. If `store` provides a `rename` method, this will be called, otherwise will fall back to implementation via the `MutableMapping` interface.
async def cycle(self, channel):
    """Rejoin channel: part and immediately join again with the remembered
    password.

    :raises NotInChannel: if we are not currently in ``channel``.
    """
    if not self.in_channel(channel):
        raise NotInChannel(channel)
    # Remember the password before parting, since parting may drop state.
    password = self.channels[channel]['password']
    await self.part(channel)
    await self.join(channel, password)
Rejoin channel.
def _set_scores(self):
    """Compute anomaly scores for the time series by sliding both a lagging
    window and a future window.

    Points too close to either end (inside the lag window at the start or
    the future window at the end) receive a score of 0.
    """
    anom_scores = {}
    self._generate_SAX()
    self._construct_all_SAX_chunk_dict()
    length = self.time_series_length
    lws = self.lag_window_size
    fws = self.future_window_size
    for i, timestamp in enumerate(self.time_series.timestamps):
        if i < lws or i > length - fws:
            anom_scores[timestamp] = 0
        else:
            anom_scores[timestamp] = self._compute_anom_score_between_two_windows(i)
    # Smooth the raw scores before publishing them.
    self.anom_scores = TimeSeries(self._denoise_scores(anom_scores))
Compute anomaly scores for the time series by sliding both lagging window and future window.
def Equals(self, other):
    """Test for equality.

    Args:
        other (obj):

    Returns:
        bool: True if `other` equals self (same PrevHash bytes and
        PrevIndex), False otherwise.
    """
    if other is None:
        return False
    same_hash = other.PrevHash.ToBytes() == self.PrevHash.ToBytes()
    if same_hash and other.PrevIndex == self.PrevIndex:
        return True
    return False
Test for equality. Args: other (obj): Returns: bool: True `other` equals self.
def delete_state_changes(self, state_changes_to_delete: List[int]) -> None:
    """Delete state changes.

    Args:
        state_changes_to_delete: List of ids to delete.
    """
    with self.write_lock, self.conn:
        # Fix: executemany expects one parameter *sequence* per row; a
        # plain list of ints is rejected by the DB-API driver, so each
        # bare id is wrapped in a 1-tuple.
        self.conn.executemany(
            'DELETE FROM state_events WHERE identifier = ?',
            [(identifier,) for identifier in state_changes_to_delete],
        )
Delete state changes. Args: state_changes_to_delete: List of ids to delete.
def get_real_end_line(token):
    """Finds the line on which the token really ends.

    With pyyaml, scalar tokens often end on a next line: the end mark sits
    past trailing whitespace, so walk backwards through that whitespace and
    subtract one line for every newline crossed.
    """
    end_line = token.end_mark.line + 1
    if not isinstance(token, yaml.ScalarToken):
        return end_line
    pos = token.end_mark.pointer - 1
    while (pos >= token.start_mark.pointer - 1 and token.end_mark.buffer[pos] in string.whitespace):
        if token.end_mark.buffer[pos] == '\n':
            end_line -= 1
        pos -= 1
    return end_line
Finds the line on which the token really ends. With pyyaml, scalar tokens often end on a next line.
def get_msg(self):
    """Prepare the preamble text displayed to the user in non-batch mode.

    Interpolates distro/vendor/tmpdir details into ``self.msg`` and wraps
    each line to 72 columns. Policies setting ``self.distro`` get that text
    substituted; override for anything more complicated.
    """
    width = 72
    substitutions = {
        'distro': self.distro,
        'vendor': self.vendor,
        'vendor_url': self.vendor_url,
        'vendor_text': self.vendor_text,
        'tmpdir': self.commons['tmpdir'],
    }
    raw = self.msg % substitutions
    wrapped = [
        fill(line, width, replace_whitespace=False) for line in raw.splitlines()
    ]
    return ''.join(line + '\n' for line in wrapped)
This method is used to prepare the preamble text to display to the user in non-batch mode. If your policy sets self.distro that text will be substituted accordingly. You can also override this method to do something more complicated.
def handle_dataframe(
    df: pd.DataFrame,
    entrez_id_name,
    log2_fold_change_name,
    adjusted_p_value_name,
    entrez_delimiter,
    base_mean=None,
) -> List[Gene]:
    """Convert rows of a differential-expression data frame to Gene objects.

    Rows with nulls in the id / fold-change / p-value columns (and, when
    ``base_mean`` names an existing column, in that column too) are dropped.
    A cell holding several delimiter-separated Entrez ids yields one Gene
    per id.
    """
    logger.info("In _handle_df()")
    if base_mean is not None and base_mean in df.columns:
        df = df[pd.notnull(df[base_mean])]
    for required_column in (entrez_id_name, log2_fold_change_name, adjusted_p_value_name):
        df = df[pd.notnull(df[required_column])]
    genes = []
    for _, row in df.iterrows():
        for entrez_id in str(row[entrez_id_name]).split(entrez_delimiter):
            genes.append(Gene(
                entrez_id=entrez_id,
                log2_fold_change=row[log2_fold_change_name],
                padj=row[adjusted_p_value_name],
            ))
    return genes
Convert a data frame of differential expression values into Gene objects. :param df: Data frame with columns showing values on differential expression. :param entrez_id_name: Column holding (possibly delimiter-separated) Entrez ids. :param log2_fold_change_name: Column holding log2 fold changes. :param adjusted_p_value_name: Column holding adjusted p-values. :param entrez_delimiter: Delimiter splitting multiple Entrez ids in one cell. :param base_mean: Optional column name; rows with a null value there are dropped. :return list: A list of Gene objects.
def build_routes(pfeed):
    """Given a ProtoFeed, return a DataFrame representing ``routes.txt``.

    Route ids are derived from the short name prefixed with 'r'; the
    shape_id column is dropped from the result.
    """
    columns = ['route_short_name', 'route_long_name', 'route_type', 'shape_id']
    routes = pfeed.frequencies[columns].drop_duplicates().copy()
    routes['route_id'] = 'r' + routes['route_short_name'].map(str)
    del routes['shape_id']
    return routes
Given a ProtoFeed, return a DataFrame representing ``routes.txt``.
def transitive_reduction(self):
    """Performs a transitive reduction on the DAG.

    The transitive reduction of a graph is a graph with as few edges as
    possible with the same reachability as the original graph. See
    https://en.wikipedia.org/wiki/Transitive_reduction

    Works by enumerating all paths (as node lists) to a fixed point, then
    removing every edge (u, v) for which a longer u->...->v path exists.
    NOTE(review): the path enumeration is exponential in the worst case --
    acceptable only for small graphs.
    """
    # Seed with every edge as a length-2 path.
    combinations = []
    for node, edges in self.graph.items():
        combinations += [[node, edge] for edge in edges]
    while True:
        new_combinations = []
        # Extend existing paths by joining ones that share an endpoint.
        for comb1 in combinations:
            for comb2 in combinations:
                if not comb1[-1] == comb2[0]:
                    continue
                new_entry = comb1 + comb2[1:]
                if new_entry not in combinations:
                    new_combinations.append(new_entry)
        if not new_combinations:
            break
        combinations += new_combinations
    # (start, end) pairs that are reachable via some longer path.
    constructed = {(c[0], c[-1]) for c in combinations if len(c) != 2}
    for node, edges in self.graph.items():
        bad_nodes = {e for n, e in constructed if node == n}
        self.graph[node] = edges - bad_nodes
Performs a transitive reduction on the DAG. The transitive reduction of a graph is a graph with as few edges as possible with the same reachability as the original graph. See https://en.wikipedia.org/wiki/Transitive_reduction
def arity_evaluation_checker(function):
    """Build an evaluation checker that returns True when it is guaranteed
    that all positional arguments have been accounted for.

    :param function: a function or class (for classes, ``__init__`` is
        inspected and ``self`` is dropped).
    :return: ``evaluation_checker(*args, **kwargs) -> bool``; it raises
        TypeError when unknown keyword arguments are passed to a function
        that does not accept ``**kwargs``.
    """
    is_class = inspect.isclass(function)
    if is_class:
        function = function.__init__
    # Fix: getargspec was removed in Python 3.11; getfullargspec exposes
    # the same data (``varkw`` replaces ``keywords``).
    function_info = inspect.getfullargspec(function)
    function_args = function_info.args
    if is_class:
        # Drop ``self`` for constructors.
        function_args = function_args[1:]

    def evaluation_checker(*args, **kwargs):
        kwarg_keys = set(kwargs.keys())
        if function_info.varkw is None:
            acceptable_kwargs = function_args[len(args):]
            if not kwarg_keys.issubset(acceptable_kwargs):
                # Fix: the TypeError used to be constructed but never
                # raised, silently accepting unknown keyword arguments.
                raise TypeError("Unrecognized Arguments: {0}".format(
                    [key for key in kwarg_keys
                     if key not in acceptable_kwargs]
                ))
        needed_args = function_args[len(args):]
        if function_info.defaults:
            # Arguments with defaults are never "needed".
            needed_args = needed_args[:-len(function_info.defaults)]
        return not needed_args or kwarg_keys.issuperset(needed_args)

    return evaluation_checker
Build an evaluation checker that will return True when it is guaranteed that all positional arguments have been accounted for.
def update_config_value(self, config_m, config_key):
    """Updates the corresponding list store of a changed config value.

    :param ConfigModel config_m: The config model that has been changed
    :param str config_key: The config key whose value has been changed
    """
    config_value = config_m.get_current_config_value(config_key)
    if config_m is self.core_config_model:
        target_store = self.core_list_store
    elif config_m is self.gui_config_model:
        target_store = self.gui_list_store
    else:
        # Unknown model: nothing to update.
        return
    self._update_list_store_entry(target_store, config_key, config_value)
Updates the corresponding list store of a changed config value :param ConfigModel config_m: The config model that has been changed :param str config_key: The config key who's value has been changed
def verify_param(self, param, must=(), r=None):
    """Check that all required keys are present in ``param``.

    :param param: dict of request parameters; the api key is filled in
        from ``self.apikey()`` when missing (``param`` is mutated).
    :param must: iterable of keys that must be present in ``param``.
    :param r: optional Result to populate; a fresh one is created if None.
    :return: ``r``, with ``Code.ARGUMENT_MISSING`` and a ``missing-<key>``
        detail set for the first absent required key.
    """
    if APIKEY not in param:
        param[APIKEY] = self.apikey()
    r = Result() if r is None else r
    # Fix: default changed from a shared mutable list ``[]`` to an
    # immutable empty tuple (mutable-default-argument pitfall).
    for required_key in must:
        if required_key not in param:
            r.code(Code.ARGUMENT_MISSING).detail('missing-' + required_key)
            break
    return r
return a Result carrying Code.ARGUMENT_MISSING if any key in must is not found in param
def write_csvs(self, asset_map, show_progress=False, invalid_data_behavior='warn'):
    """Read CSVs as DataFrames from our asset map and write them.

    Parameters
    ----------
    asset_map : dict[int -> str]
        A mapping from asset id to file path with the CSV data for that asset
    show_progress : bool
        Whether or not to show a progress bar while writing.
    invalid_data_behavior : {'warn', 'raise', 'ignore'}
        What to do when data is encountered that is outside the range of a
        uint32.
    """
    # Parse each CSV with 'day' as a parsed datetime index, using the
    # writer's expected dtypes.
    read = partial( read_csv, parse_dates=['day'], index_col='day', dtype=self._csv_dtypes, )
    # The generator keeps only one DataFrame in memory at a time.
    return self.write( ((asset, read(path)) for asset, path in iteritems(asset_map)), assets=viewkeys(asset_map), show_progress=show_progress, invalid_data_behavior=invalid_data_behavior, )
Read CSVs as DataFrames from our asset map. Parameters ---------- asset_map : dict[int -> str] A mapping from asset id to file path with the CSV data for that asset show_progress : bool Whether or not to show a progress bar while writing. invalid_data_behavior : {'warn', 'raise', 'ignore'} What to do when data is encountered that is outside the range of a uint32.
def decrypt(self, data):
    """DES decrypts the data based on the key it was initialised with.

    :param data: The encrypted bytes string to decrypt; its length must be
        a multiple of 8.
    :return: The decrypted bytes string.
    :raises ValueError: when ``len(data)`` is not a multiple of 8.
    """
    plaintext_blocks = []
    for offset in range(0, len(data), 8):
        block = data[offset:offset + 8]
        if len(block) != 8:
            raise ValueError("DES decryption must be a multiple of 8 "
                             "bytes")
        plaintext_blocks.append(self._decode_block(block))
    return b"".join(plaintext_blocks)
DES decrypts the data based on the key it was initialised with. :param data: The encrypted bytes string to decrypt :return: The decrypted bytes string
def db(cls, path=None):
    """Returns a pymongo Collection object from the current database
    connection.

    If the database connection is in test mode, the collection will be in
    the test database.

    @param path: if None, the PATH attribute of the current class is used;
        otherwise must be of the form "database.collection"
    @raise Exception: if neither cls.PATH nor path is valid
    """
    if cls.PATH is None and path is None:
        raise Exception("No database specified")
    if path is None:
        path = cls.PATH
    if "." not in path:
        raise Exception(('invalid path "%s"; database paths must be ' + 'of the form "database.collection"') % (path,))
    if CONNECTION.test_mode:
        # In test mode the full dotted path is used as the collection name
        # inside the dedicated test database.
        return CONNECTION.get_connection()[TEST_DATABASE_NAME][path]
    (db, coll) = path.split('.', 1)
    return CONNECTION.get_connection()[db][coll]
Returns a pymongo Collection object from the current database connection. If the database connection is in test mode, collection will be in the test database. @param path: if is None, the PATH attribute of the current class is used; if is not None, this is used instead @raise Exception: if neither cls.PATH or path are valid
def recalibrate_quality(bam_file, sam_ref, dbsnp_file, picard_dir):
    """Recalibrate alignments with GATK and provide pdf summary.

    Invokes the external ``picard_gatk_recalibrate.py`` script and returns
    the path of the recalibrated BAM it produced.
    """
    cl = ["picard_gatk_recalibrate.py", picard_dir, sam_ref, bam_file]
    if dbsnp_file:
        cl.append(dbsnp_file)
    subprocess.check_call(cl)
    # The script writes '<input-base>*gatkrecal.bam'; take the first match.
    out_file = glob.glob("%s*gatkrecal.bam" % os.path.splitext(bam_file)[0])[0]
    return out_file
Recalibrate alignments with GATK and provide pdf summary.
def wait_for_task(upid, timeout=300):
    """Wait until the task has been finished successfully.

    :param upid: Proxmox task identifier to poll.
    :param timeout: seconds to wait before giving up (default 300).
    :return: True when the task reports status OK, False on timeout.
    :raises SaltCloudExecutionFailure: when no initial task info is found.
    """
    start_time = time.time()
    info = _lookup_proxmox_task(upid)
    if not info:
        log.error('wait_for_task: No task information ' 'retrieved based on given criteria.')
        raise SaltCloudExecutionFailure
    while True:
        if 'status' in info and info['status'] == 'OK':
            log.debug('Task has been finished!')
            return True
        time.sleep(3)
        if time.time() - start_time > timeout:
            log.debug('Timeout reached while waiting for task to be finished')
            return False
        # NOTE(review): if this re-lookup returns None, the membership test
        # on the next iteration will raise TypeError -- confirm the helper
        # always returns a mapping once the task exists.
        info = _lookup_proxmox_task(upid)
Wait until a the task has been finished successfully
def sys_register_SDL_renderer(callback: Callable[[Any], None]) -> None:
    """Register a custom rendering function with libtcod.

    Note:
        This callback will only be called by the SDL renderer. The callback
        receives a :any:`CData <ffi-cdata>` void* to an SDL_Surface* struct
        and is called on every call to :any:`tcod.console_flush`.

    Args:
        callback Callable[[CData], None]: A function which takes a single
            argument.
    """
    with _PropagateException() as propagate:
        # Exceptions raised inside the C-invoked hook are re-raised in
        # Python through the propagate handler.
        @ffi.def_extern(onerror=propagate)
        def _pycall_sdl_hook(sdl_surface: Any) -> None:
            callback(sdl_surface)
        lib.TCOD_sys_register_SDL_renderer(lib._pycall_sdl_hook)
Register a custom randering function with libtcod. Note: This callback will only be called by the SDL renderer. The callack will receive a :any:`CData <ffi-cdata>` void* to an SDL_Surface* struct. The callback is called on every call to :any:`tcod.console_flush`. Args: callback Callable[[CData], None]: A function which takes a single argument.
def bind_type(python_value):
    """Return a Gibica type derived from a Python type.

    :param python_value: raw Python value to wrap; None maps to NoneType.
    :raises TypeError: when the Python type has no Gibica counterpart.
    """
    if python_value is None:
        return NoneType()
    type_map = {'bool': Bool, 'int': Int, 'float': Float}
    wrapper = type_map.get(type(python_value).__name__)
    if wrapper is None:
        raise TypeError('Impossible to recognize underlying type.')
    return wrapper(python_value)
Return a Gibica type derived from a Python type.
def get_func_args(func):
    """Given a callable, return a tuple of argument names.

    Handles `functools.partial` objects and class-based callables.

    .. versionchanged:: 3.0.0a1
        Do not return bound arguments, eg. ``self``.
    """
    if isinstance(func, functools.partial):
        target = func.func
    elif inspect.isfunction(func) or inspect.ismethod(func):
        target = func
    else:
        # Class-based callable: inspect its __call__.
        target = func.__call__
    return _signature(target)
Given a callable, return a tuple of argument names. Handles `functools.partial` objects and class-based callables. .. versionchanged:: 3.0.0a1 Do not return bound arguments, eg. ``self``.
def make_interval(long_name, short_name):
    """Create an interval segment.

    Builds a pyparsing Group matching a signed integer followed by the unit
    name (plural or singular, long or short form, any case), upper-cased in
    the result and stored under ``long_name``.
    """
    # NOTE(review): the sign regex "(-+)?" matches one or more hyphens but
    # never '+'; "[-+]?" may have been intended -- confirm.
    return Group( Regex("(-+)?[0-9]+") + ( upkey(long_name + "s") | Regex(long_name + "s").setParseAction(upcaseTokens) | upkey(long_name) | Regex(long_name).setParseAction(upcaseTokens) | upkey(short_name) | Regex(short_name).setParseAction(upcaseTokens) ) ).setResultsName(long_name)
Create an interval segment
def samples(self):
    """Dataframe with samples drawn from isochrone according to posterior.

    Columns include both the sampling parameters from the MCMC fit and the
    evaluation of the Isochrone at each sample point.

    :raises AttributeError: when MCMC has not been run (and no samples were
        loaded from file).
    """
    if self._samples is None:
        if not hasattr(self, 'sampler'):
            raise AttributeError('Must run MCMC (or load from file) '
                                 'before accessing samples')
        # Lazily materialise the samples from the sampler chain.
        self._make_samples()
    return self._samples
Dataframe with samples drawn from isochrone according to posterior Columns include both the sampling parameters from the MCMC fit (mass, age, Fe/H, [distance, A_V]), and also evaluation of the :class:`Isochrone` at each of these sample points---this is how chains of physical/observable parameters get produced.
def _iter_keys(key):
    """! Iterate over subkeys of a registry key, yielding an opened handle
    for each subkey."""
    # QueryInfoKey()[0] is the number of subkeys.
    for i in range(winreg.QueryInfoKey(key)[0]):
        yield winreg.OpenKey(key, winreg.EnumKey(key, i))
! Iterate over subkeys of a key
def primers(self, tm=60):
    """Design primers for amplifying the assembled sequence.

    :param tm: melting temperature (lower than overlaps is best).
    :type tm: float
    :returns: Primer list (the output of coral.design.primers).
    :rtype: list
    """
    # NOTE(review): assigning to ``self.primers`` shadows this method with
    # the returned list on the instance, so it cannot be called a second
    # time on the same object -- confirm whether that is intentional.
    self.primers = coral.design.primers(self.template, tm=tm)
    return self.primers
Design primers for amplifying the assembled sequence. :param tm: melting temperature (lower than overlaps is best). :type tm: float :returns: Primer list (the output of coral.design.primers). :rtype: list
def gborders(img, alpha=1.0, sigma=1.0):
    """Stopping criterion for image borders.

    Computes g = 1 / sqrt(1 + alpha * |grad(G_sigma * img)|), which is
    close to 1 in flat regions and small near strong edges.

    :param img: input image array.
    :param alpha: edge-sensitivity weight.
    :param sigma: Gaussian smoothing scale for the gradient.
    """
    grad_magnitude = gaussian_gradient_magnitude(img, sigma, mode='constant')
    return 1.0 / np.sqrt(1.0 + alpha * grad_magnitude)
Stopping criterion for image borders.
def get_wsgi_request_object(curr_request, method, url, headers, body):
    """Based on the given request parameters, constructs and returns the
    WSGI request object.

    Headers propagated from the outer request are merged with the per-call
    headers (per-call values win); a default content type is applied when
    none is given.
    """
    x_headers = headers_to_include_from_request(curr_request)
    method, t_headers = pre_process_method_headers(method, headers)
    if "CONTENT_TYPE" not in t_headers:
        t_headers.update({"CONTENT_TYPE": _settings.DEFAULT_CONTENT_TYPE})
    x_headers.update(t_headers)
    content_type = x_headers.get("CONTENT_TYPE", _settings.DEFAULT_CONTENT_TYPE)
    _request_factory = BatchRequestFactory()
    # The factory exposes one builder method per HTTP verb (get/post/...).
    _request_provider = getattr(_request_factory, method)
    secure = _settings.USE_HTTPS
    request = _request_provider(url, data=body, secure=secure, content_type=content_type, **x_headers)
    return request
Based on the given request parameters, constructs and returns the WSGI request object.
def from_snl(cls, snl):
    """Create TransformedStructure from SNL.

    Args:
        snl (StructureNL): Starting snl

    Returns:
        TransformedStructure
    """
    history = []
    for h in snl.history:
        entry = h.description
        # Preserve the SNL provenance alongside each history entry.
        entry['_snl'] = {'url': h.url, 'name': h.name}
        history.append(entry)
    return cls(snl.structure, history=history)
Create TransformedStructure from SNL. Args: snl (StructureNL): Starting snl Returns: TransformedStructure
def get_tissue_in_references(self, entry):
    """get list of models.TissueInReference from XML node entry

    :param entry: XML node entry
    :return: list of :class:`pyuniprot.manager.models.TissueInReference`
        objects, one per distinct tissue named by the entry
    """
    tissue_in_references = []
    query = "./reference/source/tissue"
    tissues = {x.text for x in entry.iterfind(query)}
    for tissue in tissues:
        # self.tissues caches one model object per tissue name so repeated
        # tissues across entries share a single row object.
        if tissue not in self.tissues:
            self.tissues[tissue] = models.TissueInReference(tissue=tissue)
        tissue_in_references.append(self.tissues[tissue])
    return tissue_in_references
get list of models.TissueInReference from XML node entry :param entry: XML node entry :return: list of :class:`pyuniprot.manager.models.TissueInReference` objects
def stream_members(self, stream_id):
    """Get stream members.

    :param stream_id: id of the stream to list the membership of.
    :return: (status_code, response) tuple from the admin membership API.
    """
    response, status_code = self.__pod__.Streams.get_v1_admin_stream_id_membership_list( sessionToken=self.__session__, id=stream_id ).result()
    self.logger.debug('%s: %s' % (status_code, response))
    return status_code, response
get stream members
def process_binding_statements(self):
    """Looks for Binding events in the graph and extracts them into INDRA
    Complex statements.

    A Binding event node must have outgoing Theme and Theme2 edges; the two
    entities those edges point to become the constituents of the Complex.
    """
    # NOTE(review): ``G`` is assigned but unused in this method.
    G = self.G
    statements = []
    binding_nodes = self.find_event_with_outgoing_edges('Binding', ['Theme', 'Theme2'])
    for node in binding_nodes:
        theme1 = self.get_entity_text_for_relation(node, 'Theme')
        theme1_node = self.get_related_node(node, 'Theme')
        theme2 = self.get_entity_text_for_relation(node, 'Theme2')
        assert(theme1 is not None)
        assert(theme2 is not None)
        # Evidence is anchored on the first Theme entity's node.
        evidence = self.node_to_evidence(theme1_node, is_direct=True)
        statements.append(Complex([s2a(theme1), s2a(theme2)], evidence=evidence))
    return statements
Looks for Binding events in the graph and extracts them into INDRA statements. In particular, looks for a Binding event node with outgoing edges with relations Theme and Theme2 - the entities these edges point to are the two constituents of the Complex INDRA statement.
def _get_parameter_conversion_entry(parameter_config):
    """Get information needed to convert the given parameter to its API type.

    Args:
        parameter_config: The dictionary containing information specific to
            the parameter in question (from request.parameters in the
            method config).

    Returns:
        The entry from _PARAM_CONVERSION_MAP with functions/information
        needed to validate and convert the parameter, or None when no
        entry applies.
    """
    entry = _PARAM_CONVERSION_MAP.get(parameter_config.get('type'))
    # Fall back to the enum converter when the type is unknown but the
    # config declares an 'enum' member.
    if entry is None and 'enum' in parameter_config:
        entry = _PARAM_CONVERSION_MAP['enum']
    return entry
Get information needed to convert the given parameter to its API type. Args: parameter_config: The dictionary containing information specific to the parameter in question. This is retrieved from request.parameters in the method config. Returns: The entry from _PARAM_CONVERSION_MAP with functions/information needed to validate and convert the given parameter from a string to the type expected by the API.
def _parse_deploy(self, deploy_values: dict, service_config: dict):
    """Parse deploy key.

    Args:
        deploy_values (dict): deploy configuration values
        service_config (dict): Service configuration (mutated in place)
    """
    mode = {}
    # NOTE(review): ``'restart_policy' in d_value`` tests *substring*
    # membership on the key string, not equality -- confirm keys are exact.
    for d_value in deploy_values:
        if 'restart_policy' in d_value:
            restart_spec = docker.types.RestartPolicy( **deploy_values[d_value])
            service_config['restart_policy'] = restart_spec
        if 'placement' in d_value:
            for constraints_key, constraints_value in deploy_values[d_value].items():
                service_config[constraints_key] = constraints_value
        # 'mode' and 'replicas' both feed the ServiceMode spec below.
        if 'mode' in d_value:
            mode[d_value] = deploy_values[d_value]
        if 'replicas' in d_value:
            mode[d_value] = deploy_values[d_value]
        if 'resources' in d_value:
            resource_spec = self._parse_resources( deploy_values, d_value)
            service_config['resources'] = resource_spec
    mode_spec = docker.types.ServiceMode(**mode)
    service_config['mode'] = mode_spec
Parse deploy key. Args: deploy_values (dict): deploy configuration values service_config (dict): Service configuration
def supportsType(self, type_uri):
    """Does this endpoint support this type?

    I consider C{/server} endpoints to implicitly support C{/signon}.
    """
    if type_uri in self.type_uris:
        return True
    return type_uri == OPENID_2_0_TYPE and self.isOPIdentifier()
Does this endpoint support this type? I consider C{/server} endpoints to implicitly support C{/signon}.
def _RunSingleHook(self, hook_cls, executed_set, required=None):
    """Run the single hook specified by resolving all its prerequisites.

    Args:
        hook_cls: the hook class to run.
        executed_set: set of hook classes already executed in this pass;
            used both to short-circuit and to record completion.
        required: name of the hook requiring this one (logging only).
    """
    if hook_cls in executed_set:
        return
    # Depth-first: run every prerequisite before this hook itself.
    for pre_hook in hook_cls.pre:
        self._RunSingleHook(pre_hook, executed_set, required=hook_cls.__name__)
    cls_instance = hook_cls()
    if required:
        logging.debug("Initializing %s, required by %s", hook_cls.__name__,
                      required)
    else:
        logging.debug("Initializing %s", hook_cls.__name__)
    cls_instance.Run()
    executed_set.add(hook_cls)
    # RunOnce fires only the first time ever, across passes.
    if hook_cls not in self.already_run_once:
        cls_instance.RunOnce()
        self.already_run_once.add(hook_cls)
Run the single hook specified by resolving all its prerequisites.
def PyParseRangeCheck(lower_bound, upper_bound):
    """Verify that a parsed number is within a defined range.

    Builds a callback for pyparsing's setParseAction that raises a
    ParseException when the read number falls outside the bounds.

    Args:
        lower_bound (int): lower bound of the range.
        upper_bound (int): upper bound of the range.

    Returns:
        Function: callback usable with pyparsing setParseAction.
    """
    def CheckRange(string, location, tokens):
        # A missing token is treated as -1 (will fail the lower bound
        # for non-negative ranges), mirroring the original behaviour.
        try:
            value = tokens[0]
        except IndexError:
            value = -1
        if value < lower_bound:
            raise pyparsing.ParseException(
                'Value: {0:d} precedes lower bound: {1:d}'.format(
                    value, lower_bound))
        if value > upper_bound:
            raise pyparsing.ParseException(
                'Value: {0:d} exceeds upper bound: {1:d}'.format(
                    value, upper_bound))

    return CheckRange
Verify that a number is within a defined range. This is a callback method for pyparsing setParseAction that verifies that a read number is within a certain range. To use this method it needs to be defined as a callback method in setParseAction with the upper and lower bound set as parameters. Args: lower_bound (int): lower bound of the range. upper_bound (int): upper bound of the range. Returns: Function: callback method that can be used by pyparsing setParseAction.
def read_hierarchy(self, fid):
    """Read hierarchy information from an acclaim skeleton file stream.

    Between ``begin`` and ``end`` markers, each line names a parent
    vertex followed by the names of its children. Returns the first
    line read after the terminating ``end``.
    """
    lin = self.read_line(fid)
    while lin != 'end':
        parts = lin.split()
        if lin != 'begin':
            ind = self.get_index_by_name(parts[0])
            # Remaining tokens are the children of the named vertex.
            for i in range(1, len(parts)):
                self.vertices[ind].children.append(self.get_index_by_name(parts[i]))
        lin = self.read_line(fid)
    # Consume the line following 'end' and hand it back to the caller.
    lin = self.read_line(fid)
    return lin
Read hierarchy information from acclaim skeleton file stream.
def show_gateway_device(self, gateway_device_id, **_params):
    """Fetch a single gateway device by id."""
    uri = self.gateway_device_path % gateway_device_id
    return self.get(uri, params=_params)
Fetch a gateway device.
def restore(self):
    """Reinstall the signal handlers saved before this object took over."""
    handlers = [
        (signal.SIGINT, self.original_sigint),
        (signal.SIGTERM, self.original_sigterm),
    ]
    if os.name == 'nt':
        # SIGBREAK only exists on Windows.
        handlers.append((signal.SIGBREAK, self.original_sigbreak))
    for signum, handler in handlers:
        signal.signal(signum, handler)
Restore signal handlers to their original settings.
def post(url, data=None, json=None, **kwargs):
    """A wrapper for ``requests.post`` adding logging and JSON encoding.

    When the content type is JSON and ``data`` is given, the payload is
    serialized before the request is sent.
    """
    _set_content_type(kwargs)
    is_json = _content_type_is_json(kwargs)
    if is_json and data is not None:
        data = dumps(data)
    _log_request('POST', url, kwargs, data)
    resp = requests.post(url, data, json, **kwargs)
    _log_response(resp)
    return resp
A wrapper for ``requests.post``.
def all(self):
    """Return a LayoutSlice covering every first-level field of the layout."""
    self._check_layout()
    full_range = slice(0, len(self.layout.fields), 1)
    return LayoutSlice(self.layout, full_range)
Returns all layout objects of first level of depth
def get_one(self, fields=None):
    """Convenience function for queries expected to return one result.

    Validates the response before returning.

    :param fields: list of fields to return in the result
    :raise MultipleResults: if more than one match is found
    :return: record content dict, or ``{}`` when nothing matched
    """
    # ``fields=list()`` was a shared mutable default; use None-sentinel.
    if fields is None:
        fields = []
    response = self.session.get(
        self._get_table_url(),
        params=self._get_formatted_query(
            fields, limit=None, order_by=[], offset=None))
    content = self._get_content(response)
    if len(content) > 1:
        raise MultipleResults('Multiple results for get_one()')
    if not content:
        return {}
    return content[0]
Convenience function for queries returning only one result. Validates response before returning. :param fields: List of fields to return in the result :raise: :MultipleResults: if more than one match is found :return: - Record content
def init_logging():
    """Initialise Python logging with a timestamped DEBUG-level format."""
    log_format = ('%(asctime)s.%(msecs)03d | %(name)-60s | %(levelname)-7s '
                  '| %(message)s')
    logging.basicConfig(
        format=log_format, datefmt='%H:%M:%S', level=logging.DEBUG)
Initialise Python logging.
def frequencies_plot(self, xmin=0, xmax=200):
    """Generate the k-mer frequencies line plot section.

    :param xmin: lower x-axis limit (k-mer frequency)
    :param xmax: upper x-axis limit (k-mer frequency)
    """
    # BUG(review): the original read ``helptext = pconfig = {...}``,
    # binding the plot-config dict as the section help text. helptext
    # must be a string; the original text appears lost, so a minimal
    # placeholder is used — restore the intended help text if known.
    helptext = (
        'The k-mer frequency distribution can reveal library complexity '
        'and coverage characteristics.'
    )
    pconfig = {
        'id': 'Jellyfish_kmer_plot',
        'title': 'Jellyfish: K-mer plot',
        'ylab': 'Counts',
        'xlab': 'k-mer frequency',
        'xDecimals': False,
        'xmin': xmin,
        'xmax': xmax
    }
    self.add_section(
        anchor='jellyfish_kmer_plot',
        description='The K-mer plot lets you estimate library complexity and coverage from k-mer content.',
        helptext=helptext,
        plot=linegraph.plot(self.jellyfish_data, pconfig)
    )
Generate the k-mer frequencies plot section.
def get_shortcut(self, name):
    """Return the given action's shortcut as a string.

    :param name: Action to retrieve the shortcut for.
    :type name: unicode
    :return: Action shortcut ('' when the action is unknown).
    :rtype: unicode
    """
    action = self.get_action(self.__normalize_name(name))
    if action:
        return action.shortcut().toString()
    return ""
Returns given action shortcut. :param name: Action to retrieve the shortcut. :type name: unicode :return: Action shortcut. :rtype: unicode
def get_pins(self):
    """Get a list containing references to all 16 pins of the chip.

    The MCP23017 exposes two 8-pin banks (A0-A7 and B0-B7).

    :return: list of 16 GPIOPin objects, bank A first.
    """
    # BUG(review): the original used ``range(0, 7)`` which produced only
    # A0-A6 / B0-B6 (14 pins) although the docstring promises all 16.
    pins = []
    for bank in ('A', 'B'):
        for idx in range(8):
            pin_name = '{}{}'.format(bank, idx)
            pins.append(
                GPIOPin(self, '_action', {'pin': pin_name}, name=pin_name))
    return pins
Get a list containing references to all 16 pins of the chip. :Example: >>> expander = MCP23017I2C(gw) >>> pins = expander.get_pins() >>> pprint.pprint(pins) [<GPIOPin A0 on MCP23017I2C>, <GPIOPin A1 on MCP23017I2C>, <GPIOPin A2 on MCP23017I2C>, <GPIOPin A3 on MCP23017I2C>, <GPIOPin A4 on MCP23017I2C>, <GPIOPin A5 on MCP23017I2C>, <GPIOPin A6 on MCP23017I2C>, <GPIOPin B0 on MCP23017I2C>, <GPIOPin B1 on MCP23017I2C>, <GPIOPin B2 on MCP23017I2C>, <GPIOPin B3 on MCP23017I2C>, <GPIOPin B4 on MCP23017I2C>, <GPIOPin B5 on MCP23017I2C>, <GPIOPin B6 on MCP23017I2C>]
def compose(f, *fs):
    """Compose functions right to left.

    compose(f, g, h)(x) -> f(g(h(x)))

    Args:
        f, *fs: head and rest of a sequence of callables. The rightmost
            function may accept any arguments; all preceding functions
            must be unary.

    Returns:
        The composition of the argument functions, with the same
        signature as the rightmost function passed.
    """
    ordered = (f,) + fs

    def composed(*args, **kwargs):
        # Apply the rightmost function first, then fold leftwards.
        result = ordered[-1](*args, **kwargs)
        for fn in reversed(ordered[:-1]):
            result = fn(result)
        return result

    return composed
Compose functions right to left. compose(f, g, h)(x) -> f(g(h(x))) Args: f, *fs: The head and rest of a sequence of callables. The rightmost function passed can accept any arguments and the returned function will have the same signature as this last provided function. All preceding functions must be unary. Returns: The composition of the argument functions. The returned function will accept the same arguments as the rightmost passed in function.
def _inputcooker_store(self, char):
    """Route a cooked byte to the subnegotiation buffer or the input queue."""
    if not self.sb:
        self.inputcooker_store_queue(char)
    else:
        # Inside subnegotiation: accumulate into the SB data buffer.
        self.sbdataq = self.sbdataq + char
Put the cooked data in the correct queue
def _changeGroupImage(self, image_id, thread_id=None):
    """Change a thread's image to a previously uploaded image.

    :param image_id: ID of uploaded image
    :param thread_id: User/Group ID whose image to change
    :return: the image id that was set
    :raises FBchatException: if the request failed
    """
    thread_id, thread_type = self._getThread(thread_id, None)
    data = {"thread_image_id": image_id, "thread_id": thread_id}
    # Response body is not needed (the original bound it to an unused
    # local); _post is expected to raise on failure.
    self._post(self.req_url.THREAD_IMAGE, data, fix_request=True, as_json=True)
    return image_id
Changes a thread image from an image id :param image_id: ID of uploaded image :param thread_id: User/Group ID to change image. See :ref:`intro_threads` :raises: FBchatException if request failed
def _update_field(self, action, field, value, max_tries, tries=0):
    """Private update_field helper wrapped by Document.update_field.

    Re-fetches the document, applies ``action`` and saves; on an HTTP
    409 conflict it retries recursively, tracking ``tries`` to bound
    the recursion at ``max_tries``.

    Args:
        action: callable(document, field, value) applying the change.
        field: the field to update.
        value: the new value for the field.
        max_tries: maximum number of retries on 409 conflicts.
        tries: current retry count (internal recursion state).
    """
    self.fetch()
    action(self, field, value)
    try:
        self.save()
    except requests.HTTPError as ex:
        # Only edit conflicts (409) are retried; anything else propagates.
        if tries < max_tries and ex.response.status_code == 409:
            self._update_field(
                action, field, value, max_tries, tries=tries+1)
        else:
            raise
Private update_field method. Wrapped by Document.update_field. Tracks a "tries" var to help limit recursion.
def document_quote(document):
    """Quote the given document URL, leaving any query string untouched."""
    doc, query = urllib.splitquery(document)
    # '/=,' are kept unquoted in the path part.
    doc = url_quote_part(doc, '/=,')
    if not query:
        return doc
    return "%s?%s" % (doc, query)
Quote given document.
def is_text_extractor_available(extension: str) -> bool:
    """Is a text extractor available for the specified extension?

    :param extension: file extension (case-insensitive); may be None.
    :return: True if an extractor is registered and available.
    :raises ValueError: if the registered availability info is malformed.
    """
    if extension is not None:
        extension = extension.lower()
    info = ext_map.get(extension)
    if info is None:
        return False
    availability = info[AVAILABILITY]
    # isinstance is the idiomatic type check (was ``type(x) == bool``);
    # bool cannot be subclassed, so behaviour is identical.
    if isinstance(availability, bool):
        return availability
    if callable(availability):
        return availability()
    raise ValueError(
        "Bad information object for extension: {}".format(extension))
Is a text extractor available for the specified extension?
def set_of_vars(arg_plot):
    """Build the set of requested variables valid for plate plots.

    Args:
        arg_plot (str): variable names separated with ``,``.

    Returns:
        set of str: the names that exist in phyvars.PLATES.
    """
    requested = arg_plot.split(',')
    return {var for var in requested if var in phyvars.PLATES}
Build set of needed variables. Args: arg_plot (str): string with variable names separated with ``,``. Returns: set of str: set of variables.
def match_gpus(available_devices, requirements):
    """Determine sufficient GPUs for the given requirements.

    :param available_devices: list of GPUDevices
    :param requirements: list of GPURequirements
    :return: list of devices satisfying the requirements, in order
    :raises InsufficientGPUError: if the requirements cannot all be met
    """
    if not requirements:
        return []
    if not available_devices:
        raise InsufficientGPUError("No GPU devices available, but {} devices required.".format(len(requirements)))
    # Work on a copy so matched devices can be removed without mutating
    # the caller's list.
    remaining = available_devices.copy()
    matched = []
    for requirement in requirements:
        device = search_device(requirement, remaining)
        if not device:
            raise InsufficientGPUError("Not all GPU requirements could be fulfilled.")
        remaining.remove(device)
        matched.append(device)
    return matched
Determines sufficient GPUs for the given requirements and returns a list of GPUDevices. If there aren't sufficient GPUs a InsufficientGPUException is thrown. :param available_devices: A list of GPUDevices :param requirements: A list of GPURequirements :return: A list of sufficient devices
def viewport_to_screen_space(framebuffer_size: vec2, point: vec4) -> vec2:
    """Transform a point in viewport space to screen space.

    Scales the xy components by the framebuffer size, then performs the
    perspective divide by ``point.w``.
    """
    scaled = framebuffer_size * point.xy
    return scaled / point.w
Transform point in viewport space to screen space.
def ols_covariance(self):
    """Create the OLS estimate of the residual covariance matrix.

    Returns
    -------
    np.ndarray
        The OLS covariance estimate: residual outer product scaled by
        1/T, where T is the post-lag sample length.
    """
    # Stack each series with the first ``lags`` observations dropped.
    Y = np.array([reg[self.lags:reg.shape[0]] for reg in self.data])
    # Compute residuals once (the original evaluated self.residuals(Y)
    # twice for the same value).
    resid = self.residuals(Y)
    return (1.0 / Y[0].shape[0]) * np.dot(resid, np.transpose(resid))
Creates OLS estimate of the covariance matrix Returns ---------- The OLS estimate of the covariance matrix
def load_device(self, serial=None):
    """Create an AndroidDevice for the given serial number.

    If no serial is given, reads ANDROID_SERIAL from the environment;
    failing that, uses the single device reported by 'adb devices'.

    Raises:
        Error: if no device is attached, the serial cannot be resolved
            unambiguously, or the chosen serial is not visible to adb.
    """
    serials = android_device.list_adb_devices()
    if not serials:
        raise Error('No adb device found!')
    if not serial:
        env_serial = os.environ.get('ANDROID_SERIAL', None)
        if env_serial is not None:
            serial = env_serial
        elif len(serials) == 1:
            # Exactly one device attached: use it implicitly.
            serial = serials[0]
        else:
            raise Error(
                'Expected one phone, but %d found. Use the -s flag or '
                'specify ANDROID_SERIAL.' % len(serials))
    if serial not in serials:
        raise Error('Device "%s" is not found by adb.' % serial)
    ads = android_device.get_instances([serial])
    # Internal invariant: one serial yields exactly one instance.
    assert len(ads) == 1
    self._ad = ads[0]
Creates an AndroidDevice for the given serial number. If no serial is given, it will read from the ANDROID_SERIAL environmental variable. If the environmental variable is not set, then it will read from 'adb devices' if there is only one.
def exists(self, obj_id, obj_type=None):
    """Return whether the given object exists in the search index.

    :param obj_id: The object's unique identifier.
    :param obj_type: The object's type, if applicable.
    """
    key = self.object_key(obj_id, obj_type)
    return key in self._data
Return whether the given object exists in the search index. :param obj_id: The object's unique identifier. :param obj_type: The object's type.
def return_real_id(self):
    """Return the list of real_id values for the dataset.

    A real_id is the filename minus its prefix and suffix.

    :raises errors.EmptyDatabase: if the database was never populated.
    """
    # Note: deliberately ``is False`` (not truthiness) to match the
    # original strict check against the boolean flag.
    if self._prepopulated is False:
        raise errors.EmptyDatabase(self.dbpath)
    return return_real_id_base(self.dbpath, self._set_object)
Returns a list of real_id values for the dataset (a real_id is the filename minus the suffix and prefix). Returns ------- list of str: the real_id values.
def set_unobserved_before(self, tlen, qlen, nt, p):
    """Set the unobservable sequence data before this base.

    :param tlen: target homopolymer length
    :type tlen: int
    :param qlen: query homopolymer length
    :type qlen: int
    :param nt: nucleotide
    :type nt: char
    :param p: probability of attributing this base to the unobserved error
    :type p: float
    """
    # Pure delegation to the underlying unobservable-data store.
    self._unobservable.set_before(tlen, qlen, nt, p)
Set the unobservable sequence data before this base :param tlen: target homopolymer length :param qlen: query homopolymer length :param nt: nucleotide :param p: p is the probability of attributing this base to the unobserved error :type tlen: int :type qlen: int :type nt: char :type p: float
def model_deleted(sender, instance, using, **kwargs):
    """Signal handler that automatically triggers "deleted" actions."""
    opts = get_opts(instance)
    model_label = '{}.{}'.format(opts.app_label, opts.object_name)
    distill_model_event(instance, model_label, 'deleted')
Automatically triggers "deleted" actions.
def Sign(verifiable, keypair):
    """Sign the `verifiable` object's hash data with `keypair`'s private key.

    Args:
        verifiable: object exposing GetHashData().
        keypair (neocore.KeyPair): key pair providing the private key.

    Returns:
        The value returned by Crypto.Default().Sign for the hash data.
    """
    hashdata = verifiable.GetHashData()
    private_key = bytes(keypair.PrivateKey)
    return Crypto.Default().Sign(hashdata, private_key)
Sign the `verifiable` object with the private key from `keypair`. Args: verifiable: object exposing GetHashData(). keypair (neocore.KeyPair): key pair providing the private key. Returns: The signature produced by Crypto.Default().Sign over the hash data.
def load_vint(buf, pos):
    """Load a variable-size little-endian integer (at most 11 bytes).

    Each byte contributes its low 7 bits; the high bit set means more
    bytes follow.

    Returns:
        (value, new_pos) tuple.

    Raises:
        BadRarFile: if the vint is truncated or overlong.
    """
    result = 0
    shift = 0
    end = min(pos + 11, len(buf))
    while pos < end:
        byte = _byte_code(buf[pos])
        result += (byte & 0x7F) << shift
        pos += 1
        if byte < 0x80:
            return result, pos
        shift += 7
    raise BadRarFile('cannot load vint')
Load variable-size int.
def run(
    categories, param_file, project_dir, plugin, target, status_update_interval
):
    """Generate code for this project and run it.

    Thin public wrapper that delegates directly to the private ``_run``
    implementation with identical arguments.
    """
    return _run(
        categories, param_file, project_dir, plugin, target,
        status_update_interval
    )
Generate code for this project and run it
def query(cls, *criteria, **filters):
    """Wrap SQLAlchemy's filter and filter_by for the workflow model.

    Positional arguments are passed to ``filter`` and keyword arguments
    to ``filter_by``; each matching row is wrapped in ``cls``.

    .. codeblock:: python

        WorkflowObject.query(id=123)
    """
    base_query = cls.dbmodel.query.filter(*criteria).filter_by(**filters)
    return [cls(row) for row in base_query.all()]
Wrap sqlalchemy query methods. A wrapper for the filter and filter_by functions of sqlalchemy. Define a dict with which columns should be filtered by which values. .. codeblock:: python WorkflowObject.query(id=123) WorkflowObject.query(status=ObjectStatus.COMPLETED) The function supports also "hybrid" arguments using WorkflowObjectModel indirectly. .. codeblock:: python WorkflowObject.query( WorkflowObject.dbmodel.status == ObjectStatus.COMPLETED, user_id=user_id ) See also SQLAlchemy BaseQuery's filter and filter_by documentation.
def allan_variance(data, dt, tmax=10):
    """Calculate the Allan variance of a time series.

    Args:
        data (np.ndarray): input data.
        dt (float): time between samples.
        tmax (float): maximum averaging time.

    Returns:
        tuple of np.ndarray: the averaging times and the Allan variance.
    """
    n_points = len(data)
    nmax = n_points if n_points < tmax / dt else int(tmax / dt)
    allanvar = []
    for width in range(1, nmax + 1):
        # Drop leading samples so the length divides evenly by ``width``.
        trimmed = data[n_points % width:]
        means = trimmed.reshape(n_points // width, width).mean(axis=1)
        allanvar.append(((means[1:] - means[:-1]) ** 2).mean() / 2)
    return dt * np.arange(1, nmax + 1), np.array(allanvar)
Calculate Allan variance. Args: data (np.ndarray): Input data. dt (float): Time between each data. tmax (float): Maximum averaging time. Returns: tk (np.ndarray): Averaging times. allanvar (np.ndarray): Allan variance.
def to_type_with_default(value_type, value, default_value):
    """Convert a value into the type given by a TypeCode, with a default.

    :param value_type: the TypeCode for the target data type.
    :param value: the value to convert.
    :param default_value: returned when conversion is not possible.
    :return: the converted value, or default_value on failure.
    """
    result = TypeConverter.to_nullable_type(value_type, value)
    # ``is not None`` instead of ``!= None``: identity, not equality,
    # is the correct None test (== can be overridden by the result).
    return result if result is not None else default_value
Converts value into an object type specified by Type Code or returns default value when conversion is not possible. :param value_type: the TypeCode for the data type into which 'value' is to be converted. :param value: the value to convert. :param default_value: the default value to return if conversion is not possible (returns None). :return: object value of type corresponding to TypeCode, or default value when conversion is not supported.
def zSaveFile(self, fileName):
    """Save the lens currently loaded in the server to a Zemax file.

    Returns the integer status code parsed from the DDE server reply.
    """
    reply = self._sendDDEcommand("SaveFile,{}".format(fileName))
    return int(float(reply.rstrip()))
Saves the lens currently loaded in the server to a Zemax file
def expect_dimensions(__funcname=_qualified_name, **dimensions):
    """Preprocessing decorator verifying numpy-array dimensionality.

    ``dimensions`` maps argument names to the required ``ndim``;
    ``__funcname`` may be a string or a callable producing the function
    name used in error messages.
    """
    if isinstance(__funcname, str):
        # Constant-name case: wrap the string so both branches expose
        # the same callable interface.
        def get_funcname(_):
            return __funcname
    else:
        get_funcname = __funcname

    def _expect_dimension(expected_ndim):
        # Build a per-argument checker closed over the expected ndim.
        def _check(func, argname, argvalue):
            actual_ndim = argvalue.ndim
            if actual_ndim != expected_ndim:
                if actual_ndim == 0:
                    actual_repr = 'scalar'
                else:
                    actual_repr = "%d-D array" % actual_ndim
                raise ValueError(
                    "{func}() expected a {expected:d}-D array"
                    " for argument {argname!r}, but got a {actual}"
                    " instead.".format(
                        func=get_funcname(func),
                        expected=expected_ndim,
                        argname=argname,
                        actual=actual_repr,
                    )
                )
            return argvalue
        return _check
    # One checker per named argument, handed to the generic preprocess
    # decorator.
    return preprocess(**valmap(_expect_dimension, dimensions))
Preprocessing decorator that verifies inputs are numpy arrays with a specific dimensionality. Examples -------- >>> from numpy import array >>> @expect_dimensions(x=1, y=2) ... def foo(x, y): ... return x[0] + y[0, 0] ... >>> foo(array([1, 1]), array([[1, 1], [2, 2]])) 2 >>> foo(array([1, 1]), array([1, 1])) # doctest: +NORMALIZE_WHITESPACE ... # doctest: +ELLIPSIS Traceback (most recent call last): ... ValueError: ...foo() expected a 2-D array for argument 'y', but got a 1-D array instead.