def download_api(branch=None) -> str: habitica_github_api = 'https://api.github.com/repos/HabitRPG/habitica' if not branch: branch = requests.get(habitica_github_api + '/releases/latest').json()['tag_name'] curl = local['curl']['-sL', habitica_github_api + '/tarball/{}'.format(branch)] tar = loc...
Download API documentation from _branch_ of Habitica's repo on GitHub.
def all_host_infos(): output = [] output.append(["Operating system", os()]) output.append(["CPUID information", cpu()]) output.append(["CC information", compiler()]) output.append(["JDK information", from_cmd("java -version")]) output.append(["MPI information", from_cmd("mpirun -version")]) ...
Summarize all host information.
def get_values(self, *args, **kwargs): if isinstance(args[0], list): raise ValueError("Can only get_values() for a single tag.") response = self.get_datapoints(*args, **kwargs) for value in response['tags'][0]['results'][0]['values']: yield [datetime.datetime.utcfromtimes...
Convenience method that, for simple single-tag queries, returns just the values to be iterated on.
def check_dipole(inp, name, verb): _check_shape(np.squeeze(inp), name, (3,)) inp[0] = _check_var(inp[0], float, 1, name+'-x') inp[1] = _check_var(inp[1], float, 1, name+'-y', inp[0].shape) inp[2] = _check_var(inp[2], float, 1, name+'-z', (1,)) if verb > 2: if name == 'src': ...
r"""Check dipole parameters. This check-function is called from one of the modelling routines in :mod:`model`. Consult these modelling routines for a detailed description of the input parameters. Parameters ---------- inp : list of floats or arrays Pole coordinates (m): [pole-x, pole-...
def has_extensions(self, *exts): file_ext = splitext(self.filename)[1] file_ext = file_ext.lower() for e in exts: if file_ext == e: return True return False
Check if file has one of the extensions.
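A runnable restatement as a plain function (the class context is dropped for the demo):

```python
from os.path import splitext

def has_extensions(filename, *exts):
    # Compare the lowercased extension against each candidate extension.
    file_ext = splitext(filename)[1].lower()
    for e in exts:
        if file_ext == e:
            return True
    return False

assert has_extensions("photo.JPG", ".jpg", ".png")
assert not has_extensions("notes.txt", ".jpg", ".png")
```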
def search_people_by_bio(query, limit_results=DEFAULT_LIMIT, index=['onename_people_index']): from pyes import QueryStringQuery, ES conn = ES() q = QueryStringQuery(query, search_fields=['username', 'profile_bio'], default_operator='...
Queries the Lucene index to find the nearest match; output is the profile username.
def set_pattern_step_setpoint(self, patternnumber, stepnumber, setpointvalue): _checkPatternNumber(patternnumber) _checkStepNumber(stepnumber) _checkSetpointValue(setpointvalue, self.setpoint_max) address = _calculateRegisterAddress('setpoint', patternnumber, stepnumber) self.wri...
Set the setpoint value for a step. Args: * patternnumber (integer): 0-7 * stepnumber (integer): 0-7 * setpointvalue (float): Setpoint value
def connection_class(self, adapter): if self.adapters.get(adapter): return self.adapters[adapter] try: class_prefix = getattr( __import__('db.' + adapter, globals(), locals(), ['__class_prefix__']), '__class_prefix__') driver...
Get connection class by adapter
def _have_pyspark(): if _have_pyspark.flag is None: try: if PackageStore.get_parquet_lib() is ParquetLib.SPARK: import pyspark _have_pyspark.flag = True else: _have_pyspark.flag = False except ImportError: _have_pysp...
Check if we're running PySpark.
def _filter_nonextensions(cls, obj): if hasattr(obj, '__dict__') and obj.__dict__.get('__NO_EXTENSION__', False) is True: return False return True
Remove all classes marked as not extensions. This allows us to have a deeper hierarchy of classes than just one base class that is filtered by _filter_subclasses. Any class can define a class propery named: __NO_EXTENSION__ = True That class will never be returned as an exten...
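A self-contained sketch of the marker in action; the Base/Hidden/Real classes are hypothetical:

```python
def _filter_nonextensions(obj):
    # Exclude classes that set __NO_EXTENSION__ = True in their own __dict__.
    if hasattr(obj, '__dict__') and obj.__dict__.get('__NO_EXTENSION__', False) is True:
        return False
    return True

class Base:
    pass

class Hidden(Base):
    __NO_EXTENSION__ = True   # never returned as an extension

class Real(Base):
    pass

print([c.__name__ for c in Base.__subclasses__() if _filter_nonextensions(c)])
# ['Real']
```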
def _get_node_groups(self): node_dict = {node['data']['id']: {'sources': [], 'targets': []} for node in self._nodes} for edge in self._edges: edge_data = (edge['data']['i'], edge['data']['polarity'], edge['data']['source']) node_dict[...
Return a list of node id lists that are topologically identical. First construct a node_dict which is keyed to the node id and has a value which is a dict with keys 'sources' and 'targets'. The 'sources' and 'targets' each contain a list of tuples (i, polarity, source) edge of the node....
def _check_local_option(self, option): if option not in self.telnet_opt_dict: self.telnet_opt_dict[option] = TelnetOption() return self.telnet_opt_dict[option].local_option
Test the status of local negotiated Telnet options.
def raw(self, query: Any, data: Any = None): assert isinstance(query, str) input_db = self.conn['data'][self.schema_name] result = None try: query = query.replace("'", "\"") criteria = json.loads(query) for key, value in criteria.items(): ...
Run raw query on Repository. For this stand-in repository, the query string is a json string that contains kwargs criteria with straightforward equality checks. Individual criteria are always ANDed and the result is always a subset of the full repository. We will ignore the `data` para...
def ffn_expert_fn(input_size, hidden_sizes, output_size, hidden_activation=tf.nn.relu): def my_fn(x): layer_sizes = [input_size] + hidden_sizes + [output_size] for i in range(1 + len(hidden_sizes)): w = tf.get_variable("w_%d" % i, layer_sizes[i:i+2],...
Returns a function that creates a feed-forward network. Use this function to create the expert_fn argument to distributed_moe. Args: input_size: an integer hidden_sizes: a list of integers output_size: an integer hidden_activation: a unary function. Returns: a unary function
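The key detail is that `layer_sizes[i:i+2]` yields each (fan-in, fan-out) pair. A NumPy sketch of the same wiring (not the TensorFlow implementation, just the shape logic):

```python
import numpy as np

def ffn(x, input_size, hidden_sizes, output_size):
    layer_sizes = [input_size] + hidden_sizes + [output_size]
    rng = np.random.default_rng(0)
    for i in range(1 + len(hidden_sizes)):
        n_in, n_out = layer_sizes[i:i + 2]   # consecutive pair = one weight matrix
        w = rng.standard_normal((n_in, n_out)) / np.sqrt(n_in)
        x = x @ w
        if i < len(hidden_sizes):            # activation on hidden layers only
            x = np.maximum(x, 0.0)           # relu
    return x

print(ffn(np.ones((4, 8)), 8, [16, 16], 2).shape)  # (4, 2)
```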
def _sidConversion(cls, val, **kwargs): if isinstance(val, six.string_types): val = val.split(',') usernames = [] for _sid in val: try: userSid = win32security.LookupAccountSid('', _sid) if userSid[1]: userSid = '{1}\\{0...
Converts a list of PySID objects to string representations.
def rmtree(path): def handle_remove_readonly(func, path, exc): excvalue = exc[1] if ( func in (os.rmdir, os.remove, os.unlink) and excvalue.errno == errno.EACCES ): os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO) func(path) ...
On Windows, rmtree fails for read-only dirs.
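The visible body defines the error handler but the final call is cut off; a plausible completion (the `shutil.rmtree(..., onerror=...)` tail is an assumption based on the classic recipe):

```python
import errno
import os
import shutil
import stat

def rmtree(path):
    def handle_remove_readonly(func, path, exc):
        excvalue = exc[1]
        if (
            func in (os.rmdir, os.remove, os.unlink)
            and excvalue.errno == errno.EACCES
        ):
            # Make the entry writable, then retry the failed operation.
            os.chmod(path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
            func(path)
        else:
            raise  # assumed: re-raise anything the handler cannot fix
    shutil.rmtree(path, ignore_errors=False, onerror=handle_remove_readonly)
```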
def geo_length(arg, use_spheroid=None): op = ops.GeoLength(arg, use_spheroid) return op.to_expr()
Compute the length of geospatial data. Parameters ---------- arg : geometry or geography use_spheroid : bool, default None Returns ------- length : double scalar
def _handle_resps(self, root): resps, bearers = self.get_resps(root) if not resps: return root file_desc = root.xpath( '/tei:teiCorpus/tei:teiHeader/tei:fileDesc', namespaces=constants.NAMESPACES)[0] edition_stmt = etree.Element(TEI + 'editionStmt') ...
Returns `root` with a resp list added to the TEI header and @resp values changed to references.
def total(self): if self._result_cache: return self._result_cache.total return self.all().total
Return the total number of records
def get(self, secret_id): return self.prepare_model(self.client.api.inspect_secret(secret_id))
Get a secret. Args: secret_id (str): Secret ID. Returns: (:py:class:`Secret`): The secret. Raises: :py:class:`docker.errors.NotFound` If the secret does not exist. :py:class:`docker.errors.APIError` If the server ...
def forward_remote( self, remote_port, local_port=None, remote_host="127.0.0.1", local_host="localhost", ): if not local_port: local_port = remote_port tunnels = [] def callback(channel, src_addr_tup, dst_addr_tup): sock = socke...
Open a tunnel connecting ``remote_port`` to the local environment. For example, say you're running a daemon in development mode on your workstation at port 8080, and want to funnel traffic to it from a production or staging environment. In most situations this isn't possible as your of...
def parse_rune_links(html: str) -> dict: soup = BeautifulSoup(html, 'lxml') single_page_raw = soup.find_all('li', class_='champion') single_page = {re.split('\W+', x.a.div.div['style'])[-3].lower(): [x.a['href']] for x in single_page_raw if x.a is not None} double_page_raw = soup....
A function which parses the main Runeforge website into dict format. Parameters ---------- html : str The string representation of the html obtained via a GET request. Returns ------- dict The nested rune_links champ rune pages from runeforge.
def __expire_files(self): self.__files = OrderedDict( item for item in self.__files.items() if not item[1].expired )
Expire cached files, because files are always unclean.
def find_by_tooltip(browser, tooltip): return ElementSelector( world.browser, str('//*[@title=%(tooltip)s or @data-original-title=%(tooltip)s]' % dict(tooltip=string_literal(tooltip))), filter_displayed=True, )
Find elements with the given tooltip. :param browser: ``world.browser`` :param tooltip: Tooltip to search for Returns: an :class:`ElementSelector`
def plot_dop(bands, int_max, dop, hund_cu, name): data = ssplt.calc_z(bands, dop, np.arange(0, int_max, 0.1), hund_cu, name) ssplt.plot_curves_z(data, name)
Plot of quasiparticle weight for N degenerate bands under selected doping; shows a transition only at half-filling, the rest are metallic states.
def reset(self, keep_state=False): if not keep_state: self.state = state.ManagerState(state.MANAGER_STATE_PREFIX) self.state.reset() async_to_sync(consumer.run_consumer)(timeout=1) async_to_sync(self.sync_counter.reset)()
Reset the shared state and drain Django Channels. :param keep_state: If ``True``, do not reset the shared manager state (useful in tests, where the settings overrides need to be kept). Defaults to ``False``.
def match(self, **kwargs): if kwargs: if self.definition.get('model') is None: raise ValueError("match() with filter only available on relationships with a model") output = process_filter_args(self.definition['model'], kwargs) if output: self.f...
Traverse relationships with properties matching the given parameters. e.g: `.match(price__lt=10)` :param kwargs: see `NodeSet.filter()` for syntax :return: self
def for_window(cls, window): utcnow = datetime.datetime.utcnow() return cls(utcnow - window, 0)
Given a timedelta window, return a timestamp representing that time.
def color_split_position(self): return self.get_text_width(' ') + self.label_width + \ int(float(self.font_width) * float(self.num_padding_chars))
The SVG x position where the color split should occur.
def kick(self, channel, nick, message=None): self.send("KICK", channel, nick, ":%s" % (message or self.user.nick))
Attempt to kick a user from a channel. If a message is not provided, defaults to own nick.
def itervalues(self, key_type=None): if(key_type is not None): intermediate_key = str(key_type) if intermediate_key in self.__dict__: for direct_key in self.__dict__[intermediate_key].values(): yield self.items_dict[direct_key] else: ...
Returns an iterator over the dictionary's values. @param key_type if specified, the iterator yields only values pointed to by keys of this type. Otherwise (if not specified) all values in this dictionary are generated.
def extract_mean_or_value(cls, obs_or_pred, key=None): result = None if not isinstance(obs_or_pred, dict): result = obs_or_pred else: keys = ([key] if key is not None else []) + ['mean', 'value'] for k in keys: if k in obs_or_pred: ...
Extracts the mean, value, or user-provided key from an observation or prediction dictionary.
def subprocess_manager(self, exec_args): try: sp = gevent.subprocess.Popen(exec_args, stdout=gevent.subprocess.PIPE, stderr=gevent.subprocess.PIPE) except OSError: raise RuntimeError('Could not run bro executable (either not installed or not in path): %s' % (exec_args)) o...
Bro subprocess manager
def _whatsnd(data): hdr = data[:512] fakefile = BytesIO(hdr) for testfn in sndhdr.tests: res = testfn(hdr, fakefile) if res is not None: return _sndhdr_MIMEmap.get(res[0]) return None
Try to identify a sound file type. sndhdr.what() has a pretty cruddy interface, unfortunately. This is why we re-do it here. It would be easier to reverse engineer the Unix 'file' command and use the standard 'magic' file, as shipped with a modern Unix.
def __set_default_ui_state(self, *args): LOGGER.debug("> Setting default View state!") if not self.model(): return self.expandAll() for column in range(len(self.model().horizontal_headers)): self.resizeColumnToContents(column)
Sets the Widget default ui state. :param \*args: Arguments. :type \*args: \*
def describe(self): desc = { 'name': self.name, 'description': self.description, 'type': self.type or 'unknown', } for attr in ['min', 'max', 'allowed', 'default']: v = getattr(self, attr) if v is not None: desc[attr] = ...
Information about this parameter
def get(self, name, default=None): session = self.__get_session_from_db() return session.get(name, default)
Gets the object for "name". If no object is found, returns "default" if provided, else None.
def get_instance(page_to_crawl): global _instances if isinstance(page_to_crawl, basestring): uri = page_to_crawl page_to_crawl = crawlpage.get_instance(uri) elif isinstance(page_to_crawl, crawlpage.CrawlPage): uri = page_to_crawl.uri else: raise TypeError( "ge...
Return an instance of CrawlModel.
def clean(self, value): if ( self.base_type is not None and value is not None and not isinstance(value, self.base_type) ): if isinstance(self.base_type, tuple): allowed_types = [typ.__name__ for typ in self.base_type] allowe...
Take a dirty value and clean it.
def CaptureVariablesList(self, items, depth, empty_message, limits): v = [] for name, value in items: if (self._total_size >= self.max_size) or ( len(v) >= limits.max_list_items): v.append({ 'status': { 'refersTo': 'VARIABLE_VALUE', 'descriptio...
Captures list of named items. Args: items: iterable of (name, value) tuples. depth: nested depth of dictionaries and vectors for items. empty_message: info status message to set if items is empty. limits: Per-object limits for capturing variable data. Returns: List of formatted v...
def parse_config(args): config_path = path.expanduser(args.config_file) if not path.exists(config_path): if args.config_file != DEFAULT_JOURNAL_RC: print("journal: error: config file '" + args.config_file + "' not found") sys.exit() else: return DEFAULT_JOURNA...
Try to load config to find other journal locations; otherwise, return the default location. Returns the journal location.
def _prune_hit(hit, model): hit_id = hit["_id"] hit_index = hit["_index"] if model.objects.in_search_queryset(hit_id, index=hit_index): logger.debug( "%s with id=%s exists in the '%s' index queryset.", model, hit_id, hit_index ) return None else: logger.debug(...
Check whether a document should be pruned. This method uses the SearchDocumentManagerMixin.in_search_queryset method to determine whether a 'hit' (search document) should be pruned from an index, and if so it returns the hit as a Django object (id=hit_id). Args: hit: dict object that represents ...
def delete_group(self, group_id, keep_non_orphans=False, keep_orphans=False): params = {'keepNonOrphans': str(keep_non_orphans).lower(), 'keepOrphans': str(keep_orphans).lower()} self._delete(self._service_url(['triggers', 'groups', group_id], params=params))
Delete a group trigger :param group_id: ID of the group trigger to delete :param keep_non_orphans: if True converts the non-orphan member triggers to standard triggers :param keep_orphans: if True converts the orphan member triggers to standard triggers
def predicates_overlap(tags1: List[str], tags2: List[str]) -> bool: pred_ind1 = get_predicate_indices(tags1) pred_ind2 = get_predicate_indices(tags2) return any(set.intersection(set(pred_ind1), set(pred_ind2)))
Tests whether the predicates in BIO tags1 overlap with those of tags2.
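`get_predicate_indices` is not shown; assuming it returns the positions tagged as the verb (`B-V`/`I-V`), a self-contained sketch:

```python
from typing import List

def get_predicate_indices(tags: List[str]) -> List[int]:
    # Assumed helper: positions whose BIO tag marks the predicate (verb).
    return [i for i, tag in enumerate(tags) if tag.endswith("-V")]

def predicates_overlap(tags1: List[str], tags2: List[str]) -> bool:
    pred_ind1 = get_predicate_indices(tags1)
    pred_ind2 = get_predicate_indices(tags2)
    return any(set.intersection(set(pred_ind1), set(pred_ind2)))

print(predicates_overlap(["B-V", "I-V", "O"], ["O", "I-V", "O"]))  # True
print(predicates_overlap(["B-V", "O", "O"], ["O", "O", "B-V"]))    # False
```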
def calibrate(self): if self._driver and self._driver.is_connected(): self._driver.probe_plate() self._engaged = False
Calibration involves probing for the top plate to get the plate height.
def match_level(self, overlay): slice_width = len(self._pattern_spec) if slice_width > len(overlay): return None best_lvl, match_slice = (0, None) for i in range(len(overlay)-slice_width+1): overlay_slice = overlay.values()[i:i+slice_width] lvl = self._slice_match...
Given an overlay, return the match level and applicable slice of the overall overlay. The level is an integer if there is a match, or None if there is no match. The level integer is the number of matching components. Higher values indicate a stronger match.
def drop_nan(self, col: str=None, method: str="all", **kwargs): try: if col is None: self.df = self.df.dropna(how=method, **kwargs) else: self.df = self.df[self.df[col].notnull()] except Exception as e: self.err(e, "Error dropping nan v...
Drop rows with NaN values from the main dataframe :param col: name of the column, defaults to None. Drops in a...
def load(self, mdl_file): import dill as pickle mdl_file_e = op.expanduser(mdl_file) sv = pickle.load(open(mdl_file_e, "rb")) self.mdl = sv["mdl"] self.modelparams.update(sv["modelparams"]) logger.debug("loaded model from path: " + mdl_file_e)
Load model from file. fv_type is not set by this function; it is expected to be set beforehand.
def home_wins(self): try: wins, losses = re.findall(r'\d+', self._home_record) return int(wins) except ValueError: return 0
Returns an ``int`` of the number of games the home team won after the conclusion of the game.
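A standalone sketch of the record parsing, with a hypothetical "wins-losses" record string; the `int()` cast matches the documented return type:

```python
import re

def home_wins(record):
    # record looks like "3-2" (wins-losses); fall back to 0 if unparsable
    try:
        wins, losses = re.findall(r'\d+', record)
        return int(wins)
    except ValueError:
        return 0

print(home_wins("3-2"))  # 3
print(home_wins("n/a"))  # 0
```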
def cli_opts(): parser = argparse.ArgumentParser() parser.add_argument( "--homeassistant-config", type=str, required=False, dest="config", help="Create configuration section for home assistant",) parser.add_argument( "-f", "--filter", type=str,...
Handle the command line options
def handle_triple(self, lhs, relation, rhs): relation = relation.replace(':', '', 1) if self.is_relation_inverted(relation): source, target, inverted = rhs, lhs, True relation = self.invert_relation(relation) else: source, target, inverted = lhs, rhs, False ...
Process triples before they are added to the graph. Note that *lhs* and *rhs* are as they originally appeared, and may be inverted. Inversions are detected by is_relation_inverted() and de-inverted by invert_relation(). By default, this function: * removes initial colons on re...
def list_vrf(self, auth, spec=None): if spec is None: spec = {} self._logger.debug("list_vrf called; spec: %s" % unicode(spec)) sql = "SELECT * FROM ip_net_vrf" params = list() if spec: where, params = self._expand_vrf_spec(spec)
Return a list of VRFs matching `spec`. * `auth` [BaseAuth] AAA options. * `spec` [vrf_spec] A VRF specification. If omitted, all VRFs are returned. Returns a list of dicts. This is the documentation of the internal backend function. It's...
def _get_internal_field_by_name(self, name): field = self._all_fields.get(name, self._all_fields.get('%s.%s' % (self._full_name, name))) if field is not None: return field for field_name in self._all_fields: if field_name.endswith('.%s' % name): return sel...
Gets the field by name, or None if not found.
def _ProcessArtifactFilesSource(self, source): if source.path_type != rdf_paths.PathSpec.PathType.OS: raise ValueError("Only supported path type is OS.") paths = [] pathspec_attribute = source.base_source.attributes.get("pathspec_attribute") for source_result_list in self._ProcessSources( ...
Get artifact responses, extract paths and send corresponding files.
def _read_depth_images(self, num_images): depth_images = self._ros_read_images(self._depth_image_buffer, num_images, self.staleness_limit) for i in range(0, num_images): depth_images[i] = depth_images[i] * MM_TO_METERS if self._flip_images: depth_images[i] = np.fl...
Reads depth images from the device
def get(self, id_): if self.api.queue_exists(id_): return Queue(self, {"queue": {"name": id_, "id_": id_}}, key="queue") raise exc.NotFound("The queue '%s' does not exist." % id_)
Need to customize, since Queues are not returned with normal response bodies.
def get_text(self, text): if sys.maxunicode == 0xffff: return text[self.offset:self.offset + self.length] if not isinstance(text, bytes): entity_text = text.encode('utf-16-le') else: entity_text = text entity_text = entity_text[self.offset * 2:(self.of...
Get value of entity :param text: full text :return: part of text
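The offset and length count UTF-16 code units, which is why the narrow-build shortcut and the `* 2` byte arithmetic exist. A standalone sketch of the slicing:

```python
def entity_text(text, offset, length):
    # Offsets are UTF-16 code units, so slice the UTF-16-LE bytes (2 per unit).
    data = text.encode('utf-16-le')
    return data[offset * 2:(offset + length) * 2].decode('utf-16-le')

s = "hi \U0001F600 !"        # the emoji occupies two UTF-16 code units
print(entity_text(s, 3, 2))  # prints the emoji
```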
def graph_from_edges(edge_list, node_prefix='', directed=False): if edge_list is None: edge_list = [] graph_type = "digraph" if directed else "graph" with_prefix = functools.partial("{0}{1}".format, node_prefix) graph = Dot(graph_type=graph_type) for src, dst in edge_list: src = with...
Creates a basic graph out of an edge list. The edge list has to be a list of tuples representing the nodes connected by the edge. The values can be anything: bool, int, float, str. If the graph is undirected (the default), the edge list is treated as one of the symmetric halves of the adjacency matrix.
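A plausible completion using pydot (the truncated tail that prefixes the endpoints and adds `Edge` objects is an assumption):

```python
import functools
import pydot

def graph_from_edges(edge_list, node_prefix='', directed=False):
    if edge_list is None:
        edge_list = []
    graph_type = "digraph" if directed else "graph"
    with_prefix = functools.partial("{0}{1}".format, node_prefix)
    graph = pydot.Dot(graph_type=graph_type)
    for src, dst in edge_list:
        # assumed tail: prefix both endpoints and add the edge
        graph.add_edge(pydot.Edge(with_prefix(src), with_prefix(dst)))
    return graph

g = graph_from_edges([(1, 2), (2, 3)], node_prefix='n', directed=True)
print(g.to_string())
```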
def setServer(self, server): if server == 'live': self.__server__ = server self.__server_url__ = 'api.sense-os.nl' self.setUseHTTPS() return True elif server == 'dev': self.__server__ = server self.__server_url__ = 'api.dev....
Set server to interact with. @param server (string) - 'live' for live server, 'dev' for test server, 'rc' for release candidate @return (boolean) - Boolean indicating whether setServer succeeded
def _GetNextPath(self): paths = sorted(path for path in io_wrapper.ListDirectoryAbsolute(self._directory) if self._path_filter(path)) if not paths: return None if self._path is None: return paths[0] if not io_wrapper.IsCloudPath(paths[0]) and not self._o...
Gets the next path to load from. This function also does the checking for out-of-order writes as it iterates through the paths. Returns: The next path to load events from, or None if there are no more paths.
def get_content(self, key): LOGGER.debug("> Retrieving '{1}' content from '{0}'.".format(self.__class__.__name__, key)) return self.get(key)
Gets given content from the cache. Usage:: >>> cache = Cache() >>> cache.add_content(John="Doe", Luke="Skywalker") True >>> cache.get_content("Luke") 'Skywalker' :param key: Content to retrieve. :type key: object :return: Con...
def construct_makeblastdb_cmd( filename, outdir, blastdb_exe=pyani_config.MAKEBLASTDB_DEFAULT ): title = os.path.splitext(os.path.split(filename)[-1])[0] outfilename = os.path.join(outdir, os.path.split(filename)[-1]) return ( "{0} -dbtype nucl -in {1} -title {2} -out {3}".format( bl...
Returns a single makeblastdb command. - filename - input filename - blastdb_exe - path to the makeblastdb executable
def initialize_from_sql_cursor(self, sqlcursor): tuples = 0 data = sqlcursor.fetchmany() while 0 < len(data): for entry in data: self.add_entry(str(entry[0]), entry[1]) data = sqlcursor.fetchmany() self._normalized = self._check_normalization()
Initializes the TimeSeries's data from the given SQL cursor. You need to set the time stamp format using :py:meth:`TimeSeries.set_timeformat`. :param SQLCursor sqlcursor: Cursor that holds the SQL result for any given "SELECT timestamp, value, ... FROM ..." SQL query. On...
def create_treeitem(self, ): p = self.get_parent() root = self.get_root() if p: pitem = p.get_treeitem() else: pitem = root.get_rootitem() idata = root.create_itemdata(self) item = TreeItem(idata, parent=pitem) return item
Create a new treeitem for this reftrack instance. .. Note:: Parent should be set, Parent should already have a treeitem. If there is no parent, the root tree item is used as parent for the treeitem. :returns: a new treeitem that contains an itemdata with the reftrack instance. ...
def _ior(self, other): if not isinstance(other, _basebag): other = self._from_iterable(other) for elem, other_count in other.counts(): old_count = self.count(elem) new_count = max(other_count, old_count) self._set_count(elem, new_count) return self
Set multiplicity of each element to the maximum of the two collections. if isinstance(other, _basebag): This runs in O(other.num_unique_elements()) else: This runs in O(len(other))
def histogram_voltage(self, timestep=None, title=True, **kwargs): data = self.network.results.v_res() if title is True: if timestep is not None: title = "Voltage histogram for time step {}".format(timestep) else: title = "Voltage histogram \nfor ti...
Plots histogram of voltages. For more information see :func:`edisgo.tools.plots.histogram`. Parameters ---------- timestep : :pandas:`pandas.Timestamp<timestamp>` or None, optional Specifies time step histogram is plotted for. If timestep is None all time steps ...
def next_sibling(self): if self.parent is None: return None for i, child in enumerate(self.parent.children): if child is self: try: return self.parent.children[i+1] except IndexError: return None
The node immediately following the invocant in their parent's children list. If the invocant does not have a next sibling, it is None
def create_move(project, resource, offset=None): if offset is None: return MoveModule(project, resource) this_pymodule = project.get_pymodule(resource) pyname = evaluate.eval_location(this_pymodule, offset) if pyname is not None: pyobject = pyname.get_object() if isinstance(pyobj...
A factory for creating Move objects Based on `resource` and `offset`, return one of `MoveModule`, `MoveGlobal` or `MoveMethod` for performing move refactoring.
def _createJobStateFile(self): jobStateFile = os.path.join(self.localTempDir, '.jobState') jobState = {'jobPID': os.getpid(), 'jobName': self.jobName, 'jobDir': self.localTempDir, 'deferredFunctions': []} with open(jobStateFile + '.tmp'...
Create the job state file for the current job and fill in the required values. :return: Path to the job state file :rtype: str
def calc_et0_v1(self): con = self.parameters.control.fastaccess inp = self.sequences.inputs.fastaccess flu = self.sequences.fluxes.fastaccess for k in range(con.nhru): flu.et0[k] = (con.ke[k]*(((8.64*inp.glob+93.*con.kf[k]) * (flu.tkor[k]+22.)) / ...
Calculate reference evapotranspiration after Turc-Wendling. Required control parameters: |NHRU| |KE| |KF| |HNN| Required input sequence: |Glob| Required flux sequence: |TKor| Calculated flux sequence: |ET0| Basic equation: :math:`ET0 = KE \\cdot ...
def update_input(filelist, ivmlist=None, removed_files=None): newfilelist = [] if removed_files == []: return filelist, ivmlist else: sci_ivm = list(zip(filelist, ivmlist)) for f in removed_files: result=[sci_ivm.remove(t) for t in sci_ivm if t[0] == f ] ivmlist =...
Removes files flagged to be removed from the input filelist. Removes the corresponding ivm files if present.
def seq_md5(seq, normalize=True): seq = normalize_sequence(seq) if normalize else seq bseq = seq.encode("ascii") return hashlib.md5(bseq).hexdigest()
returns unicode md5 as hex digest for sequence `seq`. >>> seq_md5('') 'd41d8cd98f00b204e9800998ecf8427e' >>> seq_md5('ACGT') 'f1f8f4bf413b16ad135722aa4591043e' >>> seq_md5('ACGT*') 'f1f8f4bf413b16ad135722aa4591043e' >>> seq_md5(' A C G T ') 'f1f8f4bf413b16ad135722aa4591043e' >>>...
def set_path(self, file_path): if not file_path: self.read_data = self.memory_read self.write_data = self.memory_write elif not is_valid(file_path): self.write_data(file_path, {}) self.path = file_path
Set the path of the database. Create the file if it does not exist.
def close(self): self._serial.write(b"@c") self._serial.read() self._serial.close()
Closes the connection to the serial port and ensures no pending operations are left.
def get_hoisted(dct, child_name): child = dct[child_name] del dct[child_name] dct.update(child) return dct
Pulls all of a child's keys up to the parent, with the names unchanged.
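Usage sketch with a hypothetical config dict:

```python
def get_hoisted(dct, child_name):
    child = dct[child_name]
    del dct[child_name]
    dct.update(child)
    return dct

cfg = {"db": {"host": "localhost", "port": 5432}, "debug": True}
print(get_hoisted(cfg, "db"))
# {'debug': True, 'host': 'localhost', 'port': 5432}
```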
def exec_func(code, glob_vars, loc_vars=None): if loc_vars is None: exec(code, glob_vars) else: exec(code, glob_vars, loc_vars)
Wrapper around exec.
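Usage sketch; with a single mapping, `exec` treats it as both globals and locals, so assignments land in the caller's dict:

```python
def exec_func(code, glob_vars, loc_vars=None):
    if loc_vars is None:
        exec(code, glob_vars)
    else:
        exec(code, glob_vars, loc_vars)

scope = {"x": 21}
exec_func("y = x * 2", scope)
print(scope["y"])  # 42
```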
def validate_root_vertex_directives(root_ast): directives_present_at_root = set() for directive_obj in root_ast.directives: directive_name = directive_obj.name.value if is_filter_with_outer_scope_vertex_field_operator(directive_obj): raise GraphQLCompilationError(u'Found a filter dir...
Validate the directives that appear at the root vertex field.
def _pretty_time_delta(td): seconds = td.total_seconds() sign_string = '-' if seconds < 0 else '' seconds = abs(int(seconds)) days, seconds = divmod(seconds, 86400) hours, seconds = divmod(seconds, 3600) minutes, seconds = divmod(seconds, 60) d = dict(sign=sign_string, days=days, hours=hours...
Creates a string representation of a time delta. Parameters ---------- td : :class:`datetime.timedelta` Returns ------- pretty_formatted_datetime : str
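The tail that assembles the string from the `d` dict is cut off; a plausible completion, with the exact output format an assumption:

```python
import datetime

def pretty_time_delta(td):
    seconds = td.total_seconds()
    sign_string = '-' if seconds < 0 else ''
    seconds = abs(int(seconds))
    days, seconds = divmod(seconds, 86400)
    hours, seconds = divmod(seconds, 3600)
    minutes, seconds = divmod(seconds, 60)
    # assumed format; the original builds a dict and formats it
    return '{sign}{days}d {hours}h {minutes}m {seconds}s'.format(
        sign=sign_string, days=days, hours=hours,
        minutes=minutes, seconds=seconds)

print(pretty_time_delta(datetime.timedelta(hours=26, minutes=5)))  # 1d 2h 5m 0s
```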
def _FlushInput(self): self.ser.flush() flushed = 0 while True: ready_r, ready_w, ready_x = select.select([self.ser], [], [self.ser], 0) if len(ready_x) > 0: logging.error("Exception from serial port.")...
Flush all read data until no more available.
def get_from_layer(self, name, layer=None): if name not in self._children: if self._frozen: raise KeyError(name) self._children[name] = ConfigTree(layers=self._layers) child = self._children[name] if isinstance(child, ConfigNode): return child....
Get a configuration value from the named layer. Parameters ---------- name : str The name of the value to retrieve layer: str The name of the layer to retrieve the value from. If it is not supplied then the outermost layer in which the key is defined ...
def stats_add_duration(self, key, duration): if not self._measurement: if not self.IGNORE_OOB_STATS: self.logger.warning( 'stats_add_timing invoked outside execution') return self._measurement.add_duration(key, duration)
Add a duration to the per-message measurements .. versionadded:: 3.19.0 .. note:: If this method is called when there is not a message being processed, a message will be logged at the ``warning`` level to indicate the value is being dropped. To suppress these warnings, ...
def humanize_bytes(bytesize, precision=2): abbrevs = ( (1 << 50, 'PB'), (1 << 40, 'TB'), (1 << 30, 'GB'), (1 << 20, 'MB'), (1 << 10, 'kB'), (1, 'bytes') ) if bytesize == 1: return '1 byte' for factor, suffix in abbrevs: if bytesize >= facto...
Humanize byte size figures https://gist.github.com/moird/3684595
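A plausible completion of the truncated loop, following the linked gist (the final format line is an assumption):

```python
def humanize_bytes(bytesize, precision=2):
    abbrevs = (
        (1 << 50, 'PB'), (1 << 40, 'TB'), (1 << 30, 'GB'),
        (1 << 20, 'MB'), (1 << 10, 'kB'), (1, 'bytes'),
    )
    if bytesize == 1:
        return '1 byte'
    for factor, suffix in abbrevs:
        if bytesize >= factor:
            break
    return '%.*f %s' % (precision, bytesize / factor, suffix)

print(humanize_bytes(1))        # 1 byte
print(humanize_bytes(1536))     # 1.50 kB
print(humanize_bytes(1 << 30))  # 1.00 GB
```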
def photos_search(user_id='', auth=False, tags='', tag_mode='', text='',\ min_upload_date='', max_upload_date='',\ min_taken_date='', max_taken_date='', \ license='', per_page='', page='', sort=''): method = 'flickr.photos.search' data = _doget(method, auth...
Returns a list of Photo objects. If auth=True, the user will be authenticated, allowing private photos etc. to be seen.
def get_name(node): if isinstance(node, gast.Name): return node.id elif isinstance(node, (gast.Subscript, gast.Attribute)): return get_name(node.value) else: raise TypeError
Get the name of a variable. Args: node: A `Name`, `Subscript` or `Attribute` node. Returns: The name of the variable e.g. `'x'` for `x`, `x.i` and `x[i]`.
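Usage sketch, assuming the `gast` package (an `ast`-compatible shim) is available:

```python
import gast

def get_name(node):
    if isinstance(node, gast.Name):
        return node.id
    elif isinstance(node, (gast.Subscript, gast.Attribute)):
        return get_name(node.value)
    else:
        raise TypeError

for src in ("x", "x.i", "x[i]"):
    expr = gast.parse(src).body[0].value
    print(get_name(expr))  # 'x' each time
```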
def download_artifact_bundle(self, id_or_uri, file_path): uri = self.DOWNLOAD_PATH + '/' + extract_id_from_uri(id_or_uri) return self._client.download(uri, file_path)
Download the Artifact Bundle. Args: id_or_uri: ID or URI of the Artifact Bundle. file_path(str): Destination file path. Returns: bool: Successfully downloaded.
def update(self, data): self.name = data["name"] self.description = data['description'] self.win_index = data['win_index'] if conf.use_winpcapy: self._update_pcapdata() try: self.ip = socket.inet_ntoa(get_if_raw_addr(data['guid'])) except (KeyError...
Update info about the network interface according to the given dnet dictionary.
def live_scores(self, live_scores): headers = ['League', 'Home Team Name', 'Home Team Goals', 'Away Team Goals', 'Away Team Name'] result = [headers] result.extend([game['league'], game['homeTeamName'], game['goalsHomeTeam'], game['goalsAwayTeam'], ...
Store output of live scores to a CSV file
def allpathsX(args): p = OptionParser(allpathsX.__doc__) opts, args = p.parse_args(args) if len(args) != 2: sys.exit(not p.print_help()) folder, tag = args tag = tag.split(",") for p, pf in iter_project(folder): assemble_pairs(p, pf, tag)
%prog allpathsX folder tag Run assembly on a folder of paired reads and apply tag (PE-200, PE-500). Allow multiple tags separated by comma, e.g. PE-350,TT-1050
def stop_gradient(input_layer): if input_layer.is_sequence(): result = [tf.stop_gradient(t) for t in input_layer.sequence] return input_layer.with_sequence(result) else: return tf.stop_gradient(input_layer)
Cuts off the gradient at this point. This works on both sequence and regular Pretty Tensors. Args: input_layer: The input. Returns: A new Pretty Tensor of the same type with stop_gradient applied.
def gff(args): p = OptionParser(gff.__doc__) opts, args = p.parse_args(args) if len(args) != 1: sys.exit(not p.print_help()) gbkfile, = args MultiGenBank(gbkfile)
%prog gff seq.gbk Convert Genbank file to GFF and FASTA file. The Genbank file can contain multiple records.
def validate_output(schema): location = get_callsite_location() def decorator(fn): validate_schema(schema) wrapper = wrap_response(fn, schema) record_schemas( fn, wrapper, location, response_schema=sort_schema(schema)) return wrapper return decorator
Validate the body of a response from a flask view. Like `validate_body`, this function compares a json document to a jsonschema specification. However, this function applies the schema to the view response. Instead of the view returning a flask response object, it should instead return a Python li...
def _should_fuzz_node(self, fuzz_node, stage): if stage == ClientFuzzer.STAGE_ANY: return True if fuzz_node.name.lower() == stage.lower(): if self._index_in_path == len(self._fuzz_path) - 1: return True else: return False
The matching stage is either the name of the last node, or ClientFuzzer.STAGE_ANY. :return: True if we are in the correct model node
def set_stop_chars(self, stop_chars): warnings.warn("Method set_stop_chars is deprecated, " "use `set_stop_chars_left` or " "`set_stop_chars_right` instead", DeprecationWarning) self._stop_chars = set(stop_chars) self._stop_chars_left = self._stop_char...
Set stop characters used when determining end of URL. .. deprecated:: 0.7 Use :func:`set_stop_chars_left` or :func:`set_stop_chars_right` instead. :param list stop_chars: list of characters
def show(self, frame): if len(frame.shape) != 3: raise ValueError('frame should have shape with only 3 dimensions') if not self.is_open: self.open() self._window.clear() self._window.switch_to() self._window.dispatch_events() image = ImageData( ...
Show an array of pixels on the window. Args: frame (numpy.ndarray): the frame to show on the window Returns: None
def update(self, **kwargs): if self.condition is not None: self.result = self.do_(self.model.table.update().where(self.condition).values(**kwargs)) else: self.result = self.do_(self.model.table.update().values(**kwargs)) return self.result
Execute an "update table set field = field+1"-like statement.
def mkdir(dir_path): if not os.path.isdir(dir_path) or not os.path.exists(dir_path): os.makedirs(dir_path)
Make the directory if it does not exist.
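On Python 3 the check-then-create pair collapses to one race-free call; an equivalent sketch:

```python
import os

def mkdir(dir_path):
    # exist_ok avoids the race between an isdir check and makedirs
    os.makedirs(dir_path, exist_ok=True)
```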
def _remove_last(votes, fpl, cl, ranking): for v in votes: for r in v: if r == fpl[-1]: v.remove(r) for c in cl: if c == fpl[-1]: if c not in ranking: ranking.append((c, len(ranking) + 1))
Remove last candidate in IRV voting.
def calldata(vcf_fn, region=None, samples=None, ploidy=2, fields=None, exclude_fields=None, dtypes=None, arities=None, fills=None, vcf_types=None, count=None, progress=0, logstream=None, condition=None, slice_args=None, verbose=True, cache=False, cachedir=None, skip_c...
Load a numpy 1-dimensional structured array with data from the sample columns of a VCF file. Parameters ---------- vcf_fn: string or list Name of the VCF file or list of file names. region: string Region to extract, e.g., 'chr1' or 'chr1:0-100000'. fields: list or array-like ...
def flags(self, index): activeFlags = (Qt.ItemIsEnabled | Qt.ItemIsSelectable | Qt.ItemIsUserCheckable) item = self.item(index) column = index.column() if column > 0 and not item.childCount(): activeFlags = activeFlags | Qt.ItemIsEditable return...
Return the active flags for the given index. Add editable flag to items other than the first column.
def _check_minions_directories(pki_dir): minions_accepted = os.path.join(pki_dir, salt.key.Key.ACC) minions_pre = os.path.join(pki_dir, salt.key.Key.PEND) minions_rejected = os.path.join(pki_dir, salt.key.Key.REJ) minions_denied = os.path.join(pki_dir, salt.key.Key.DEN) return minions_accepted, mini...
Return the minion keys directory paths. This function is a copy of salt.key.Key._check_minions_directories.