code: string, lengths 51 to 2.34k
docstring: string, lengths 11 to 171
def writelines(self, lines, fmt):
    if isinstance(fmt, basestring):
        fmt = [fmt] * len(lines)
    for f, line in zip(fmt, lines):
        self.writeline(f, line, self.endian)
Write `lines` with the given `fmt` (one format string, or one per line).
def row_coordinates(self, X):
    utils.validation.check_is_fitted(self, 'V_')
    _, row_names, _, _ = util.make_labels_and_names(X)
    if isinstance(X, pd.SparseDataFrame):
        X = X.to_coo().astype(float)
    elif isinstance(X, pd.DataFrame):
        X = X.to_numpy()
    if self.copy:
        X = X.copy()
    # Normalise each row so it sums to 1
    if isinstance(X, np.ndarray):
        X = X / X.sum(axis=1)[:, None]
    else:
        X = X / X.sum(axis=1)
    return pd.DataFrame(
        data=X @ sparse.diags(self.col_masses_.to_numpy() ** -0.5) @ self.V_.T,
        index=row_names
    )
The row principal coordinates.
def _parseKeyName(self, name):
    if name.endswith(Store.theInfoExtension):
        return {'type': 'info'}
    match = self.keyPattern.match(name)
    if not match:
        return None
    match = match.groupdict()
    match.update(type='diff')
    return match
Returns dict with fullpath, to, from.
def encode_7or8bit(msg):
    orig = msg.get_payload()
    if orig is None:
        # No payload; default to 7bit
        msg['Content-Transfer-Encoding'] = '7bit'
        return
    try:
        if isinstance(orig, str):
            orig.encode('ascii')
        else:
            orig.decode('ascii')
    except UnicodeError:
        charset = msg.get_charset()
        output_cset = charset and charset.output_charset
        # iso-2022-* charsets contain non-ASCII data but encode to 7 bits
        if output_cset and output_cset.lower().startswith('iso-2022-'):
            msg['Content-Transfer-Encoding'] = '7bit'
        else:
            msg['Content-Transfer-Encoding'] = '8bit'
    else:
        msg['Content-Transfer-Encoding'] = '7bit'
        if not isinstance(orig, str):
            msg.set_payload(orig.decode('ascii', 'surrogateescape'))
Set the Content-Transfer-Encoding header to 7bit or 8bit.
def should_skip(filename, config, path='/'):
    for skip_path in config['skip']:
        if posixpath.abspath(posixpath.join(path, filename)) == \
                posixpath.abspath(skip_path.replace('\\', '/')):
            return True
    position = os.path.split(filename)
    while position[1]:
        if position[1] in config['skip']:
            return True
        position = os.path.split(position[0])
    for glob in config['skip_glob']:
        if fnmatch.fnmatch(filename, glob):
            return True
    return False
Returns True if the file should be skipped based on the passed-in settings.
def _water(cls, T, P):
    water = IAPWS95(P=P, T=T)
    prop = {}
    prop["g"] = water.h - T*water.s
    prop["gt"] = -water.s
    prop["gp"] = 1./water.rho
    prop["gtt"] = -water.cp/T
    prop["gtp"] = water.betas*water.cp/T
    prop["gpp"] = -1e6/(water.rho*water.w)**2 - water.betas**2*1e3*water.cp/T
    prop["gs"] = 0
    prop["gsp"] = 0
    prop["thcond"] = water.k
    return prop
Get properties of pure water (Table 4, page 8).
def inheritance_diagram_directive(name, arguments, options, content, lineno,
                                  content_offset, block_text, state,
                                  state_machine):
    node = inheritance_diagram()
    class_names = arguments
    graph = InheritanceGraph(class_names)
    # Add cross-reference nodes for every class in the graph so that
    # Sphinx can later resolve them to real URLs.
    for name in graph.get_all_class_names():
        refnodes, x = xfileref_role('class', ':class:`%s`' % name, name, 0, state)
        node.extend(refnodes)
    node['graph'] = graph
    node['parts'] = options.get('parts', 0)
    node['content'] = " ".join(class_names)
    return [node]
Run when the inheritance_diagram directive is first encountered.
def list_kinesis_applications(region, filter_by_kwargs):
    conn = boto.kinesis.connect_to_region(region)
    streams = conn.list_streams()['StreamNames']
    kinesis_streams = {}
    for stream_name in streams:
        shard_ids = []
        shards = conn.describe_stream(stream_name)['StreamDescription']['Shards']
        for shard in shards:
            shard_ids.append(shard['ShardId'])
        kinesis_streams[stream_name] = shard_ids
    return kinesis_streams
List all the Kinesis applications along with the shards for each stream.
def defaultSystem():
    rsystem = platform.system()
    if rsystem in os_canon:
        rsystem = os_canon[rsystem][0]
    return rsystem
Return the canonicalized system name.
def _get_encoding():
    encoding = sys.getfilesystemencoding()
    if encoding is None:
        if is_darwin:
            encoding = "utf-8"
        elif is_win:
            encoding = "mbcs"
        else:
            encoding = "ascii"
    encoding = _normalize_codec(encoding)
    return encoding
The encoding used for paths, argv, environ, stdout and stdin
def looking_for_pub(self):
    if self['pub'] is not None:
        return self.sober_in_pub
    self.debug('I am looking for a pub')
    group = list(self.get_neighboring_agents())
    for pub in self.env.available_pubs():
        self.debug("We're trying to get into {}: total: {}".format(pub, len(group)))
        if self.env.enter(pub, self, *group):
            self.info("We're all {} getting in {}!".format(len(group), pub))
            return self.sober_in_pub
Look for a pub that accepts me and my friends
from contextlib import contextmanager

# The @contextmanager decorator (presumably stripped during extraction) is
# needed for this generator to be usable in a with-block, as the docstring says.
@contextmanager
def capture_logger(name):
    import logging
    logger = logging.getLogger(name)
    # StringIO moved to the io module in Python 3
    try:
        import StringIO
        stream = StringIO.StringIO()
    except ImportError:
        from io import StringIO
        stream = StringIO()
    handler = logging.StreamHandler(stream)
    logger.addHandler(handler)
    try:
        yield stream
    finally:
        logger.removeHandler(handler)
Context manager to capture a logger output with a StringIO stream.
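A typical use, assuming the context manager above is in scope (the logger name here is invented for illustration):

import logging

with capture_logger("myapp") as stream:
    logging.getLogger("myapp").warning("something happened")
print(stream.getvalue())  # "something happened\n"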
def clear() -> None:
    LOGGER.debug('clear >>>')
    with SCHEMA_CACHE.lock:
        SCHEMA_CACHE.clear()
    with CRED_DEF_CACHE.lock:
        CRED_DEF_CACHE.clear()
    with REVO_CACHE.lock:
        REVO_CACHE.clear()
    LOGGER.debug('clear <<<')
Clear all archivable caches in memory.
def sort(self, attr):
    self.entries = Sorter(self.entries, self.category, attr).sort_entries()
    return self
Sort the ratings based on an attribute
def _simulate_installation_of(to_install, package_set):
    installed = set()
    for inst_req in to_install:
        dist = make_abstract_dist(inst_req).dist()
        name = canonicalize_name(dist.key)
        package_set[name] = PackageDetails(dist.version, dist.requires())
        installed.add(name)
    return installed
Computes the version of packages after installing to_install.
def _adjustment_reactions(self):
    for reaction_id in self._model.reactions:
        if not self._model.is_exchange(reaction_id):
            yield reaction_id
Yield all the non-exchange reactions in the model.
def _do(self, nodes):
    if not isinstance(nodes, dict):
        raise TypeError('"nodes" must be a dictionary')
    if not nodes:
        return
    starting_num_nodes = len(nodes)
    newly_instantiated = set()
    # Instantiate every service whose dependency set is already empty
    for (name, dependency_set) in six.iteritems(nodes):
        if dependency_set:
            continue
        config = self._config[name]
        service = self._factory.create_from_dict(config)
        self._factory.add_instantiated_service(name, service)
        newly_instantiated.add(name)
    if not newly_instantiated:
        raise Exception('No newly instantiated services')
    # Remove the instantiated services from the graph ...
    for name in newly_instantiated:
        del nodes[name]
    if starting_num_nodes == len(nodes):
        raise Exception('No nodes removed!')
    # ... and from the remaining dependency sets, then recurse
    for (name, dependency_set) in six.iteritems(nodes):
        nodes[name] = dependency_set.difference(newly_instantiated)
    self._do(nodes)
Recursive method to instantiate services
def all_instances():
    return sorted([i for i in asyncore.socket_map.values()
                   if isinstance(i, remote_dispatcher.RemoteDispatcher)],
                  key=lambda i: i.display_name or '')
Return all the remote_dispatcher instances, sorted by display name.
def _default_warning_handler(library_msg, _):
    library_msg = library_msg.decode('utf-8').rstrip()
    msg = "OpenJPEG library warning: {0}".format(library_msg)
    warnings.warn(msg, UserWarning)
Default warning handler callback.
def urljoin(*fragments):
    # Strip trailing slashes from everything but the last fragment
    parts = [fragment.rstrip('/') for fragment in fragments[:len(fragments) - 1]]
    parts.append(fragments[-1])
    return '/'.join(parts)
Concatenate multiple URL fragments into a single URL.
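A quick illustration of the trailing-slash handling, assuming the function above is in scope:

>>> urljoin('https://example.com/', 'api/', 'v1/', 'items')
'https://example.com/api/v1/items'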
def _need_subquery(arg, attributes, named_attributes):
    if arg.heading.expressions or arg.distinct:
        return True
    restricting_attributes = arg.attributes_in_restriction()
    # A subquery is needed if the restriction touches attributes that the
    # projection removes or renames
    return (not restricting_attributes.issubset(attributes) or
            any(v.strip() in restricting_attributes for v in named_attributes.values()))
Decide whether the projection argument needs to be wrapped in a subquery
def parse_block_scalar_indent(token_class):
    def callback(lexer, match, context):
        text = match.group()
        if context.block_scalar_indent is None:
            if len(text) <= max(context.indent, 0):
                context.stack.pop()
                context.stack.pop()
                return
            context.block_scalar_indent = len(text)
        else:
            if len(text) < context.block_scalar_indent:
                context.stack.pop()
                context.stack.pop()
                return
        if text:
            yield match.start(), token_class, text
        context.pos = match.end()
    return callback
Process indentation spaces in a block scalar.
def print_warning(cls):
    if not cls.warning:
        cls.warning = True
        print("Can't create progress bar:", str(TQDM_IMPORT_ERROR),
              file=sys.stderr)
Print a missing progress bar warning if it was not printed.
def release_assets(self, release):
    release = self.as_id(release)
    return self.get_list(url='%s/%s/assets' % (self, release))
Assets for a given release
def retrieve(self, value):
    if isinstance(value, string_types):
        for key, this_value in self.secrets.items():
            value = value.replace(key, this_value)
    elif isinstance(value, MutableMapping):
        return {k: self.retrieve(v) for k, v in value.items()}
    elif isinstance(value, MutableSequence):
        return [self.retrieve(v) for v in value]
    return value
Replace placeholders with their corresponding secrets.
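A minimal self-contained sketch of the substitution behavior; the SecretStore holder class and its contents are invented for illustration, and string_types is narrowed to str as on Python 3:

from collections.abc import MutableMapping, MutableSequence

class SecretStore:
    def __init__(self, secrets):
        self.secrets = secrets

    def retrieve(self, value):
        if isinstance(value, str):
            for key, this_value in self.secrets.items():
                value = value.replace(key, this_value)
        elif isinstance(value, MutableMapping):
            return {k: self.retrieve(v) for k, v in value.items()}
        elif isinstance(value, MutableSequence):
            return [self.retrieve(v) for v in value]
        return value

store = SecretStore({"$TOKEN": "s3cr3t"})
print(store.retrieve({"auth": "Bearer $TOKEN", "urls": ["$TOKEN@host"]}))
# {'auth': 'Bearer s3cr3t', 'urls': ['s3cr3t@host']}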
def remove_expired_multipartobjects():
    delta = current_app.config['FILES_REST_MULTIPART_EXPIRES']
    expired_dt = datetime.utcnow() - delta
    file_ids = []
    for mp in MultipartObject.query_expired(expired_dt):
        file_ids.append(str(mp.file_id))
        mp.delete()
    for fid in file_ids:
        remove_file_data.delay(fid)
Remove expired multipart objects.
def user_id(self):
    if self._user_id is None:
        if self.has_logged_in:
            self._user_id = self._req_get_user_aq()['data']['uid']
        else:
            raise AuthenticationError('Not logged in.')
    return self._user_id
User id of the current API user
def cached_accessor(func_or_att):
    # Used bare (@cached_accessor): derive the attribute name from the function
    if callable(func_or_att):
        return cached_accessor(func_or_att.__name__)(func_or_att)
    att = func_or_att
    def make_cached_function(func):
        @wraps(func)
        def cached_check_version(self):
            private_att = '_' + att
            # 1. in-memory cache
            if getattr(self, private_att):
                return getattr(self, private_att)
            # 2. on-disc cache
            setattr(self, private_att, self.load_key(att))
            if getattr(self, private_att):
                return getattr(self, private_att)
            # 3. compute, then populate both caches
            value = func(self)
            setattr(self, private_att, value)
            self.save_key(att, value)
            return value
        return cached_check_version
    return make_cached_function
Decorated function checks in-memory cache and disc cache for att first
def requirements(filename):
    with open(filename) as f:
        return [x.strip() for x in f.readlines() if x.strip()]
Reads requirements from a file.
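For example, assuming the function above is in scope and given a hypothetical requirements.txt containing "requests\n\nflask>=1.0\n", the blank line is dropped and the rest is stripped:

>>> requirements('requirements.txt')
['requests', 'flask>=1.0']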
def save_rules(self, op_file):
    with open(op_file, 'w') as f:
        for m in self.maps:
            f.write(m.format_for_file_output())
Save the rules to file after web updates or program changes.
def repeat(self, rid, count, index=0):
    elems = None
    if rid in self.__repeat_ids:
        elems = self.__repeat_ids[rid]
    elif rid in self.__element_ids:
        # NOTE: the original read `elems = self.__element_ids`, which would
        # index the dict itself below; wrapping the matching element in a
        # list is presumably what was intended.
        elems = [self.__element_ids[rid]]
    if elems and index < len(elems):
        elem = elems[index]
        self.__repeat(elem, count)
Repeat an xml element marked with the matching rid.
def remove_nexusnve_binding(vni, switch_ip, device_id):
    LOG.debug("remove_nexusnve_binding() called")
    session = bc.get_writer_session()
    binding = (session.query(nexus_models_v2.NexusNVEBinding).
               filter_by(vni=vni, switch_ip=switch_ip,
                         device_id=device_id).one())
    if binding:
        session.delete(binding)
        session.flush()
        return binding
Remove the nexus nve binding.
def _from_dict(cls, _dict):
    args = {}
    if 'score' in _dict:
        args['score'] = _dict.get('score')
    if 'sentence' in _dict:
        args['sentence'] = _dict.get('sentence')
    if 'type' in _dict:
        args['type'] = _dict.get('type')
    if 'arguments' in _dict:
        args['arguments'] = [
            RelationArgument._from_dict(x) for x in (_dict.get('arguments'))
        ]
    return cls(**args)
Initialize a RelationsResult object from a json dictionary.
def query_boost_version(boost_root):
    boost_version = None
    if os.path.exists(os.path.join(boost_root, 'Jamroot')):
        with codecs.open(os.path.join(boost_root, 'Jamroot'), 'r', 'utf-8') as f:
            for line in f.readlines():
                parts = line.split()
                # Matches lines like: constant BOOST_VERSION : 1.70.0 ;
                if len(parts) >= 5 and parts[1] == 'BOOST_VERSION':
                    boost_version = parts[3]
                    break
    if not boost_version:
        boost_version = 'default'
    return boost_version
Read in the Boost version from a given boost_root.
def _init_spark(self):
    global _SPARK_INITIALIZED
    spark_home = self.spark_home
    python_path = self._python_path
    if use_findspark:
        if _SPARK_INITIALIZED:
            if spark_home == os.environ["SPARK_HOME"]:
                # Already initialized against the same SPARK_HOME; nothing to do
                pass
            else:
                # findspark prepends two entries to sys.path; drop them before
                # re-initializing against the new SPARK_HOME
                sys.path.pop(0)
                sys.path.pop(0)
                findspark.init(spark_home=spark_home, edit_rc=False,
                               edit_profile=False, python_path=python_path)
        else:
            findspark.init(spark_home=spark_home, edit_rc=False,
                           edit_profile=False, python_path=python_path)
        _SPARK_INITIALIZED = True
    self._set_environment_variables()
Initializes Spark so that pyspark is importable; this also sets up the required environment variables.
def _read_cache(self):
    if os.path.exists(self.filepath):
        with open(self.filepath, 'rt', encoding='utf8') as fh:
            lines = [x.strip() for x in fh]
    else:
        logger.debug("Index not found, starting empty")
        lines = []
    return lines
Read virtualenv metadata from cache.
def uploads(self):
    if self._resources is None:
        self.__init()
    if "uploads" in self._resources:
        url = self._url + "/uploads"
        return _uploads.Uploads(url=url,
                                securityHandler=self._securityHandler,
                                proxy_url=self._proxy_url,
                                proxy_port=self._proxy_port,
                                initialize=True)
    else:
        return None
Returns an object to work with the site uploads.
def _clean_schema_fields(self, fields):
    fields_sorted = sorted(fields, key=lambda field: field["name"])
    return [
        {"name": field["name"], "type": field["type"]}
        for field in fields_sorted
    ]
Return a sanitized version of the schema for comparisons.
def exit(self) -> None:
    total = sum(len(logs) for logs in self.logs.values())
    if self.json:
        self.logs['total'] = total
        print(json.dumps(self.logs, indent=self.indent))
    else:
        for name, log in self.logs.items():
            if not log or self.parser[name].as_bool("quiet"):
                continue
            print("[[{0}]]".format(name))
            getattr(snekchek.format, name + "_format")(log)
            print("\n")
        print("-" * 30)
        print("Total:", total)
    sys.exit(self.status_code)
Raise SystemExit with correct status code and output logs.
def _process_all_1(self, texts:Collection[str]) -> List[List[str]]:
    "Process a list of `texts` in one process."
    tok = self.tok_func(self.lang)
    if self.special_cases:
        tok.add_special_cases(self.special_cases)
    return [self.process_text(str(t), tok) for t in texts]
Process a list of `texts` in one process.
def wrap_handler(cls, handler, protocol, **kwargs):
    def _wrapper(request, *args, **kwargs):
        instance = cls(request=request, **kwargs)
        if protocol == Resource.Protocol.http:
            return instance._wrap_http(handler, request=request, **kwargs)
        elif protocol == Resource.Protocol.websocket:
            return instance._wrap_ws(handler, request=request, **kwargs)
        elif protocol == Resource.Protocol.amqp:
            # NOTE: the original referenced an undefined `view_type` here;
            # passing `handler`, as the other branches do, is presumably the intent.
            return instance._wrap_amqp(handler, *args, **kwargs)
        else:
            raise Exception('Communication protocol not specified')
    return _wrapper
Wrap a request handler with the matching protocol handler
def install(self, pip_args=None):
    if path.isdir(self.env):
        print_pretty("<FG_RED>This seems to already be installed.<END>")
    else:
        print_pretty("<FG_BLUE>Creating environment {}...<END>\n".format(self.env))
        self.create_env()
        self.install_program(pip_args)
        self.create_links()
Install the program and put links in place.
def add_graph_copy(self, graph, tags=None):
    with graph.as_default():
        meta_graph = tf_v1.train.export_meta_graph(strip_default_attrs=True)
        _export_tags(meta_graph, tags)
        _export_signatures(meta_graph)
        _export_module_attachments(meta_graph)
    self._proto.meta_graphs.extend([meta_graph])
Adds a copy of Graph with the specified set of tags.
def __get_indexer(in_fns, selected_type=None):
    indexer = None
    if selected_type is not None:
        indexer = get_indexer_by_filetype(selected_type)
    else:
        if len(in_fns) == 0:
            raise IndexError("reading from stdin, unable to guess input file " +
                             "type, use -t option to set manually.\n")
        else:
            extension = set([os.path.splitext(f)[1] for f in in_fns])
            assert len(extension) >= 1
            if len(extension) > 1:
                raise IndexError("more than one file extension present, unable " +
                                 "to get input type, use -t option to set manually.\n")
            else:
                indexer = get_indexer_by_file_extension(list(extension)[0])
    assert indexer is not None
    return indexer
Determine which indexer to use based on input files and type option.
def _doClobber(self):
    # yield awaits the Deferred returned by runRmdir (inlineCallbacks style)
    rc = yield self.runRmdir(self.workdir, timeout=self.timeout)
    if rc != RC_SUCCESS:
        raise RuntimeError("Failed to delete directory")
    return rc
Remove the work directory
def add_value(self, val:float)->None:
    "Add `val` to calculate updated smoothed value."
    self.n += 1
    self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
    # Debias the estimate for the first few values (Adam-style correction)
    self.smooth = self.mov_avg / (1 - self.beta ** self.n)
Add `val` to calculate updated smoothed value.
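A minimal self-contained sketch of the smoothing above; the SmoothenValue class name and the beta default are assumptions based on the method body:

class SmoothenValue:
    def __init__(self, beta: float = 0.98):
        self.beta, self.n, self.mov_avg = beta, 0, 0.0

    def add_value(self, val: float) -> None:
        self.n += 1
        self.mov_avg = self.beta * self.mov_avg + (1 - self.beta) * val
        self.smooth = self.mov_avg / (1 - self.beta ** self.n)

s = SmoothenValue()
for v in [1.0, 2.0, 3.0]:
    s.add_value(v)
print(round(s.smooth, 4))  # 2.0135: tracks recent inputs despite the zero-initialized average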
def create_at_path(self, asset_content, url_path, tags=''):
    return self._create_asset({
        'asset': b64encode(asset_content),
        'url-path': url_path,
        'tags': tags,
        'type': 'base64'
    })
Create asset at a specific URL path on the server
def _to_full_dict(xmltree):
    xmldict = {}
    for attrName, attrValue in xmltree.attrib.items():
        xmldict[attrName] = attrValue
    # NB: getchildren() was removed in Python 3.9; list(xmltree) is the modern equivalent
    if not xmltree.getchildren():
        if not xmldict:
            # No attributes and no children: collapse to the text content
            return xmltree.text
        elif xmltree.text:
            xmldict[_conv_name(xmltree.tag)] = xmltree.text
    for item in xmltree:
        name = _conv_name(item.tag)
        if name not in xmldict:
            xmldict[name] = _to_full_dict(item)
        else:
            # Repeated tags become a list
            if not isinstance(xmldict[name], list):
                xmldict[name] = [xmldict[name]]
            xmldict[name].append(_to_full_dict(item))
    return xmldict
Returns the full XML dictionary including attributes.
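A small illustration, assuming the function above is in scope and with `_conv_name` stubbed as the identity (its real definition is not shown):

import xml.etree.ElementTree as ET

_conv_name = lambda tag: tag  # stub; the real helper presumably normalizes names

root = ET.fromstring('<book id="1"><title>Dune</title><tag>a</tag><tag>b</tag></book>')
# _to_full_dict(root) -> {'id': '1', 'title': 'Dune', 'tag': ['a', 'b']}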
def __flush(self):
    if self.__args is None or self.__disconnecting:
        return False
    self.__sigDelayed.emit(self.__args)
    self.__args = None
    self.__timer.stop()
    self.__lastFlushTime = time.time()
    return True
If there is a signal queued up, send it now.
def tag(self, tag):
    url = '%s/tags/%s' % (self, tag)
    response = self.http.get(url, auth=self.auth)
    response.raise_for_status()
    return response.json()
Get a release by tag
def _walk_to_root(path):
    if not os.path.exists(path):
        raise IOError('Starting path not found')
    if os.path.isfile(path):
        path = os.path.dirname(path)
    last_dir = None
    current_dir = os.path.abspath(path)
    # Stop when the parent equals the current directory, i.e. at the root
    while last_dir != current_dir:
        yield current_dir
        parent_dir = os.path.abspath(os.path.join(current_dir, os.path.pardir))
        last_dir, current_dir = current_dir, parent_dir
Yield directories starting from the given directory up to the root
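For instance, assuming the generator above is in scope, walking from the working directory yields each ancestor once and stops at the filesystem root, where the parent no longer changes:

import os

for d in _walk_to_root(os.getcwd()):
    print(d)  # e.g. /home/user/project, /home/user, /home, /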
def constrain(n, min, max):
    # Note: the parameter names shadow the built-ins within this function
    if n < min:
        return min
    if n > max:
        return max
    return n
Return the number `n` constrained to the min and max bounds.
def end_index(self):
    paginator = self.paginator
    # The last page may hold fewer items than per_page
    if self.number == paginator.num_pages:
        return paginator.count
    return (self.number - 1) * paginator.per_page + paginator.first_page
Return the 1-based index of the last item on this page.
def _add_segmetrics_to_output(out, data):
    out_file = "%s-segmetrics.txt" % os.path.splitext(out["cns"])[0]
    if not utils.file_exists(out_file):
        with file_transaction(data, out_file) as tx_out_file:
            cmd = [os.path.join(os.path.dirname(sys.executable), "cnvkit.py"),
                   "segmetrics", "--median", "--iqr", "--ci", "--pi",
                   "-s", out["cns"], "-o", tx_out_file, out["cnr"]]
            # Use looser, cheaper bootstrap intervals for whole-genome runs
            if dd.get_coverage_interval(data) == "genome":
                cmd += ["--alpha", "0.1", "--bootstrap", "50"]
            else:
                cmd += ["--alpha", "0.01", "--bootstrap", "500"]
            do.run(cmd, "CNVkit segmetrics")
    out["segmetrics"] = out_file
    return out
Add metrics for measuring reliability of CNV estimates.
def _to_python(input_string):
    python_string = ''
    for i in range(len(input_string)):
        if not python_string:
            python_string += input_string[i].lower()
        elif input_string[i].isupper():
            python_string += '_%s' % input_string[i].lower()
        else:
            python_string += input_string[i]
    return python_string
A helper method to convert camelCase names to Python snake_case.
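For example, assuming the function above is in scope:

>>> _to_python('getUserName')
'get_user_name'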
def _sounds_re(include_erhua=False):
    tone = '[1-5]'
    optional_final_erhua = '|r\\b' if include_erhua else ''
    pattern = '({}{}{})'.format(_joined_syllables_re(), tone, optional_final_erhua)
    return re.compile(pattern, re.IGNORECASE)
Sounds are syllables + tones
def to_dict(self):
    return {'x': self.x,
            'y': self.y,
            'height': self.height,
            'width': self.width,
            'direction': dir.to_string(self.direction)}
Save this location into a dictionary.
def getOutput(self):
    output = self.stdout
    if self.stdout:
        output += '\r\n'
    output += self.stderr
    return output
Returns the combined output of stdout and stderr
def iter_lines(self, warn_only=False):
    remain = ""
    for data in self.iter_content(LINE_CHUNK_SIZE, warn_only=True):
        # Slicing keeps this a bytes comparison on Python 3 (data[-1] would be an int)
        line_break_found = data[-1:] in (b"\n", b"\r")
        lines = data.decode(self.codec).splitlines()
        lines[0] = remain + lines[0]
        if line_break_found:
            # The original snippet never reset `remain`; clearing it once the
            # partial line has been consumed is presumably the intent.
            remain = ""
        else:
            # The chunk ended mid-line; keep the partial line for the next chunk
            remain = lines.pop()
        for line in lines:
            yield line
    if remain:
        yield remain
    self._state = FINISHED
    if not warn_only:
        self.raise_for_error()
Yields stdout text, line by line.
def fbeta(y_pred:Tensor, y_true:Tensor, thresh:float=0.2, beta:float=2,
          eps:float=1e-9, sigmoid:bool=True)->Rank0Tensor:
    "Computes the f_beta between `preds` and `targets`"
    beta2 = beta ** 2
    if sigmoid:
        y_pred = y_pred.sigmoid()
    y_pred = (y_pred > thresh).float()
    y_true = y_true.float()
    TP = (y_pred * y_true).sum(dim=1)
    prec = TP / (y_pred.sum(dim=1) + eps)
    rec = TP / (y_true.sum(dim=1) + eps)
    res = (prec * rec) / (prec * beta2 + rec + eps) * (1 + beta2)
    return res.mean()
Computes the f_beta between `preds` and `targets`
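A quick sanity check, a sketch assuming `fbeta` above is in scope and PyTorch is installed; sigmoid(2.0) is about 0.88 and sigmoid(-2.0) about 0.12, so thresholding at 0.2 recovers [1, 0, 1]:

import torch

y_pred = torch.tensor([[2.0, -2.0, 1.0]])  # logits
y_true = torch.tensor([[1, 0, 1]])
print(float(fbeta(y_pred, y_true)))  # ~1.0: precision and recall are both perfect here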
def Search(self, text):
    if isinstance(text, rdfvalue.RDFString):
        text = str(text)
    return self._regex.search(text)
Search the text for our value.
def open(self):
    if self._is_open:
        raise HIDException("Failed to open device: HIDDevice already open")
    path = self.path.encode('utf-8')
    dev = hidapi.hid_open_path(path)
    if dev:
        self._is_open = True
        self._device = dev
    else:
        raise HIDException("Failed to open device")
Open the HID device for reading and writing.
def build_joblist(jobgraph):
    jobset = set()
    for job in jobgraph:
        jobset = populate_jobset(job, jobset, depth=1)
    return list(jobset)
Returns a list of jobs from a passed jobgraph.
def recursive_insert(self, node, coord, data, start, end):
    if node[0] != -1:
        left = (start, node[0])
        right = (node[0], end)
        if self.is_within(left, coord):
            node[1][-1].append(data)
        elif self.overlap(left, coord):
            self.recursive_insert(node[1], coord, data, left[0], left[1])
        if self.is_within(right, coord):
            node[2][-1].append(data)
        elif self.overlap(right, coord):
            self.recursive_insert(node[2], coord, data, right[0], right[1])
Recursively inserts id data into nodes
def upgradestep(upgrade_product, version):
    def wrap_func(fn):
        def wrap_func_args(context, *args):
            p = getToolByName(context, 'portal_quickinstaller').get(upgrade_product)
            setattr(p, 'installedversion', version)
            return fn(context, *args)
        return wrap_func_args
    return wrap_func
Decorator for updating the QuickInstaller of an upgrade.
def error(self, message, print_help=False):
    if 'too few arguments' in message.lower():
        # Rebuild argv[0] so the suggested help command is copy-pasteable
        target = sys.argv.pop(0)
        sys.argv.insert(0, os.path.basename(target) or os.path.relpath(target))
        message = ("%s. Try getting help with `%s --help`"
                   % (message, " ".join(sys.argv)))
    if print_help:
        self.print_help()
    else:
        self.print_usage()
    sys.stderr.write('\nerror: %s\n' % message)
    sys.exit(2)
Provide a more helpful message if there are too few arguments.
def add_block_as_child_node(self, block, node):
    child = etree.SubElement(node, "unknown")
    block.add_xml_to_node(child)
Export `block` as a child node of `node`.
def text(self, prompt, default=None):
    prompt = prompt if prompt is not None else 'Enter some text'
    prompt += " [{0}]: ".format(default) if default is not None else ': '
    return self.input(curry(filter_text, default=default), prompt)
Prompts the user for some text, with optional default
def build_caching_info_message(job_spec, job_id, workflow_workspace,
                               workflow_json, result_path):
    caching_info_message = {
        "job_spec": job_spec,
        "job_id": job_id,
        "workflow_workspace": workflow_workspace,
        "workflow_json": workflow_json,
        "result_path": result_path,
    }
    return caching_info_message
Build the caching info message with correct formatting.
def random(self, namespace=0):
    query = self.LIST.substitute(WIKI=self.uri,
                                 ENDPOINT=self.endpoint,
                                 LIST='random')
    query += "&rnlimit=1&rnnamespace=%d" % namespace
    emoji = [
        u'\U0001f32f', u'\U0001f355', u'\U0001f35c', u'\U0001f363',
        u'\U0001f369', u'\U0001f36a', u'\U0001f36d', u'\U0001f370',
    ]
    action = 'random'
    if namespace:
        action = 'random:%d' % namespace
    self.set_status(action, random.choice(emoji))
    return query
Returns query string for random page
def _is_locked(self):
    if os.path.isfile(self._lck):
        try:
            import psutil
        except ImportError:
            # Without psutil we can't check the PID, so assume locked
            return True
        with open(self._lck) as f:
            pid = f.read()
        return True if psutil.pid_exists(int(pid)) else False
    else:
        return False
Checks to see if we are already pulling items from the queue
def base64_decodestring(instr):
    b = salt.utils.stringutils.to_bytes(instr)
    try:
        # Python 3
        decoded = base64.decodebytes(b)
    except AttributeError:
        # Python 2
        decoded = base64.decodestring(b)
    try:
        return salt.utils.stringutils.to_unicode(
            decoded,
            encoding='utf8' if salt.utils.platform.is_windows() else None
        )
    except UnicodeDecodeError:
        return decoded
Decode a base64-encoded string using the "legacy" Python interface.
def fingers_needed(fingering):
    split = False        # an open string splits the fretted shape
    indexfinger = False  # whether the index finger already barres the lowest fret
    minimum = min(finger for finger in fingering if finger)
    result = 0
    for finger in reversed(fingering):
        if finger == 0:
            split = True
        else:
            if not split and finger == minimum:
                # Unsplit strings on the lowest fret can share one barre finger
                if not indexfinger:
                    result += 1
                    indexfinger = True
            else:
                result += 1
    return result
Return the number of fingers needed to play the given fingering.
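Two guitar-chord examples, assuming the function above is in scope:

>>> fingers_needed((0, 3, 2, 0, 1, 0))  # C major: open strings split the shape, no barre
3
>>> fingers_needed((1, 3, 3, 2, 1, 1))  # F major: the three 1s share one index-finger barre
4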
def parse_parent(docname):
    lineage = docname.split('/')
    lineage_count = len(lineage)
    if docname == 'index':
        # The root has no parent
        parent = None
    elif lineage_count == 1:
        # Top-level documents are parented to the root
        parent = 'index'
    elif lineage_count == 2 and lineage[-1] == 'index':
        # Section index pages are parented to the root
        parent = 'index'
    elif lineage_count == 2:
        # Documents in a section are parented to the section's index
        parent = lineage[0] + '/index'
    elif lineage[-1] == 'index':
        # Deeper section indexes are parented to the grandparent's index
        parent = '/'.join(lineage[:-2]) + '/index'
    else:
        # Deeper documents are parented to the containing section's index
        parent = '/'.join(lineage[:-1]) + '/index'
    return parent
Given a docname path, pick apart and return name of parent
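Concretely, assuming the function above is in scope:

>>> parse_parent('about')             # top-level page
'index'
>>> parse_parent('blog/first-post')   # page inside a section
'blog/index'
>>> parse_parent('blog/2020/index')   # nested section index
'blog/index'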
def camel_to_(s):
    s1 = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', s)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2', s1).lower()
Convert CamelCase to camel_case
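The two-pass substitution also handles acronym runs, assuming the function above is in scope:

>>> camel_to_('CamelCase')
'camel_case'
>>> camel_to_('HTTPResponseCode')
'http_response_code'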
async def jsk_tasks(self, ctx: commands.Context):
    if not self.tasks:
        return await ctx.send("No currently running tasks.")
    paginator = commands.Paginator(max_size=1985)
    for task in self.tasks:
        paginator.add_line(f"{task.index}: `{task.ctx.command.qualified_name}`, invoked at "
                           f"{task.ctx.message.created_at.strftime('%Y-%m-%d %H:%M:%S')} UTC")
    interface = PaginatorInterface(ctx.bot, paginator, owner=ctx.author)
    await interface.send_to(ctx)
Shows the currently running jishaku tasks.
def zoom_out(self):
    index = self._zoom_factors.index(self._zoom_factor)
    if index == 0:
        return
    self._zoom_factor = self._zoom_factors[index - 1]
    if self._zoom_factors.index(self._zoom_factor) == 0:
        # Reached minimum zoom; disable further zooming out
        self._button_zoom_out.config(state=tk.DISABLED)
    self._button_zoom_in.config(state=tk.NORMAL)
    self.draw_timeline()
Decrease zoom factor and redraw TimeLine
def _parse_tensor_info_proto(tensor_info):
    encoding = tensor_info.WhichOneof("encoding")
    dtype = tf.DType(tensor_info.dtype)
    shape = tf.TensorShape(tensor_info.tensor_shape)
    if encoding == "name":
        return ParsedTensorInfo(dtype=dtype, shape=shape, is_sparse=False)
    elif encoding == "coo_sparse":
        return ParsedTensorInfo(dtype=dtype, shape=shape, is_sparse=True)
    else:
        raise ValueError("Unsupported TensorInfo encoding %r" % encoding)
Returns a ParsedTensorInfo instance from a TensorInfo proto.
def dim_axis_label(dimensions, separator=', '):
    if not isinstance(dimensions, list):
        dimensions = [dimensions]
    return separator.join([d.pprint_label for d in dimensions])
Returns an axis label for one or more dimensions.
def _winsorize_wrapper(x, limits):
    if isinstance(x, pd.Series):
        if x.count() == 0:
            return x
        # Winsorize only the non-NaN values in place
        notnanx = ~np.isnan(x)
        x[notnanx] = scipy.stats.mstats.winsorize(x[notnanx], limits=limits)
        return x
    else:
        return scipy.stats.mstats.winsorize(x, limits=limits)
Wraps the scipy winsorize function so that NaNs are skipped.
def getProjectionMatrix(self, eEye, fNearZ, fFarZ):
    fn = self.function_table.getProjectionMatrix
    result = fn(eEye, fNearZ, fFarZ)
    return result
The projection matrix for the specified eye
def _fromGUI(self, value):
    if value == '':
        # Return 0 for an empty control unless None is allowed
        if not self.IsNoneAllowed():
            return 0
        else:
            return
    else:
        try:
            return int(value)
        except ValueError:
            if self.IsLongAllowed():
                # Python 2: fall back to long for values too big for int
                try:
                    return long(value)
                except ValueError:
                    wx.TextCtrl.SetValue(self, "0")
                    return 0
            else:
                raise
Conversion function used in getting the value of the control.
def logged_exec(cmd):
    logger = logging.getLogger('fades.exec')
    logger.debug("Executing external command: %r", cmd)
    p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                         stderr=subprocess.STDOUT, universal_newlines=True)
    stdout = []
    for line in p.stdout:
        line = line[:-1]  # drop the trailing newline
        stdout.append(line)
        logger.debug(STDOUT_LOG_PREFIX + line)
    retcode = p.wait()
    if retcode:
        raise ExecutionError(retcode, cmd, stdout)
    return stdout
Execute a command, redirecting the output to the log.
def _download_file(uri, bulk_api):
    resp = requests.get(uri, headers=bulk_api.headers(), stream=True)
    with tempfile.TemporaryFile("w+b") as f:
        for chunk in resp.iter_content(chunk_size=None):
            f.write(chunk)
        f.seek(0)
        yield f
Download the bulk API result file for a single batch
def ipreorder(self):
    children = deque((self, ))
    while children:
        cur_node = children.pop()
        children.extend(reversed(cur_node.children))
        yield cur_node
Depth-first pre-order iteration of tree nodes
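A minimal runnable sketch with an invented Node class (the real class presumably exposes `children` the same way):

from collections import deque

class Node:
    def __init__(self, name, children=()):
        self.name, self.children = name, list(children)

    def ipreorder(self):
        nodes = deque((self,))
        while nodes:
            cur_node = nodes.pop()
            nodes.extend(reversed(cur_node.children))
            yield cur_node

tree = Node('a', [Node('b', [Node('d')]), Node('c')])
print([n.name for n in tree.ipreorder()])  # ['a', 'b', 'd', 'c']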
def create_api_network_ipv4(self):
    return ApiNetworkIPv4(self.networkapi_url, self.user,
                          self.password, self.user_ldap)
Get an instance of Api Networkv4 services facade.
def _op_shape(self, other):
    if isinstance(other, farray):
        if self.shape == other.shape:
            return self.shape
        elif self.size == other.size:
            return None
        else:
            raise ValueError("expected operand sizes to match")
    else:
        raise TypeError("expected farray input")
Return shape that will be used by farray constructor.
def start_polling(self, interval):
    interval = float(interval)
    self.polling = True
    self.term_checker.reset()
    logger.info("Starting polling for changes to the track list")
    while self.polling:
        loop_start = time()
        self.update_stream()
        self.handle_exceptions()
        # Sleep out the rest of the interval, at least 0.1s
        elapsed = time() - loop_start
        sleep(max(0.1, interval - elapsed))
    logger.warning("Term poll ceased!")
Start polling for term updates and streaming.
def reload(self):
    other = type(self).get(self.name, service=self.service)
    self.request_count = other.request_count
Reload self from self.service.
def update_shipment(self, resource_id, data):
    return Shipments(self.client).on(self).update(resource_id, data)
Update the tracking information of a shipment.
def _countdown(seconds):
    for i in range(seconds, 0, -1):
        sys.stdout.write("%02d" % i)
        time.sleep(1)
        sys.stdout.write("\b\b")  # back up over the two digits just written
        sys.stdout.flush()
    sys.stdout.flush()
Wait `seconds` seconds, counting down on stdout.
def add(self, directory, path=None) -> None:
    objecttools.valid_variable_identifier(directory)
    if path is None:
        path = directory
    setattr(self, directory, path)
Add a directory and optionally its path.
def _set_used_as_input_variables_by_entity(self) -> Dict[str, List[str]]:
    if self.used_as_input_variables_by_entity is not None:
        return
    tax_benefit_system = self.tax_benefit_system
    assert set(self.used_as_input_variables) <= set(tax_benefit_system.variables.keys()), \
        "Some variables used as input variables are not part of the tax benefit system:\n {}".format(
            set(self.used_as_input_variables).difference(set(tax_benefit_system.variables.keys()))
        )
    self.used_as_input_variables_by_entity = dict()
    for entity in tax_benefit_system.entities:
        self.used_as_input_variables_by_entity[entity.key] = [
            variable for variable in self.used_as_input_variables
            if tax_benefit_system.get_variable(variable).entity == entity
        ]
    return self.used_as_input_variables_by_entity
Identify and set the good input variables for the different entities
def _float_generator(descriptor, bitwidth):
    'Helper to create floating point values'
    return gen.IterValueGenerator(descriptor.name, values.get_floats(bitwidth))
Helper to create floating point values
def runGetReadGroup(self, id_):
    compoundId = datamodel.ReadGroupCompoundId.parse(id_)
    dataset = self.getDataRepository().getDataset(compoundId.dataset_id)
    readGroupSet = dataset.getReadGroupSet(compoundId.read_group_set_id)
    readGroup = readGroupSet.getReadGroup(id_)
    return self.runGetRequest(readGroup)
Returns a read group with the given id_
def _units_in_area(self, rect):
    player_id = self._obs.observation.player_common.player_id
    return [u for u, p in self._visible_units()
            if rect.intersects_circle(p, u.radius) and u.owner == player_id]
Return the list of units that intersect the rect.
def read(self, n):
    # Ensure the buffer holds at least n bytes past the current position
    bytes_wanted = n - self.buffer_length + self.pos + 1
    if bytes_wanted > 0:
        self._buffer_bytes(bytes_wanted)
    end_pos = self.pos + n
    ret = self.buffer[self.pos + 1:end_pos + 1]
    self.pos = end_pos
    return ret
Reads n bytes into the internal buffer
def getClientSSLContext(self):
    sslctx = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
    self._loadCasIntoSSLContext(sslctx)
    return sslctx
Returns an ssl.SSLContext appropriate for initiating a TLS session
def _formatExternalIdentifier(self, element, element_type):
    if "http" not in element['database']:
        # CURIE-style identifier: expand the prefix to a namespace URL
        term = "{}:{}".format(element['database'], element['identifier'])
        namespaceTerm = self._toNamespaceURL(term)
    else:
        namespaceTerm = "{}{}".format(element['database'], element['identifier'])
    comparison = '?{} = <{}> '.format(element_type, namespaceTerm)
    return comparison
Formats a single external identifier for query
def silent_execute(self, code):
    try:
        self.kernel_client.execute(to_text_string(code), silent=True)
    except AttributeError:
        pass
Execute code in the kernel without increasing the prompt