| code (string, lengths 75–104k) | docstring (string, lengths 1–46.9k) |
|---|---|
def get_response(self, method, endpoint, headers=None, json=None, params=None, data=None):
    # pylint: disable=too-many-arguments
    """
    Return the response obtained from the requested endpoint with the
    requested method.

    :param method: str. one of the methods accepted by Requests ('POST', 'GET', ...)
    :param endpoint: str. the relative endpoint to access
    :param params: (optional) Dictionary or bytes to be sent in the query string
        for the :class:`Request`.
    :param data: (optional) Dictionary, bytes, or file-like object to send in the body
        of the :class:`Request`.
    :param json: (optional) json to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :return: Requests.response
    :raises BackendException: on connection-level errors (timeout, no session, ...)
    """
    logger.debug("Parameters for get_response:")
    logger.debug("\t - endpoint: %s", endpoint)
    logger.debug("\t - method: %s", method)
    logger.debug("\t - headers: %s", headers)
    logger.debug("\t - json: %s", json)
    logger.debug("\t - params: %s", params)
    logger.debug("\t - data: %s", data)
    url = self.get_url(endpoint)
    try:
        response = self.session.request(
            method=method, url=url, headers=headers, json=json,
            params=params, data=data, proxies=self.proxies,
            timeout=self.timeout)
    except RequestException as e:
        # Connection-level failure: wrap it in a backend-style error payload.
        raise BackendException(
            code=BACKEND_ERROR,
            message=e,
            response={"_status": "ERR",
                      "_error": {"message": e, "code": BACKEND_ERROR},
                      "_issues": {"message": e, "code": BACKEND_ERROR}})
    logger.debug("response headers: %s", response.headers)
    logger.debug("response content: %s", response.content)
    return response
|
Returns the response from the requested endpoint with the requested method
:param method: str. one of the methods accepted by Requests ('POST', 'GET', ...)
:param endpoint: str. the relative endpoint to access
:param params: (optional) Dictionary or bytes to be sent in the query string
for the :class:`Request`.
:param data: (optional) Dictionary, bytes, or file-like object to send in the body
of the :class:`Request`.
:param json: (optional) json to send in the body of the :class:`Request`.
:param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
:return: Requests.response
|
def convert_to_ns(self, value):
    '''Convert a value to the prefixed rdf namespace equivalent.

    Falls back to ``self.pyhttp(value)`` when the parsed prefix is not
    present in ``self.uri_dict``.

    args:
        value: the value to convert
    '''
    parsed = self.parse_uri(value)
    prefix = parsed[0]
    if prefix in self.uri_dict:
        return "%s_%s" % (self.uri_dict[prefix], parsed[1])
    return self.pyhttp(value)
|
converts a value to the prefixed rdf ns equivalent. If not found
returns the value as is
args:
value: the value to convert
|
def get_by_id(self, webhook, params=None, **options):
    """Returns the full record for the given webhook.

    Parameters
    ----------
    webhook : {Id} The webhook to get.
    [params] : {Object} Parameters for the request
    """
    # BUGFIX: the old default `params={}` was a mutable default argument,
    # shared across all calls; default to None and build a fresh dict.
    if params is None:
        params = {}
    path = "/webhooks/%s" % (webhook)
    return self.client.get(path, params, **options)
|
Returns the full record for the given webhook.
Parameters
----------
webhook : {Id} The webhook to get.
[params] : {Object} Parameters for the request
|
def get_huisnummer_by_id(self, id):
    '''
    Retrieve a `huisnummer` by the Id.

    :param integer id: the Id of the `huisnummer`
    :rtype: :class:`Huisnummer`
    :raises GatewayResourceNotFoundException: when the service returns
        nothing for this Id
    '''
    def creator():
        # Ask the CRAB service for the huisnummer including its status.
        res = crab_gateway_request(
            self.client, 'GetHuisnummerWithStatusByHuisnummerId', id
        )
        # BUGFIX: compare against None with `is`, not `==` (PEP 8);
        # `==` can invoke arbitrary __eq__ on the response object.
        if res is None:
            raise GatewayResourceNotFoundException()
        return Huisnummer(
            res.HuisnummerId,
            res.StatusHuisnummer,
            res.Huisnummer,
            res.StraatnaamId,
            Metadata(
                res.BeginDatum,
                res.BeginTijd,
                self.get_bewerking(res.BeginBewerking),
                self.get_organisatie(res.BeginOrganisatie)
            )
        )
    # Serve from the short-lived cache when one is configured.
    if self.caches['short'].is_configured:
        key = 'GetHuisnummerWithStatusByHuisnummerId#%s' % (id)
        huisnummer = self.caches['short'].get_or_create(key, creator)
    else:
        huisnummer = creator()
    huisnummer.set_gateway(self)
    return huisnummer
|
Retrieve a `huisnummer` by the Id.
:param integer id: the Id of the `huisnummer`
:rtype: :class:`Huisnummer`
|
def flatter(x, k=1):
    '''
    flatter(x) yields a numpy array equivalent to x but whose first dimension
    has been flattened.
    flatter(x, k) flattens the first k dimensions; when k is negative the last
    |k| dimensions are flattened instead. Passing +/-np.inf flattens everything
    (equivalent to flattest(x)). flatter(x, 0) yields x unchanged, and
    flatter(x) is equivalent to flatter(x, 1).
    '''
    if k == 0:
        return x
    # Densify sparse input, otherwise get an ndarray view of x.
    arr = x.toarray() if sps.issparse(x) else np.asarray(x)
    if len(arr.shape) - abs(k) < 2:
        # Fewer than two dimensions would remain: collapse completely.
        return arr.flatten()
    # Shift k by one so the slices below keep the correct remaining axes.
    k += np.sign(k)
    if k > 0:
        return np.reshape(arr, (-1,) + arr.shape[k:])
    return np.reshape(arr, arr.shape[:k] + (-1,))
|
flatter(x) yields a numpy array equivalent to x but whose first dimension has been flattened.
flatter(x, k) yields a numpy array whose first k dimensions have been flattened; if k is
negative, the last k dimensions are flattened. If np.inf or -np.inf is passed, then this is
equivalent to flattest(x). Note that flatter(x) is equivalent to flatter(x,1).
flatter(x, 0) yields x.
|
def op_nodes(self, op=None):
    """Get the list of "op" nodes in the dag.

    Args:
        op (Type): Instruction subclass of op nodes to return. If op=None,
            return all op nodes.

    Returns:
        list[DAGNode]: the list of node ids containing the given op.
    """
    return [node
            for node in self._multi_graph.nodes()
            if node.type == "op" and (op is None or isinstance(node.op, op))]
|
Get the list of "op" nodes in the dag.
Args:
op (Type): Instruction subclass op nodes to return. if op=None, return
all op nodes.
Returns:
list[DAGNode]: the list of node ids containing the given op.
|
def raw(request):
    """shows untransformed hierarchical xml output"""
    all_foos = foobar_models.Foo.objects.all()
    xml_payload = tree.xml(all_foos)
    return HttpResponse(xml_payload, mimetype='text/xml')
|
shows untransformed hierarchical xml output
|
def is_valid(self, tree):
    """
    Returns True, iff the order of the tokens in the graph is the
    same as in the Conano file (converted to plain text).

    :param tree: etree representation of the Conano XML file
    :rtype: bool
    """
    conano_plaintext = etree.tostring(tree, encoding='utf8', method='text')
    token_str_list = conano_plaintext.split()
    for i, plain_token in enumerate(token_str_list):
        graph_token = self.node[self.tokens[i]][self.ns+':token']
        if ensure_unicode(plain_token) != graph_token:
            # BUGFIX: the format string referenced {2} but only two arguments
            # were supplied, so a mismatch raised IndexError instead of being
            # reported; pass the token index as the third argument.
            sys.stderr.write(
                "Conano tokenizations don't match: {0} vs. {1} "
                "({2})".format(plain_token, graph_token, i))
            return False
    return True
|
returns true, iff the order of the tokens in the graph are the
same as in the Conano file (converted to plain text).
|
def write_to_disk(
    manifest_root_dir: Optional[Path] = None,
    manifest_name: Optional[str] = None,
    prettify: Optional[bool] = False,
) -> Manifest:
    """
    Write the active manifest to disk and return it.

    Thin public wrapper delegating to the private ``_write_to_disk``.

    Defaults
    - Writes manifest to cwd unless Path is provided as manifest_root_dir.
    - Writes manifest with a filename of Manifest[version].json unless a desired
      manifest name (which must end in json) is provided as manifest_name.
    - Writes the minified manifest version to disk unless prettify is set to True.
    """
    return _write_to_disk(manifest_root_dir, manifest_name, prettify)
|
Write the active manifest to disk
Defaults
- Writes manifest to cwd unless Path is provided as manifest_root_dir.
- Writes manifest with a filename of Manifest[version].json unless a desired
manifest name (which must end in json) is provided as manifest_name.
- Writes the minified manifest version to disk unless prettify is set to True.
|
def build(self, builder):
    """Build the CodeList XML element (with its items and aliases) by
    appending to the given builder."""
    attrs = {"OID": self.oid, "Name": self.name, "DataType": self.datatype.value}
    # SASFormatName is optional; only emit it when set.
    if self.sas_format_name is not None:
        attrs["SASFormatName"] = self.sas_format_name
    builder.start("CodeList", attrs)
    for codelist_item in self.codelist_items:
        codelist_item.build(builder)
    for alias in self.aliases:
        alias.build(builder)
    builder.end("CodeList")
|
Build XML by appending to builder
|
def get_imgid(self, img):
    """Obtain a unique identifier of the image.

    Prefers the 'checksum' / 'filename' header keywords, then the file name
    of the HDUList, and finally the object repr as a last resort.

    Parameters
    ----------
    img : astropy.io.fits.HDUList

    Returns
    -------
    str:
        Identification of the image
    """
    imgid = img.filename()
    # More heuristics here...
    # get FILENAME keyword, CHECKSUM, for example...
    hdr = self.get_header(img)
    for keyword in ('checksum', 'filename'):
        if keyword in hdr:
            return hdr[keyword]
    return imgid if imgid else repr(img)
|
Obtain a unique identifier of the image.
Parameters
----------
img : astropy.io.fits.HDUList
Returns
-------
str:
Identification of the image
|
def build_assert(cls: Type[_Block], nodes: List[ast.stmt], min_line_number: int) -> _Block:
    """
    Build the Assert block: all nodes that come after the Act node.

    Note:
        The filtering is *still* running off the line number of the Act
        node, when instead it should be using the last line of the Act
        block.
    """
    assert_nodes = filter_assert_nodes(nodes, min_line_number)
    return cls(assert_nodes, LineType._assert)
|
Assert block is all nodes that are after the Act node.
Note:
The filtering is *still* running off the line number of the Act
node, when instead it should be using the last line of the Act
block.
|
def from_indra_pickle(path: str,
                      name: Optional[str] = None,
                      version: Optional[str] = None,
                      description: Optional[str] = None,
                      authors: Optional[str] = None,
                      contact: Optional[str] = None,
                      license: Optional[str] = None,
                      copyright: Optional[str] = None,
                      disclaimer: Optional[str] = None,
                      ):
    """Import a model from :mod:`indra` by unpickling a statement list.

    :param path: Path to pickled list of :class:`indra.statements.Statement`
    :param name: The name for the BEL graph
    :param version: The version of the BEL graph
    :param description: The description of the graph
    :param authors: The authors of this graph
    :param contact: The contact email for this graph
    :param license: The license for this graph
    :param copyright: The copyright for this graph
    :param disclaimer: The disclaimer for this graph
    :rtype: pybel.BELGraph
    """
    with open(path, 'rb') as pickle_file:
        statements = load(pickle_file)
    # Forward all graph metadata unchanged to the statements converter.
    metadata = dict(
        name=name,
        version=version,
        description=description,
        authors=authors,
        contact=contact,
        license=license,
        copyright=copyright,
        disclaimer=disclaimer,
    )
    return from_indra_statements(stmts=statements, **metadata)
|
Import a model from :mod:`indra`.
:param path: Path to pickled list of :class:`indra.statements.Statement`
:param name: The name for the BEL graph
:param version: The version of the BEL graph
:param description: The description of the graph
:param authors: The authors of this graph
:param contact: The contact email for this graph
:param license: The license for this graph
:param copyright: The copyright for this graph
:param disclaimer: The disclaimer for this graph
:rtype: pybel.BELGraph
|
def write(self, oprot):
    '''
    Write this object to the given output protocol and return self.

    :type oprot: thryft.protocol._output_protocol._OutputProtocol
    :rtype: pastpy.gen.database.impl.dbf.dbf_database_configuration.DbfDatabaseConfiguration
    '''
    oprot.write_struct_begin('DbfDatabaseConfiguration')
    # Emit each optional string field (thrift type 11) only when it is set.
    for field_name, field_value in (
        ('pp_images_dir_path', self.pp_images_dir_path),
        ('pp_install_dir_path', self.pp_install_dir_path),
        ('pp_objects_dbf_file_path', self.pp_objects_dbf_file_path),
    ):
        if field_value is not None:
            oprot.write_field_begin(name=field_name, type=11, id=None)
            oprot.write_string(field_value)
            oprot.write_field_end()
    oprot.write_field_stop()
    oprot.write_struct_end()
    return self
|
Write this object to the given output protocol and return self.
:type oprot: thryft.protocol._output_protocol._OutputProtocol
:rtype: pastpy.gen.database.impl.dbf.dbf_database_configuration.DbfDatabaseConfiguration
|
def _get_value(self):
"""
Return two delegating variables. Each variable should contain
a value attribute with the real value.
"""
x, y = self._point.x, self._point.y
self._px, self._py = self._item_point.canvas.get_matrix_i2i(self._item_point,
self._item_target).transform_point(x, y)
return self._px, self._py
|
Return two delegating variables. Each variable should contain
a value attribute with the real value.
|
def prune_influence_map_subj_obj(self):
    """Prune influence map to include only edges where the object of the
    upstream rule matches the subject of the downstream rule."""
    def get_rule_info(r):
        # Collect the subject/object annotations attached to rule `r`.
        result = {}
        for ann in self.model.annotations:
            if ann.subject == r:
                if ann.predicate == 'rule_has_subject':
                    result['subject'] = ann.object
                elif ann.predicate == 'rule_has_object':
                    result['object'] = ann.object
        return result
    im = self.get_im()
    edges_to_prune = []
    # PERF: iterate the existing edges directly instead of testing edge
    # membership for every ordered rule pair — O(E) instead of O(N^2).
    for r1, r2 in im.edges():
        if r1 == r2:
            # itertools.permutations never produced (r, r) pairs, so
            # self-loops were never pruned before; keep that behavior.
            continue
        r1_info = get_rule_info(r1)
        r2_info = get_rule_info(r2)
        # Without both annotations we cannot judge the edge, so keep it.
        if 'object' not in r1_info or 'subject' not in r2_info:
            continue
        if r1_info['object'] != r2_info['subject']:
            logger.info("Removing edge %s --> %s" % (r1, r2))
            edges_to_prune.append((r1, r2))
    im.remove_edges_from(edges_to_prune)
|
Prune influence map to include only edges where the object of the
upstream rule matches the subject of the downstream rule.
|
def translation(language):
    """
    Return (and lazily cache) a translation object in the default 'django'
    domain for the given language.
    """
    global _translations
    try:
        return _translations[language]
    except KeyError:
        # First request for this language: build and cache the object.
        trans = _translations[language] = Translations(language)
        return trans
|
Return a translation object in the default 'django' domain.
|
def ServiceWorker_inspectWorker(self, versionId):
    """
    Function path: ServiceWorker.inspectWorker
        Domain: ServiceWorker
        Method name: inspectWorker

        Parameters:
            Required arguments:
                'versionId' (type: string) -> No description
        No return value.
    """
    assert isinstance(versionId, (str,)), (
        "Argument 'versionId' must be of type '['str']'. Received type: '%s'"
        % type(versionId))
    return self.synchronous_command('ServiceWorker.inspectWorker',
                                    versionId=versionId)
|
Function path: ServiceWorker.inspectWorker
Domain: ServiceWorker
Method name: inspectWorker
Parameters:
Required arguments:
'versionId' (type: string) -> No description
No return value.
|
def RegisterParser(cls, parser_class):
    """Registers a parser class.

    The parser classes are identified based on their lower case name.

    Args:
        parser_class (type): parser class (subclass of BaseParser).

    Raises:
        KeyError: if parser class is already set for the corresponding name.
    """
    name_key = parser_class.NAME.lower()
    if name_key in cls._parser_classes:
        raise KeyError('Parser class already set for name: {0:s}.'.format(
            parser_class.NAME))
    cls._parser_classes[name_key] = parser_class
|
Registers a parser class.
The parser classes are identified based on their lower case name.
Args:
parser_class (type): parser class (subclass of BaseParser).
Raises:
KeyError: if parser class is already set for the corresponding name.
|
def is_cnpj(numero, estrito=False):
    """Convenience predicate for use in conditional tests: simply returns
    True or False depending on whether the argument validates as a CNPJ.

    :param bool estrito: Defaults to ``False``, meaning only the digits of
        the number are considered (potential mask characters are stripped
        before validation). When ``True`` the number is validated as-is.
    """
    try:
        cnpj(numero if estrito else digitos(numero))
    except NumeroCNPJError:
        return False
    return True
|
Uma versão conveniente para usar em testes condicionais. Apenas retorna
verdadeiro ou falso, conforme o argumento é validado.
:param bool estrito: Padrão ``False``, indica se apenas os dígitos do
número deverão ser considerados. Se verdadeiro, potenciais caracteres
que formam a máscara serão removidos antes da validação ser realizada.
|
def is_any_type_set(sett: Set[Type]) -> bool:
    """
    Helper method to check whether a set of types is exactly the
    {AnyObject} singleton.

    :param sett: the set of types to inspect
    :return: True iff the set contains a single member and it is the Any type
    """
    if len(sett) != 1:
        return False
    # Extract the single element without relying on type ordering.
    return is_any_type(next(iter(sett)))
|
Helper method to check if a set of types is the {AnyObject} singleton
:param sett:
:return:
|
def purge(**kwargs):
    '''
    Purge all the jobs currently scheduled on the minion

    CLI Example:

    .. code-block:: bash

        salt '*' schedule.purge
    '''
    ret = {'comment': [],
           'result': True}
    # Iterate every configured job; 'enabled' and '__'-prefixed names are
    # internal bookkeeping entries, not real jobs.
    for name in list_(show_all=True, return_yaml=False):
        if name == 'enabled':
            continue
        if name.startswith('__'):
            continue
        if 'test' in kwargs and kwargs['test']:
            # Dry-run: only report what would be deleted.
            ret['result'] = True
            ret['comment'].append('Job: {0} would be deleted from schedule.'.format(name))
        else:
            # Deletions are persisted by default unless told otherwise.
            persist = True
            if 'persist' in kwargs:
                persist = kwargs['persist']
            try:
                # Fire a manage_schedule event and wait for the scheduler to
                # confirm the deletion.
                eventer = salt.utils.event.get_event('minion', opts=__opts__)
                res = __salt__['event.fire']({'name': name,
                                              'func': 'delete',
                                              'persist': persist}, 'manage_schedule')
                if res:
                    event_ret = eventer.get_event(tag='/salt/minion/minion_schedule_delete_complete', wait=30)
                    if event_ret and event_ret['complete']:
                        _schedule_ret = event_ret['schedule']
                        if name not in _schedule_ret:
                            ret['result'] = True
                            ret['comment'].append('Deleted job: {0} from schedule.'.format(name))
                        else:
                            # NOTE(review): this failure branch still sets
                            # result to True — looks suspicious; confirm
                            # whether False was intended.
                            ret['comment'].append('Failed to delete job {0} from schedule.'.format(name))
                            ret['result'] = True
            except KeyError:
                # Effectively a no-op, since we can't really return without an event system
                # NOTE(review): the message says "add" but this is the purge
                # function — presumably a copy/paste leftover; confirm.
                ret['comment'] = 'Event module not available. Schedule add failed.'
                ret['result'] = True
    return ret
|
Purge all the jobs currently scheduled on the minion
CLI Example:
.. code-block:: bash
salt '*' schedule.purge
|
def _tokenize_latex(self, exp):
"""
Internal method to tokenize latex
"""
tokens = []
prevexp = ""
while exp:
t, exp = self._get_next_token(exp)
if t.strip() != "":
tokens.append(t)
if prevexp == exp:
break
prevexp = exp
return tokens
|
Internal method to tokenize latex
|
def to_json(self):
    """ Writes the complete Morse-Smale merge hierarchy to a string
        object.

        @ Out, a string object storing the entire merge hierarchy of
        all minima and maxima.
    """
    hierarchy = [
        {
            "Dying": dying,
            "Persistence": persistence,
            "Surviving": surviving,
            "Saddle": saddle,
        }
        for dying, (persistence, surviving, saddle) in self.merge_sequence.items()
    ]
    # One (min, max) pair per sample, filled in from the base partitions.
    partitions = np.array([None, None] * len(self.Y)).reshape(-1, 2)
    for (min_index, max_index), members in self.base_partitions.items():
        partitions[members, :] = [min_index, max_index]
    capsule = {"Hierarchy": hierarchy, "Partitions": partitions.tolist()}
    return json.dumps(capsule)
|
Writes the complete Morse-Smale merge hierarchy to a string
object.
@ Out, a string object storing the entire merge hierarchy of
all minima and maxima.
|
def get_tagged_version(self):
    """
    Get the version of the local working set as a StrictVersion, or
    None if no viable tag exists. If the local working set is itself
    the tagged commit and the tip and there are no local
    modifications, use the tag on the parent changeset.
    """
    candidate_tags = list(self.get_tags())
    # A clean checkout whose tip is tagged reports the parent's tags instead.
    if 'tip' in candidate_tags and not self.is_modified():
        candidate_tags = self.get_parent_tags('tip')
    versions = self.__versions_from_tags(candidate_tags)
    return self.__best_version(versions)
|
Get the version of the local working set as a StrictVersion or
None if no viable tag exists. If the local working set is itself
the tagged commit and the tip and there are no local
modifications, use the tag on the parent changeset.
|
def sed(regexpr, repl, force=False, recursive=False, dpath_list=None,
        fpath_list=None, verbose=None, include_patterns=None,
        exclude_patterns=None):
    """
    Python implementation of sed. NOT FINISHED

    searches and replaces text in files

    Args:
        regexpr (str): regex pattern to find
        repl (str): text to replace
        force (bool): passed through to sedfile
        recursive (bool): recurse into subdirectories when searching
        dpath_list (list): directories to search (defaults to cwd)
        fpath_list (list): explicit files to edit (skips directory search)
        verbose (bool): print a summary of what is being done
        include_patterns (list): filename globs to include
        exclude_patterns (list): filename globs to exclude
    """
    # BUGFIX: `exclude_patterns` used a mutable default argument ([]),
    # which is shared between calls; use None and create a fresh list.
    if exclude_patterns is None:
        exclude_patterns = []
    #_grep(r, [repl], dpath_list=dpath_list, recursive=recursive)
    if include_patterns is None:
        include_patterns = ['*.py', '*.pyx', '*.pxi', '*.cxx', '*.cpp', '*.hxx', '*.hpp', '*.c', '*.h', '*.html', '*.tex']
    if dpath_list is None:
        dpath_list = [os.getcwd()]
    if verbose is None:
        verbose = ut.NOT_QUIET
    if fpath_list is None:
        # No explicit files given: walk the directories for matching files.
        greater_exclude_dirs = get_standard_exclude_dnames()
        exclude_dirs = []
        fpath_generator = matching_fpaths(
            dpath_list, include_patterns, exclude_dirs,
            greater_exclude_dirs=greater_exclude_dirs,
            recursive=recursive, exclude_patterns=exclude_patterns)
    else:
        fpath_generator = fpath_list
    if verbose:
        print('sed-ing %r' % (dpath_list,))
        print(' * regular expression : %r' % (regexpr,))
        print(' * replacement : %r' % (repl,))
        print(' * include_patterns : %r' % (include_patterns,))
        print(' * recursive: %r' % (recursive,))
        print(' * force: %r' % (force,))
        from utool import util_str
        print(' * fpath_list: %s' % (util_str.repr3(fpath_list),))
    regexpr = extend_regex(regexpr)
    # Walk through each file and apply the replacement, tallying changes.
    num_changed = 0
    num_files_checked = 0
    fpaths_changed = []
    for fpath in fpath_generator:
        num_files_checked += 1
        changed_lines = sedfile(fpath, regexpr, repl, force, verbose=verbose)
        if changed_lines is not None:
            fpaths_changed.append(fpath)
            num_changed += len(changed_lines)
    import utool as ut
    print('num_files_checked = %r' % (num_files_checked,))
    print('fpaths_changed = %s' % (ut.repr3(sorted(fpaths_changed)),))
    print('total lines changed = %r' % (num_changed,))
|
Python implementation of sed. NOT FINISHED
searches and replaces text in files
Args:
regexpr (str): regx patterns to find
repl (str): text to replace
force (bool):
recursive (bool):
dpath_list (list): directories to search (defaults to cwd)
|
def from_list(cls, l):
    """Return a Point instance built from a sequence of 2 (x, y) or
    3 (x, y, z) coordinate values."""
    if len(l) not in (2, 3):
        raise AttributeError
    return cls(*map(float, l))
|
Return a Point instance from a given list
|
def add_formats_by_name(self, rfmt_list):
    """
    Add report formats given short label descriptors such as 'txt',
    'json', or 'html'; unrecognized labels are silently ignored.
    """
    for label in rfmt_list:
        if label == "json":
            self.add_report_format(JSONReportFormat)
        elif label in ("txt", "text"):
            self.add_report_format(TextReportFormat)
        elif label in ("htm", "html"):
            self.add_report_format(CheetahReportFormat)
|
adds formats by short label descriptors, such as 'txt', 'json', or
'html'
|
def view_indexes(self, done=None):
    '''return a list waypoint indexes in view order

    Walks the mission from the first not-yet-visited waypoint, following
    DO_JUMP commands, and collects indexes of waypoints with real
    coordinates (x or y non-zero).

    :param done: optional set of already-visited indexes (mutated in place)
    '''
    ret = []
    if done is None:
        done = set()
    idx = 0
    # find first point not done yet
    while idx < self.count():
        if not idx in done:
            break
        idx += 1
    while idx < self.count():
        w = self.wp(idx)
        if idx in done:
            # Looped back into visited territory: include this point once
            # more if it has coordinates, then stop.
            if w.x != 0 or w.y != 0:
                ret.append(idx)
            break
        done.add(idx)
        if w.command == mavutil.mavlink.MAV_CMD_DO_JUMP:
            # Follow the jump: param1 holds the destination index.
            idx = int(w.param1)
            w = self.wp(idx)
            if w.x != 0 or w.y != 0:
                ret.append(idx)
            continue
        # Only location commands with real coordinates are viewable.
        if (w.x != 0 or w.y != 0) and self.is_location_command(w.command):
            ret.append(idx)
        idx += 1
    return ret
|
return a list waypoint indexes in view order
|
def cell_fate(data, groupby='clusters', disconnected_groups=None, self_transitions=False, n_neighbors=None, copy=False):
    """Computes individual cell endpoints

    Arguments
    ---------
    data: :class:`~anndata.AnnData`
        Annotated data matrix.
    groupby: `str` (default: `'clusters'`)
        Key to which to assign the fates.
    disconnected_groups: list of `str` (default: `None`)
        Which groups to treat as disconnected for fate assignment.
    self_transitions: `bool` (default: `False`)
        Forwarded to `transition_matrix`.
    n_neighbors: `int` (default: `None`)
        Number of neighbors to restrict transitions to.
    copy: `bool` (default: `False`)
        Return a copy instead of writing to `adata`.

    Returns
    -------
    Returns or updates `adata` with the attributes
    cell_fate: `.obs`
        most likely cell fate for each individual cell
    cell_fate_confidence: `.obs`
        confidence of transitioning to the assigned fate
    """
    adata = data.copy() if copy else data
    logg.info('computing cell fates', r=True)
    n_neighbors = 10 if n_neighbors is None else n_neighbors
    # Work on a throwaway copy so the velocity graph of `adata` is untouched.
    _adata = adata.copy()
    vgraph = VelocityGraph(_adata, n_neighbors=n_neighbors, approx=True, n_recurse_neighbors=1)
    vgraph.compute_cosines()
    _adata.uns['velocity_graph'] = vgraph.graph
    _adata.uns['velocity_graph_neg'] = vgraph.graph_neg
    T = transition_matrix(_adata, self_transitions=self_transitions)
    # (I - T)^-1 — presumably the fundamental matrix of the Markov chain
    # (expected visits per cell); TODO confirm. Note this is dense O(n^2).
    I = np.eye(_adata.n_obs)
    fate = np.linalg.inv(I - T)
    if issparse(T): fate = fate.A
    # Each cell's fate is the group of the cell it most strongly flows into.
    cell_fates = np.array(_adata.obs[groupby][fate.argmax(1)])
    if disconnected_groups is not None:
        # Cells in disconnected groups keep their own group as their fate.
        idx = _adata.obs[groupby].isin(disconnected_groups)
        cell_fates[idx] = _adata.obs[groupby][idx]
    adata.obs['cell_fate'] = cell_fates
    # Confidence: share of total "flow" going to the assigned fate.
    adata.obs['cell_fate_confidence'] = fate.max(1) / fate.sum(1)
    strings_to_categoricals(adata)
    logg.info('    finished', time=True, end=' ' if settings.verbosity > 2 else '\n')
    logg.hint(
        'added\n'
        '    \'cell_fate\', most likely cell fate (adata.obs)\n'
        '    \'cell_fate_confidence\', confidence of transitioning to the assigned fate (adata.obs)')
    return adata if copy else None
|
Computes individual cell endpoints
Arguments
---------
data: :class:`~anndata.AnnData`
Annotated data matrix.
groupby: `str` (default: `'clusters'`)
Key to which to assign the fates.
disconnected_groups: list of `str` (default: `None`)
Which groups to treat as disconnected for fate assignment.
n_neighbors: `int` (default: `None`)
Number of neighbors to restrict transitions to.
copy: `bool` (default: `False`)
Return a copy instead of writing to `adata`.
Returns
-------
Returns or updates `adata` with the attributes
cell_fate: `.obs`
most likely cell fate for each individual cell
cell_fate_confidence: `.obs`
confidence of transitioning to the assigned fate
|
def _add_document(self, doc_id, conn=None, nosave=False, score=1.0, payload=None,
replace=False, partial=False, language=None, **fields):
"""
Internal add_document used for both batch and single doc indexing
"""
if conn is None:
conn = self.redis
if partial:
replace = True
args = [self.ADD_CMD, self.index_name, doc_id, score]
if nosave:
args.append('NOSAVE')
if payload is not None:
args.append('PAYLOAD')
args.append(payload)
if replace:
args.append('REPLACE')
if partial:
args.append('PARTIAL')
if language:
args += ['LANGUAGE', language]
args.append('FIELDS')
args += list(itertools.chain(*fields.items()))
return conn.execute_command(*args)
|
Internal add_document used for both batch and single doc indexing
|
def info_authn(self):
    """Check whether the user is authenticated for info.json.

    Requires an Authorization header of the form "Bearer TOKEN", where
    TOKEN is an appropriate and valid access token.
    """
    authz_header = request.headers.get('Authorization', '[none]')
    if not authz_header.startswith('Bearer '):
        return False
    # Strip the "Bearer " prefix (7 chars) to obtain the raw token.
    return self.access_token_valid(
        authz_header[7:], "info_authn: Authorization header")
|
Check to see if user if authenticated for info.json.
Must have Authorization header with value that has the form
"Bearer TOKEN", where TOKEN is an appropriate and valid access
token.
|
def get_queryset(self):
    """
    Returns a queryset of all states holding a non-special election on
    a date.

    The date is read from the URL kwargs; district-level elections are
    mapped to their parent (state) division.
    """
    try:
        date = ElectionDay.objects.get(date=self.kwargs["date"])
    except Exception:
        # NOTE(review): broad except — presumably meant to catch
        # ElectionDay.DoesNotExist; confirm before narrowing.
        raise APIException(
            "No elections on {}.".format(self.kwargs["date"])
        )
    division_ids = []
    # NOTE(review): filter() with no arguments returns everything; it looks
    # like this was intended to exclude special elections — confirm.
    normal_elections = date.elections.filter()
    if len(normal_elections) > 0:
        for election in date.elections.all():
            if election.division.level.name == DivisionLevel.STATE:
                division_ids.append(election.division.uid)
            elif election.division.level.name == DivisionLevel.DISTRICT:
                # Roll districts up to their parent state division.
                division_ids.append(election.division.parent.uid)
    return Division.objects.filter(uid__in=division_ids)
|
Returns a queryset of all states holding a non-special election on
a date.
|
def _iter_path(pointer):
    """Take a cairo_path_t * pointer
    and yield ``(path_operation, coordinates)`` tuples.

    See :meth:`Context.copy_path` for the data structure.
    """
    _check_status(pointer.status)
    data = pointer.data
    num_data = pointer.num_data
    points_per_type = PATH_POINTS_PER_TYPE
    position = 0
    while position < num_data:
        # Each element starts with a header describing the operation and
        # how many point records follow it.
        header = data[position].header
        coords = ()
        for offset in range(1, points_per_type[header.type] + 1):
            point = data[position + offset].point
            coords += (point.x, point.y)
        yield (header.type, coords)
        position += header.length
|
Take a cairo_path_t * pointer
and yield ``(path_operation, coordinates)`` tuples.
See :meth:`Context.copy_path` for the data structure.
|
def store_text_cursor_anchor(self):
    """
    Stores the document cursor anchor (cursor plus both scrollbar
    positions).

    :return: Method success.
    :rtype: bool
    """
    cursor = self.textCursor()
    horizontal = self.horizontalScrollBar().sliderPosition()
    vertical = self.verticalScrollBar().sliderPosition()
    self.__text_cursor_anchor = (cursor, horizontal, vertical)
    return True
|
Stores the document cursor anchor.
:return: Method success.
:rtype: bool
|
def chi_squareds(self, p=None):
    """
    Returns a list with the chi squared (sum of squared studentized
    residuals) of each data set. Also uses ydata_massaged.

    :param p: fit parameters; p=None means use the fit results
    :return: list of chi-squared values, or None when there is no data
        or no residuals could be computed
    """
    if len(self._set_xdata) == 0 or len(self._set_ydata) == 0:
        return None
    if p is None:
        p = self.results[0]
    # get the residuals
    rs = self.studentized_residuals(p)
    # BUGFIX: use `is None`; `rs == None` compares elementwise for
    # array-likes and is unreliable as a plain None check.
    if rs is None:
        return None
    # square them and sum them: one chi^2 per data set
    return [sum(r ** 2) for r in rs]
|
Returns a list of chi squared for each data set. Also uses ydata_massaged.
p=None means use the fit results
|
def _post_login_page(self, login_url):
    """Submit the login form to the HydroQuebec website.

    :param login_url: URL of the login form endpoint
    :return: True on success
    :raises PyHydroQuebecError: when the POST fails at the network level,
        or when the site does not answer with a 302 redirect (which the
        code treats as the success indicator)
    """
    data = {"login": self.username,
            "_58_password": self.password}
    try:
        # allow_redirects=False so the 302 status can be inspected directly.
        raw_res = yield from self._session.post(login_url,
                                                data=data,
                                                timeout=self._timeout,
                                                allow_redirects=False)
    except OSError:
        raise PyHydroQuebecError("Can not submit login form")
    if raw_res.status != 302:
        raise PyHydroQuebecError("Login error: Bad HTTP status code. "
                                 "Please check your username/password.")
    return True
|
Login to HydroQuebec website.
|
def get_datastream_data(self, datastream, options):
    """
    Get input data for the datastream.

    :param datastream: string
    :param options: dict; when it contains a non-None 'format' entry, that
        value is used as the response format and the entry is reset to None
        in the caller's dict (behavior preserved from the original).
    """
    response_format = None
    if options and 'format' in options and options['format'] is not None:
        response_format = options['format']
        # Consume the format so it is not reused as a regular option.
        options['format'] = None
    url = '/datastream/' + str(datastream) + '/data'
    return self.http.downstream(url, response_format)
|
Get input data for the datastream
:param datastream: string
:param options: dict
|
def get_source(self, name):
    """Concrete implementation of InspectLoader.get_source."""
    path = self.get_filename(name)
    try:
        source_bytes = self.get_data(path)
    except OSError as exc:
        # `raise ... from exc` sets __cause__ (and suppresses the implicit
        # context) exactly as the original manual assignment did.
        raise _ImportError('source not available through get_data()',
                           name=name) from exc
    return decode_source(source_bytes)
|
Concrete implementation of InspectLoader.get_source.
|
def get_active_services():
    """
    Retrieve a list of all active system services.

    @see: L{get_services},
        L{start_service}, L{stop_service},
        L{pause_service}, L{resume_service}

    @rtype:  list( L{win32.ServiceStatusProcessEntry} )
    @return: List of service status descriptors.
    """
    with win32.OpenSCManager(
            dwDesiredAccess=win32.SC_MANAGER_ENUMERATE_SERVICE) as hSCManager:
        entries = win32.EnumServicesStatusEx(
            hSCManager,
            dwServiceType=win32.SERVICE_WIN32,
            dwServiceState=win32.SERVICE_ACTIVE)
        # Keep only entries backed by a running process.
        return [entry for entry in entries if entry.ProcessId]
|
Retrieve a list of all active system services.
@see: L{get_services},
L{start_service}, L{stop_service},
L{pause_service}, L{resume_service}
@rtype: list( L{win32.ServiceStatusProcessEntry} )
@return: List of service status descriptors.
|
def _decode_datetime(obj):
"""Decode a msgpack'ed datetime."""
if '__datetime__' in obj:
obj = datetime.datetime.strptime(obj['as_str'].decode(), "%Y%m%dT%H:%M:%S.%f")
return obj
|
Decode a msgpack'ed datetime.
|
def copy(self, extra=None):
    """
    Creates a copy of this instance with the same uid and some
    extra params. The default implementation creates a
    shallow copy using :py:func:`copy.copy`, and then copies the
    embedded and extra parameters over and returns the copy.
    Subclasses should override this method if the default approach
    is not sufficient.

    :param extra: Extra parameters to copy to the new instance
    :return: Copy of this instance
    """
    extra = dict() if extra is None else extra
    duplicate = copy.copy(self)
    # Reset the param maps so the copy starts clean before values are copied.
    duplicate._paramMap = {}
    duplicate._defaultParamMap = {}
    return self._copyValues(duplicate, extra)
|
Creates a copy of this instance with the same uid and some
extra params. The default implementation creates a
shallow copy using :py:func:`copy.copy`, and then copies the
embedded and extra parameters over and returns the copy.
Subclasses should override this method if the default approach
is not sufficient.
:param extra: Extra parameters to copy to the new instance
:return: Copy of this instance
|
def _consolidate_auth(ssh_password=None,
                      ssh_pkey=None,
                      ssh_pkey_password=None,
                      allow_agent=True,
                      host_pkey_directories=None,
                      logger=None):
    """
    Make sure authentication information is in place.

    ``ssh_pkey`` may be of classes:
        - ``str`` - in this case it represents a private key file; public
        key will be obtained from it
        - ``paramiko.Pkey`` - it will be transparently added to loaded keys
    """
    loaded_pkeys = SSHTunnelForwarder.get_keys(
        logger=logger,
        host_pkey_directories=host_pkey_directories,
        allow_agent=allow_agent
    )

    if isinstance(ssh_pkey, string_types):
        expanded_path = os.path.expanduser(ssh_pkey)
        if not os.path.exists(expanded_path):
            if logger:
                logger.warning('Private key file not found: {0}'
                               .format(ssh_pkey))
        else:
            ssh_pkey = SSHTunnelForwarder.read_private_key_file(
                pkey_file=expanded_path,
                pkey_password=ssh_pkey_password or ssh_password,
                logger=logger
            )
    if isinstance(ssh_pkey, paramiko.pkey.PKey):
        # An explicitly supplied key outranks any discovered ones.
        loaded_pkeys.insert(0, ssh_pkey)

    if not (ssh_password or loaded_pkeys):
        raise ValueError('No password or public key available!')
    return (ssh_password, loaded_pkeys)
|
Get sure authentication information is in place.
``ssh_pkey`` may be of classes:
- ``str`` - in this case it represents a private key file; public
key will be obtained from it
- ``paramiko.Pkey`` - it will be transparently added to loaded keys
|
def get_mac_acl_for_intf_input_interface_type(self, **kwargs):
    """Auto Generated Code

    Build the ``get-mac-acl-for-intf`` RPC element with the given
    ``interface_type`` and hand it to the callback.

    :param interface_type: text for the ``input/interface-type`` leaf (required)
    :param callback: optional override for ``self._callback``
    :return: whatever the callback returns for the built element
    """
    # BUG FIX: the original created ET.Element("config") and immediately
    # overwrote the variable; the dead allocation has been removed.
    get_mac_acl_for_intf = ET.Element("get_mac_acl_for_intf")
    # The RPC element itself acts as the config root sent to the callback.
    config = get_mac_acl_for_intf
    input = ET.SubElement(get_mac_acl_for_intf, "input")
    interface_type = ET.SubElement(input, "interface-type")
    interface_type.text = kwargs.pop('interface_type')

    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
Auto Generated Code
|
def adjust_worker_number_by_load(self):
    """Try to create the minimum workers specified in the configuration

    :return: None
    """
    if self.interrupted:
        logger.debug("Trying to adjust worker number. Ignoring because we are stopping.")
        return

    logger.debug("checking worker count."
                 " Currently: %d workers, min per module : %d, max per module : %d",
                 len(self.workers), self.min_workers, self.max_workers)

    not_worker_mods = []
    # Guarantee min_workers per module; extra workers exist for load balancing.
    for mod in self.q_by_mod:
        missing = max(0, self.min_workers - len(self.q_by_mod[mod]))
        for _ in range(missing):
            try:
                self.create_and_launch_worker(module_name=mod)
            except NotWorkerMod:
                # This module is not a true worker one; drop it afterwards
                # (cannot delete while iterating q_by_mod).
                not_worker_mods.append(mod)
                break

    for mod in not_worker_mods:
        logger.warning("The module %s is not a worker one, I remove it from the worker list.",
                       mod)
        del self.q_by_mod[mod]
|
Try to create the minimum workers specified in the configuration
:return: None
|
def clone(self, into=None):
    """Clone this PEX environment into a new PEXBuilder.

    :keyword into: (optional) An optional destination directory to clone this PEXBuilder into. If
      not specified, a temporary directory will be created.

    Clones PEXBuilder into a new location. This is useful if the PEXBuilder has been frozen and
    rendered immutable.

    .. versionchanged:: 0.8
      The temporary directory created when ``into`` is not specified is now garbage collected on
      interpreter exit.
    """
    # Duplicate the chroot first; everything else is copied from this builder.
    cloned_builder = self.__class__(
        chroot=self._chroot.clone(into=into),
        interpreter=self._interpreter,
        pex_info=self._pex_info.copy(),
        preamble=self._preamble,
        copy=self._copy)
    cloned_builder.set_shebang(self._shebang)
    cloned_builder._distributions = self._distributions.copy()
    return cloned_builder
|
Clone this PEX environment into a new PEXBuilder.
:keyword into: (optional) An optional destination directory to clone this PEXBuilder into. If
not specified, a temporary directory will be created.
Clones PEXBuilder into a new location. This is useful if the PEXBuilder has been frozen and
rendered immutable.
.. versionchanged:: 0.8
The temporary directory created when ``into`` is not specified is now garbage collected on
interpreter exit.
|
def pdf_case_report(institute_id, case_name):
    """Download a pdf report for a case"""
    institute_obj, case_obj = institute_and_case(store, institute_id, case_name)
    data = controllers.case_report_content(store, institute_obj, case_obj)

    # Append the coverage report at the bottom when a coverage DB is configured.
    if current_app.config.get('SQLALCHEMY_DATABASE_URI'):
        data['coverage_report'] = controllers.coverage_report_contents(
            store, institute_obj, case_obj, request.url_root)

    # Workaround: dump the pedigree SVG to a static file so it renders in the pdf.
    madeline_svg = case_obj.get('madeline_info')
    if madeline_svg is not None:
        with open(os.path.join(cases_bp.static_folder, 'madeline.svg'), 'w') as svg_file:
            svg_file.write(madeline_svg)

    html_report = render_template('cases/case_report.html', institute=institute_obj,
                                  case=case_obj, format='pdf', **data)
    download_name = '{}_{}_scout.pdf'.format(
        case_obj['display_name'], datetime.datetime.now().strftime("%Y-%m-%d"))
    return render_pdf(HTML(string=html_report), download_filename=download_name)
|
Download a pdf report for a case
|
def get_user_shell():
    """
    For commands executed directly via an SSH command-line, SSH looks up the
    user's shell via getpwuid() and only defaults to /bin/sh if that field is
    missing or empty.
    """
    try:
        entry = pwd.getpwuid(os.geteuid())
    except KeyError:
        # No passwd entry for the effective uid: mimic SSH's fallback.
        return '/bin/sh'
    return entry.pw_shell or '/bin/sh'
|
For commands executed directly via an SSH command-line, SSH looks up the
user's shell via getpwuid() and only defaults to /bin/sh if that field is
missing or empty.
|
def _ige(message, key, iv, operation="decrypt"):
    """Run AES-256-IGE over ``message`` in the requested direction and
    return the transformed bytes.

    :param message: bytes-like payload; length must be a multiple of 16
    :param key: 32-byte AES-256 key
    :param iv: 32-byte IGE initialization vector (two 16-byte halves; it is
        not given to the ECB cipher itself, only combined manually below)
    :param operation: either "decrypt" or "encrypt"
    :raises ValueError: on bad key/iv/message length or unknown operation
    """
    # Validate the operation up front: the original only failed inside the
    # block loop, silently accepting a bad operation for empty messages.
    if operation not in ("decrypt", "encrypt"):
        raise ValueError("operation must be either 'decrypt' or 'encrypt'")
    message = bytes(message)
    if len(key) != 32:
        raise ValueError("key must be 32 bytes long (was " +
                         str(len(key)) + " bytes)")
    if len(iv) != 32:
        raise ValueError("iv must be 32 bytes long (was " +
                         str(len(iv)) + " bytes)")
    # BUG FIX: MODE_ECB takes no IV argument; pycryptodome raises TypeError
    # if one is passed (legacy PyCrypto silently ignored it). The IGE
    # chaining below applies the IV manually.
    cipher = AES.new(key, AES.MODE_ECB)
    blocksize = cipher.block_size
    if len(message) % blocksize != 0:
        raise ValueError("message must be a multiple of 16 bytes (try adding " +
                         str(16 - len(message) % 16) + " bytes of padding)")
    ivp = iv[0:blocksize]
    ivp2 = iv[blocksize:]
    ciphered = bytes()
    for i in range(0, len(message), blocksize):
        indata = message[i:i + blocksize]
        if operation == "decrypt":
            xored = strxor(indata, ivp2)
            decrypt_xored = cipher.decrypt(xored)
            outdata = strxor(decrypt_xored, ivp)
            # IGE chaining: next block uses this ciphertext and plaintext.
            ivp = indata
            ivp2 = outdata
        else:  # "encrypt" (validated above)
            xored = strxor(indata, ivp)
            encrypt_xored = cipher.encrypt(xored)
            outdata = strxor(encrypt_xored, ivp2)
            ivp = outdata
            ivp2 = indata
        ciphered += outdata
    return ciphered
|
Given a key, given an iv, and message
do whatever operation asked in the operation field.
Operation will be checked for: "decrypt" and "encrypt" strings.
Returns the message encrypted/decrypted.
message must be a multiple of 16 bytes (for division into 16-byte blocks)
key must be 32 byte
iv must be 32 byte (it's not internally used in AES 256 ECB, but it's
needed for IGE)
|
def add_line(self, p1, p2, char_length):
    """
    Add a line to the list. Check if the nodes already exist, and add them
    if not.

    Return the line index (1-indexed, starting with 1)
    """
    # get_point_id registers each endpoint if it is not known yet.
    endpoint_ids = (self.get_point_id(p1, char_length),
                    self.get_point_id(p2, char_length))
    self.Lines.append(endpoint_ids)
    return len(self.Lines)
|
Add a line to the list. Check if the nodes already exist, and add them
if not.
Return the line index (1-indexed, starting with 1)
|
def enumerate_device_serials(vid=FT232H_VID, pid=FT232H_PID):
    """Return a list of all FT232H device serial numbers connected to the
    machine. You can use these serial numbers to open a specific FT232H device
    by passing it to the FT232H initializer's serial parameter.

    :param vid: USB vendor id to match
    :param pid: USB product id to match
    :return: list of serial number strings
    :raises RuntimeError: if libftdi fails to enumerate devices
    """
    # BUG FIX: initialize both names BEFORE the try block, otherwise the
    # finally clause raises NameError when ftdi.new() or usb_find_all()
    # fails before the assignments run.
    ctx = None
    device_list = None
    try:
        # Create a libftdi context.
        ctx = ftdi.new()
        # Enumerate FTDI devices.
        count, device_list = ftdi.usb_find_all(ctx, vid, pid)
        if count < 0:
            # BUG FIX: the original referenced self._ctx inside this plain
            # function, which raised NameError instead of the intended error.
            raise RuntimeError('ftdi_usb_find_all returned error {0}: {1}'.format(
                count, ftdi.get_error_string(ctx)))
        # Walk through list of devices and assemble list of serial numbers.
        devices = []
        while device_list is not None:
            # Get USB device strings and add serial to list of devices.
            ret, manufacturer, description, serial = ftdi.usb_get_strings(
                ctx, device_list.dev, 256, 256, 256)
            if serial is not None:
                devices.append(serial)
            device_list = device_list.next
        return devices
    finally:
        # Make sure to clean up list and context when done; the guards are
        # safe now that both names are always bound.
        if device_list is not None:
            ftdi.list_free(device_list)
        if ctx is not None:
            ftdi.free(ctx)
|
Return a list of all FT232H device serial numbers connected to the
machine. You can use these serial numbers to open a specific FT232H device
by passing it to the FT232H initializer's serial parameter.
|
def install_napps(cls, napps):
    """Install local or remote NApps.

    This method is recursive: it installs each NApp and its
    dependencies.

    :param napps: iterable of tuples accepted by
        ``NAppsManager.set_napp`` -- TODO confirm exact shape against callers.
    """
    mgr = NAppsManager()
    for napp in napps:
        mgr.set_napp(*napp)
        LOG.info(' NApp %s:', mgr.napp_id)
        # Skip NApps that are already installed.
        if not mgr.is_installed():
            try:
                cls.install_napp(mgr)
                if not mgr.is_enabled():
                    cls.enable_napp(mgr)
                napp_dependencies = mgr.dependencies()
                if napp_dependencies:
                    LOG.info('Installing Dependencies:')
                    # Recurse to install this NApp's dependencies.
                    cls.install_napps(napp_dependencies)
                else:
                    # NOTE(review): this branch means "no dependencies", yet
                    # the message says "already enabled" -- looks like a
                    # copy/paste slip; confirm intended wording.
                    LOG.warning(' Napp already enabled.')
            except KytosException:
                # Best-effort: keep going with the remaining NApps.
                continue
        else:
            LOG.warning(' Napp already enabled.')
|
Install local or remote NApps.
This method is recursive: it installs each NApp and its
dependencies.
|
def _get_vars(self):
    ''' load the vars section from a play, accounting for all sorts of variable features
    including loading from yaml files, prompting, and conditional includes of the first
    file found in a list. '''

    # Normalize a missing vars section to an empty dict.
    if self.vars is None:
        self.vars = {}

    if type(self.vars) not in [dict, list]:
        raise errors.AnsibleError("'vars' section must contain only key/value pairs")

    vars = {}

    # translate a list of vars into a dict: each list item is expected to be
    # a one-entry mapping. NOTE(review): .items()[0] and .iteritems() below
    # are Python 2 only -- dict views are not indexable on Python 3.
    if type(self.vars) == list:
        for item in self.vars:
            if getattr(item, 'items', None) is None:
                raise errors.AnsibleError("expecting a key-value pair in 'vars' section")
            k, v = item.items()[0]
            vars[k] = v
    else:
        vars.update(self.vars)

    # Command-line extra vars take precedence over play vars.
    if type(self.playbook.extra_vars) == dict:
        vars.update(self.playbook.extra_vars)

    # vars_prompt list form supports per-variable prompt/encryption options.
    if type(self.vars_prompt) == list:
        for var in self.vars_prompt:
            if not 'name' in var:
                raise errors.AnsibleError("'vars_prompt' item is missing 'name:'")

            vname = var['name']
            prompt = var.get("prompt", vname)
            default = var.get("default", None)
            private = var.get("private", True)

            confirm = var.get("confirm", False)
            encrypt = var.get("encrypt", None)
            salt_size = var.get("salt_size", None)
            salt = var.get("salt", None)

            # Never prompt for a variable already supplied via extra vars.
            if vname not in self.playbook.extra_vars:
                vars[vname] = self.playbook.callbacks.on_vars_prompt (
                                 vname, private, prompt, encrypt, confirm, salt_size, salt, default
                              )

    # vars_prompt dict form: {var_name: prompt_text}, public, no encryption.
    elif type(self.vars_prompt) == dict:
        for (vname, prompt) in self.vars_prompt.iteritems():
            prompt_msg = "%s: " % prompt
            if vname not in self.playbook.extra_vars:
                vars[vname] = self.playbook.callbacks.on_vars_prompt(
                                 varname=vname, private=False, prompt=prompt_msg, default=None
                              )

    else:
        raise errors.AnsibleError("'vars_prompt' section is malformed, see docs")

    # Extra vars win once more over any prompted values.
    results = self.playbook.extra_vars.copy()
    results.update(vars)
    return results
|
load the vars section from a play, accounting for all sorts of variable features
including loading from yaml files, prompting, and conditional includes of the first
file found in a list.
|
def _format_info(self):
"""Generate info line for GNTP Message
:return string:
"""
info = 'GNTP/%s %s' % (
self.info.get('version'),
self.info.get('messagetype'),
)
if self.info.get('encryptionAlgorithmID', None):
info += ' %s:%s' % (
self.info.get('encryptionAlgorithmID'),
self.info.get('ivValue'),
)
else:
info += ' NONE'
if self.info.get('keyHashAlgorithmID', None):
info += ' %s:%s.%s' % (
self.info.get('keyHashAlgorithmID'),
self.info.get('keyHash'),
self.info.get('salt')
)
return info
|
Generate info line for GNTP Message
:return string:
|
def as_text(self):
    '''Fetch and render all regions

    For search and test purposes

    just a prototype
    '''
    from leonardo.templatetags.leonardo_tags import _render_content
    # Render with a synthetic anonymous request, outside any real view.
    request = get_anonymous_request(self)
    content = ''
    try:
        # NOTE: the generator expression below shadows the outer ``content``
        # string with each region's content object -- harmless, but easy to
        # misread.
        for region in [region.key
                       for region in self._feincms_all_regions]:
            content += ''.join(
                _render_content(content, request=request, context={})
                for content in getattr(self.content, region))
    except PermissionDenied:
        # The anonymous user may lack access to some regions; skip quietly.
        pass
    except Exception as e:
        # Best-effort rendering: log anything unexpected, return what we have.
        LOG.exception(e)
    return content
|
Fetch and render all regions
For search and test purposes
just a prototype
|
def get_index(table, field_name, op, value):
    '''
    Returns the index of the first list entry that matches. If no matches
    are found, it returns None

    NOTE: it is not returning a list. It is returning an integer in range 0..LEN(target)
    NOTE: both 'None' and 0 evaluate as False in python. So, if you are checking for a
          None being returned, be explicit. "if myindex==None:" not simply "if not myindex:"
    '''
    for index, row in enumerate(table):
        candidate = convert_to_dict(row).get(field_name, None)
        if do_op(candidate, op, value):
            return index
    return None
|
Returns the index of the first list entry that matches. If no matches
are found, it returns None
NOTE: it is not returning a list. It is returning an integer in range 0..LEN(target)
NOTE: both 'None' and 0 evaluate as False in python. So, if you are checking for a
None being returned, be explicit. "if myindex==None:" not simply "if not myindex:"
|
def infer_shape(self, node, input_shapes):
    """Return a list of output shapes based on ``input_shapes``.

    This method is optional. It allows to compute the shape of the
    output without having to evaluate.

    Parameters
    ----------
    node : `theano.gof.graph.Apply`
        The node of this Op in the computation graph.
    input_shapes : 1-element list of `theano.compile.ops.Shape`
        Symbolic shape of the input.

    Returns
    -------
    output_shapes : 1-element list of tuples
        Fixed shape of the output determined by `odl_op`.
    """
    if isinstance(self.operator, Functional):
        # Functionals map into the scalars: the output is 0-dimensional.
        return [()]
    # Convert to native ints to avoid an error in Theano from future.int.
    fixed_shape = tuple(native(entry) for entry in self.operator.range.shape)
    return [fixed_shape]
|
Return a list of output shapes based on ``input_shapes``.
This method is optional. It allows to compute the shape of the
output without having to evaluate.
Parameters
----------
node : `theano.gof.graph.Apply`
The node of this Op in the computation graph.
input_shapes : 1-element list of `theano.compile.ops.Shape`
Symbolic shape of the input.
Returns
-------
output_shapes : 1-element list of tuples
Fixed shape of the output determined by `odl_op`.
|
def update_kwargs(kwargs, *updates):
    """
    Utility function for merging multiple keyword arguments, depending on their type:

    * Non-existent keys are added.
    * Existing lists or tuples are extended, but not duplicating entries.
      The keywords ``command`` and ``entrypoint`` are however simply overwritten.
    * Nested dictionaries are updated, overriding previous key-value assignments.
    * Other items are simply overwritten (just like in a regular dictionary update) unless the updating value is
      ``None``.

    Lists/tuples and dictionaries are (shallow-)copied before adding and late resolving values are looked up.
    This function does not recurse.

    :param kwargs: Base keyword arguments. This is modified in-place.
    :type kwargs: dict
    :param updates: Dictionaries to update ``kwargs`` with.
    :type updates: dict
    """
    for update in updates:
        if not update:
            continue
        for key, val in six.iteritems(update):
            u_item = resolve_value(val)
            if u_item is None:
                continue
            # BUG FIX: the original tested ``key in ('command' or 'entrypoint')``,
            # which is a substring match against the string 'command': the key
            # 'entrypoint' was never matched, while keys such as 'comm' were.
            if key in ('command', 'entrypoint'):
                kwargs[key] = u_item
            elif isinstance(u_item, (tuple, list)):
                kw_item = kwargs.get(key)
                u_list = map(resolve_value, u_item)
                if isinstance(kw_item, list):
                    # Extend the existing list in place, skipping duplicates.
                    merge_list(kw_item, u_list)
                elif isinstance(kw_item, tuple):
                    # Tuples are immutable: merge into a fresh list.
                    new_list = list(kw_item)
                    merge_list(new_list, u_list)
                    kwargs[key] = new_list
                else:
                    kwargs[key] = list(u_list)
            elif isinstance(u_item, dict):
                kw_item = kwargs.get(key)
                u_dict = {u_k: resolve_value(u_v) for u_k, u_v in six.iteritems(u_item)}
                if isinstance(kw_item, dict):
                    kw_item.update(u_dict)
                else:
                    kwargs[key] = u_dict
            else:
                kwargs[key] = u_item
|
Utility function for merging multiple keyword arguments, depending on their type:
* Non-existent keys are added.
* Existing lists or tuples are extended, but not duplicating entries.
The keywords ``command`` and ``entrypoint`` are however simply overwritten.
* Nested dictionaries are updated, overriding previous key-value assignments.
* Other items are simply overwritten (just like in a regular dictionary update) unless the updating value is
``None``.
Lists/tuples and dictionaries are (shallow-)copied before adding and late resolving values are looked up.
This function does not recurse.
:param kwargs: Base keyword arguments. This is modified in-place.
:type kwargs: dict
:param updates: Dictionaries to update ``kwargs`` with.
:type updates: dict
|
def _get_audio_sample_bit(self, audio_abs_path):
    """
    Return the sample precision in bits of an audio file, as reported by
    the "Precision" field of ``sox --i``.

    SECURITY NOTE(review): ``audio_abs_path`` is interpolated directly into
    a ``shell=True`` command line, so a path containing shell metacharacters
    can execute arbitrary commands. Only call this with trusted paths, or
    switch to ``shlex.quote`` / ``shell=False``.

    Parameters
    ----------
    audio_abs_path : str
        Path to the audio file; requires the ``sox`` binary on PATH.

    Returns
    -------
    sample_bit : int
    """
    # sox prints e.g. "Precision      : 16-bit"; awk takes the value after
    # " : " and the final grep keeps the digits before the "-" ("16").
    sample_bit = int(
        subprocess.check_output(
            ("""sox --i {} | grep "{}" | awk -F " : " '{{print $2}}' | """
             """grep -oh "^[^-]*" """).format(audio_abs_path, "Precision"),
            shell=True, universal_newlines=True).rstrip())
    return sample_bit
|
Parameters
----------
audio_abs_path : str
Returns
-------
sample_bit : int
|
def get_slack_channels(self, token):
    '''
    Get all channel names from Slack
    '''
    response = salt.utils.slack.query(
        function='rooms',
        api_key=token,
        # These won't be honored until https://github.com/saltstack/salt/pull/41187/files is merged
        opts={
            'exclude_archived': True,
            'exclude_members': True
        })
    # Map channel id -> channel name; empty when the query had no payload.
    return {item['id']: item['name'] for item in response.get('message', [])}
|
Get all channel names from Slack
|
def duration(self, value):
    """The duration property.

    Args:
        value (string). the property value.
    """
    is_default = value == self._defaults['duration']
    if is_default and 'duration' in self._values:
        # Storing the default is redundant; drop any explicit entry.
        del self._values['duration']
    else:
        self._values['duration'] = value
|
The duration property.
Args:
value (string). the property value.
|
def _on_cluster_discovery(self, future):
    """Invoked when the Redis server has responded to the ``CLUSTER_NODES``
    command.

    :param future: The future containing the response from Redis
    :type future: tornado.concurrent.Future
    """
    LOGGER.debug('_on_cluster_discovery(%r)', future)
    common.maybe_raise_exception(future)
    for node in future.result():
        name = '{}:{}'.format(node.ip, node.port)
        if name not in self._cluster:
            # First time we see this node: open a fresh connection.
            self._create_cluster_connection(node)
            continue
        LOGGER.debug('Updating cluster connection info for %s:%s',
                     node.ip, node.port)
        known = self._cluster[name]
        known.set_slots(node.slots)
        known.set_read_only('slave' in node.flags)
    self._discovery = True
|
Invoked when the Redis server has responded to the ``CLUSTER_NODES``
command.
:param future: The future containing the response from Redis
:type future: tornado.concurrent.Future
|
def get_plat_specifier():
    """
    Standard platform specifier used by distutils.

    Returns a string like ``.linux-x86_64-3.11``, with ``-pydebug``
    appended on debug builds of CPython.
    """
    try:
        # Import setuptools first so its patched distutils is the one used.
        import setuptools  # NOQA
        from distutils.util import get_platform
    except ImportError:
        # Python 3.12+ removed distutils; sysconfig provides the same value.
        from sysconfig import get_platform
    plat_name = get_platform()
    # BUG FIX: sys.version[0:3] truncates "3.10"/"3.11" to "3.1"; build the
    # version string from version_info instead.
    version = '{}.{}'.format(sys.version_info.major, sys.version_info.minor)
    plat_specifier = ".%s-%s" % (plat_name, version)
    if hasattr(sys, 'gettotalrefcount'):
        plat_specifier += '-pydebug'
    return plat_specifier
|
Standard platform specifier used by distutils
|
def _get_asset_content(self, asset_id, asset_content_type_str=None, asset_content_id=None):
    """stub

    Resolve an asset content of ``asset_id`` from whichever repository this
    object is assigned to.

    :param asset_id: Id of the asset to inspect
    :param asset_content_type_str: optional genus type (str or Type) to match
    :param asset_content_id: optional Id of one specific asset content
    :raises KeyError: when the object has no assigned container ids
    :raises NotFound: when a genus type is given but no content matches
    """
    rm = self.my_osid_object._get_provider_manager('REPOSITORY')
    my_map = self.my_osid_object._my_map
    # DRY FIX: the original repeated the session-construction branch three
    # times, once per possible container key. The assigned container id may
    # live under any of these keys depending on the owning service catalog.
    for id_key in ('assignedBankIds', 'assignedBookIds', 'assignedRepositoryIds'):
        if id_key in my_map:
            repository_id = Id(my_map[id_key][0])
            break
    else:
        raise KeyError
    if self.my_osid_object._proxy is not None:
        als = rm.get_asset_lookup_session_for_repository(
            repository_id,
            self.my_osid_object._proxy)
    else:
        als = rm.get_asset_lookup_session_for_repository(repository_id)
    if asset_content_id is not None:
        # Prefer an exact content-id match; fall through if none is found.
        for ac in als.get_asset(asset_id).get_asset_contents():
            if str(ac.ident) == str(asset_content_id):
                return ac
    if not asset_content_type_str:
        # Just return the first one.
        return next(als.get_asset(asset_id).get_asset_contents())
    if isinstance(asset_content_type_str, Type):
        asset_content_type_str = str(asset_content_type_str)
    for ac in als.get_asset(asset_id).get_asset_contents():
        if ac.get_genus_type() == Type(asset_content_type_str):
            return ac
    raise NotFound()
|
stub
|
def ROC_AUC_analysis(adata,groupby,group=None, n_genes=100):
    """Calculate ROC curves and AUC values for top-ranked genes.

    For each of the top ``n_genes`` genes stored by ``rank_genes_groups``,
    compute the ROC curve and AUC of that gene's expression as a classifier
    for membership in ``group``, and store the results in ``adata.uns``.

    Parameters
    ----------
    adata : :class:`~anndata.AnnData`
        Annotated data matrix.
    groupby : `str`
        The key of the sample grouping to consider.
    group : `str`, int, optional (default: None)
        Group name or index for which the ROC/AUC should be calculated.
        NOTE(review): despite the docs, ``group is None`` currently falls
        through and fails below -- looping over all groups is still a TODO.
    n_genes : `int`, optional (default: 100)
        For how many genes to calculate ROC and AUC. If no parameter is
        passed, calculation is done for all stored top ranked genes.
    """
    if group is None:
        pass
        # TODO: Loop over all groups instead of just taking one.

    # Assume group takes an int value for one group for the moment.
    # Collect the names of the top n_genes for this group.
    name_list = list()
    for j, k in enumerate(adata.uns['rank_genes_groups_gene_names']):
        if j >= n_genes:
            break
        name_list.append(adata.uns['rank_genes_groups_gene_names'][j][group])

    # TODO: For the moment, see that everything works for comparison against the rest. Resolve issues later.
    groups = 'all'
    groups_order, groups_masks = utils.select_groups(
        adata, groups, groupby)

    # Use usual convention, better for looping later.
    imask = group
    mask = groups_masks[group]

    # TODO: Allow for sample weighting requires better mask access... later

    # We store calculated data in dict, access it via dict to dict. Check if this is the best way.
    fpr={}
    tpr={}
    thresholds={}
    roc_auc={}
    # The group membership mask is the binary ground truth for every gene.
    y_true=mask
    for i, j in enumerate(name_list):
        # Expression vector of gene j across all cells; densify if sparse.
        vec=adata[:,[j]].X
        if issparse(vec):
            y_score = vec.todense()
        else:
            y_score = vec

        fpr[name_list[i]], tpr[name_list[i]], thresholds[name_list[i]] = metrics.roc_curve(y_true, y_score, pos_label=None, sample_weight=None, drop_intermediate=False)
        roc_auc[name_list[i]]=metrics.auc(fpr[name_list[i]],tpr[name_list[i]])
    # Persist per-gene curves and AUCs, keyed by groupby + group.
    adata.uns['ROCfpr' +groupby+ str(group)] = fpr
    adata.uns['ROCtpr' +groupby+ str(group)] = tpr
    adata.uns['ROCthresholds' +groupby+ str(group)] = thresholds
    adata.uns['ROC_AUC' + groupby + str(group)] = roc_auc
|
Calculate correlation matrix.
Calculate a correlation matrix for genes stored in sample annotation using rank_genes_groups.py
Parameters
----------
adata : :class:`~anndata.AnnData`
Annotated data matrix.
groupby : `str`
The key of the sample grouping to consider.
group : `str`, int, optional (default: None)
Group name or index for which the correlation matrix for top_ranked genes should be calculated.
If no parameter is passed, ROC/AUC is calculated for all groups
n_genes : `int`, optional (default: 100)
For how many genes to calculate ROC and AUC. If no parameter is passed, calculation is done for
all stored top ranked genes.
|
def authorize(self, callback=None, state=None, **kwargs):
    """
    Returns a redirect response to the remote authorization URL with
    the signed callback given.

    :param callback: a redirect url for the callback
    :param state: an optional value to embed in the OAuth request.
                  Use this if you want to pass around application
                  state (e.g. CSRF tokens).
    :param kwargs: add optional key/value pairs to the query string
    """
    params = dict(self.request_token_params) or {}
    params.update(**kwargs)

    if self.request_token_url:
        # OAuth1 flow: obtain a signed request token, then redirect with it.
        token = self.generate_request_token(callback)[0]
        url = '%s?oauth_token=%s' % (
            self.expand_url(self.authorize_url), url_quote(token)
        )
        if params:
            url += '&' + url_encode(params)
    else:
        # OAuth2 flow: the callback is embedded in the authorization URL.
        assert callback is not None, 'Callback is required for OAuth2'

        client = self.make_client()

        if 'scope' in params:
            scope = params.pop('scope')
        else:
            scope = None

        if isinstance(scope, str):
            # oauthlib need unicode
            scope = _encode(scope, self.encoding)

        # An explicit ``state`` argument wins over one given in params.
        if 'state' in params:
            if not state:
                state = params.pop('state')
            else:
                # remove state in params
                params.pop('state')

        if callable(state):
            # state can be function for generate a random string
            state = state()

        # Remember where to redirect after authorization for this provider.
        session['%s_oauthredir' % self.name] = callback
        url = client.prepare_request_uri(
            self.expand_url(self.authorize_url),
            redirect_uri=callback,
            scope=scope,
            state=state,
            **params
        )
    return redirect(url)
|
Returns a redirect response to the remote authorization URL with
the signed callback given.
:param callback: a redirect url for the callback
:param state: an optional value to embed in the OAuth request.
Use this if you want to pass around application
state (e.g. CSRF tokens).
:param kwargs: add optional key/value pairs to the query string
|
def flush(self):
    """flush() -> List of decoded messages.

    Decodes the packets in the internal buffer.
    This enables the continuation of the processing
    of received packets after a :exc:`ProtocolError`
    has been handled.

    :return: A (possibly empty) list of decoded messages from the buffered packets.
    :rtype: list(bytes)
    :raises ProtocolError: An invalid byte sequence has been detected.
    """
    decoded = []
    while self._packets:
        packet = self._packets.popleft()
        try:
            decoded.append(decode(packet))
        except ProtocolError:
            # Preserve what was decoded so far so the caller can resume.
            self._messages = decoded
            raise
    return decoded
|
flush() -> List of decoded messages.
Decodes the packets in the internal buffer.
This enables the continuation of the processing
of received packets after a :exc:`ProtocolError`
has been handled.
:return: A (possibly empty) list of decoded messages from the buffered packets.
:rtype: list(bytes)
:raises ProtocolError: An invalid byte sequence has been detected.
|
def Close(self):
    """Closes the database file.

    Raises:
      RuntimeError: if the database is not opened.
    """
    connection = self._connection
    if not connection:
        raise RuntimeError('Cannot close database not opened.')

    # Commit first, otherwise not all data is stored in the database.
    connection.commit()
    connection.close()

    # Reset all handle state so a closed object is clearly unusable.
    self._connection = None
    self._cursor = None
    self.filename = None
    self.read_only = None
|
Closes the database file.
Raises:
RuntimeError: if the database is not opened.
|
def graphviz_parser(preprocessor, tag, markup):
    """ Simple Graphviz parser """
    # Parse the markup string; bail out early on malformed input.
    match = DOT_BLOCK_RE.search(markup)
    if not match:
        raise ValueError('Error processing input. '
                         'Expected syntax: {0}'.format(SYNTAX))
    # Extract the requested renderer and the DOT source.
    program = match.group('program').strip()
    code = match.group('code')
    # Run specified program with our markup.
    output = run_graphviz(program, code)
    # Return the rendered PNG inlined as a Base64 data URI.
    encoded = base64.b64encode(output).decode('utf-8')
    return ('<span class="graphviz" style="text-align: center;">'
            '<img src="data:image/png;base64,%s"></span>' % encoded)
|
Simple Graphviz parser
|
def _set_minimum_links(self, v, load=False):
    """
    Setter method for minimum_links, mapped from YANG variable /interface/port_channel/minimum_links (uint32)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_minimum_links is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_minimum_links() directly.

    YANG Description: The least number of operationally 'UP' links to
    indicate port-channel being UP.

    :param v: new value; anything coercible to the restricted uint32
        (range 1..64, default 1) type below
    :param load: auto-generated flag, unused here
    :raises ValueError: if ``v`` cannot be coerced to the YANG type
    """
    # Unwrap values that carry their own YANG type constructor.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap into the generated restricted-uint32 YANG class.
        t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 64']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="minimum-links", rest_name="minimum-links", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'interface_po', u'info': u'Least number of operationally UP links to declare \nport-channel UP', u'display-when': u'not(../insight/insight-enable)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='uint32', is_config=True)
    except (TypeError, ValueError):
        raise ValueError({
            'error-string': """minimum_links must be of a type compatible with uint32""",
            'defined-type': "uint32",
            'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1 .. 64']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="minimum-links", rest_name="minimum-links", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'callpoint': u'interface_po', u'info': u'Least number of operationally UP links to declare \nport-channel UP', u'display-when': u'not(../insight/insight-enable)'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='uint32', is_config=True)""",
          })

    self.__minimum_links = t
    # Notify the parent container, if it tracks child assignments.
    if hasattr(self, '_set'):
        self._set()
|
Setter method for minimum_links, mapped from YANG variable /interface/port_channel/minimum_links (uint32)
If this variable is read-only (config: false) in the
source YANG file, then _set_minimum_links is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_minimum_links() directly.
YANG Description: The least number of operationally 'UP' links to
indicate port-channel being UP.
|
def disconnect(self, si, logger, vcenter_data_model, vm_uuid, network_name=None, vm=None):
    """
    disconnect network adapter of the vm. If 'network_name' = None - disconnect ALL interfaces

    :param <str> si:
    :param logger:
    :param VMwarevCenterResourceModel vcenter_data_model:
    :param <str> vm_uuid: the uuid of the vm
    :param <str | None> network_name: the name of the specific network to disconnect
    :param <pyvmomi vm object> vm: If the vm obj is None will use vm_uuid to fetch the object
    :return: Started Task
    """
    logger.debug("Disconnect Interface VM: '{0}' Network: '{1}' ...".format(vm_uuid, network_name or "ALL"))

    if vm is None:
        vm = self.pyvmomi_service.find_by_uuid(si, vm_uuid)
        if not vm:
            return "Warning: failed to locate vm {0} in vCenter".format(vm_uuid)

    network = None
    if network_name:
        network = self.pyvmomi_service.vm_get_network_by_name(vm, network_name)
        if network is None:
            raise KeyError('Network not found ({0})'.format(network_name))

    # Disconnected adapters are parked on the configured holding network.
    holding_network_name = VMLocation.combine(
        [vcenter_data_model.default_datacenter, vcenter_data_model.holding_network])
    default_network = self.pyvmomi_service.get_network_by_full_name(si, holding_network_name)

    if network:
        return self.port_group_configurer.disconnect_network(
            vm, network, default_network,
            vcenter_data_model.reserved_networks,
            logger=logger)
    return self.port_group_configurer.disconnect_all_networks(
        vm, default_network,
        vcenter_data_model.reserved_networks,
        logger=logger)
|
disconnect network adapter of the vm. If 'network_name' = None - disconnect ALL interfaces
:param <str> si:
:param logger:
:param VMwarevCenterResourceModel vcenter_data_model:
:param <str> vm_uuid: the uuid of the vm
:param <str | None> network_name: the name of the specific network to disconnect
:param <pyvmomi vm object> vm: If the vm obj is None will use vm_uuid to fetch the object
:return: Started Task
|
def traverse_pagination(response, endpoint, content_filter_query, query_params):
    """Walk a paginated API response and gather every page's "results".

    Arguments:
        response (dict): API response object (the first page).
        endpoint (Slumber.Resource): API endpoint object.
        content_filter_query (dict): query parameters used to filter catalog results.
        query_params (dict): query parameters used to paginate results.
    Returns:
        list: all the results returned by the API, concatenated in page order.
    """
    collected = response.get('results', [])
    page_number = 1
    # Keep re-posting with an incremented page while a 'next' link exists.
    while response.get('next'):
        page_number += 1
        response = endpoint().post(content_filter_query,
                                   **dict(query_params, page=page_number))
        collected += response.get('results', [])
    return collected
|
Traverse a paginated API response, extracting and concatenating the "results" returned by the API.
Arguments:
response (dict): API response object.
endpoint (Slumber.Resource): API endpoint object.
content_filter_query (dict): query parameters used to filter catalog results.
query_params (dict): query parameters used to paginate results.
Returns:
list: all the results returned by the API.
|
def configure_api(app):
    """Register every API resource on the REST `api` object and bind it to `app`."""
    from heman.api.empowering import resources as empowering_resources
    from heman.api.cch import resources as cch_resources
    from heman.api.form import resources as form_resources
    from heman.api import ApiCatchall
    # Register Empowering, CCHFact and Form resources, in that order.
    for resource_group in (empowering_resources, cch_resources, form_resources):
        for resource in resource_group:
            api.add_resource(*resource)
    # The catch-all goes last so it does not shadow real routes.
    api.add_resource(ApiCatchall, '/<path:path>')
    api.init_app(app)
|
Configure API Endpoints.
|
def end_of_month(val):
    """
    Return a new datetime.datetime object representing the last
    microsecond of the month containing ``val``.

    :param val: Date whose month end is wanted.
    :type val: datetime.datetime | datetime.date
    :rtype: datetime.datetime
    """
    # Promote a plain date to a datetime at midnight.  A bare
    # isinstance(val, date) check would be wrong here because datetime
    # subclasses date; the original used the non-idiomatic
    # `type(val) == date` for the same reason.
    if isinstance(val, date) and not isinstance(val, datetime):
        val = datetime.fromordinal(val.toordinal())
    # Start of the *next* month, minus one microsecond.
    if val.month == 12:
        next_month_start = start_of_month(val).replace(year=val.year + 1, month=1)
    else:
        next_month_start = start_of_month(val).replace(month=val.month + 1)
    return next_month_start - timedelta(microseconds=1)
|
Return a new datetime.datetime object with values that represent
the end of a month.
:param val: Date to ...
:type val: datetime.datetime | datetime.date
:rtype: datetime.datetime
|
def _get_labels(self, y):
    """
    Construct pylearn2 dataset labels as a one-hot matrix.

    Parameters
    ----------
    y : array_like, optional
        Labels.
    """
    y = np.asarray(y)
    assert y.ndim == 1
    # One column per distinct label; np.unique returns them sorted.
    classes = np.unique(y).tolist()
    encoded = np.zeros((y.size, len(classes)), dtype=float)
    for row, label in enumerate(y):
        encoded[row, classes.index(label)] = 1.0
    return encoded
|
Construct pylearn2 dataset labels.
Parameters
----------
y : array_like, optional
Labels.
|
def decode(data_url):
    """
    Decode DataURL data
    """
    # Split off the payload, then strip the "data:" scheme prefix.
    metadata, payload = data_url.rsplit(',', 1)
    _, metadata = metadata.split('data:', 1)
    attributes = metadata.split(';')
    # Base64 is signalled by the final attribute; otherwise the payload
    # is percent-encoded.
    if attributes[-1] == 'base64':
        payload = b64decode(payload)
    else:
        payload = unquote(payload)
    # A charset attribute (if present) selects the text decoding.
    for attribute in attributes:
        if attribute.startswith("charset="):
            payload = payload.decode(attribute[8:])
    return payload
|
Decode DataURL data
|
def http(self):
    """A thread local instance of httplib2.Http.

    Returns:
        httplib2.Http: An Http instance authorized by the credentials.
    """
    # Serve the per-thread cached instance when caching is on and one exists.
    if self._use_cached_http and hasattr(self._local, 'http'):
        return self._local.http
    # httplib2 instances are not thread safe; a replay instance is only
    # used when explicitly configured, otherwise build a fresh one.
    base_http = self._http_replay if self._http_replay is not None else _build_http()
    authorized = google_auth_httplib2.AuthorizedHttp(
        self._credentials, http=base_http)
    if self._use_cached_http:
        self._local.http = authorized
    return authorized
|
A thread local instance of httplib2.Http.
Returns:
httplib2.Http: An Http instance authorized by the credentials.
|
def os_version(self, value):
    """Setter for the os_version property.

    Args:
        value (string). the property value.
    """
    key = 'ai.device.osVersion'
    # Writing the default value clears any stored override instead of
    # persisting a redundant entry.
    if value == self._defaults[key] and key in self._values:
        del self._values[key]
    else:
        self._values[key] = value
|
The os_version property.
Args:
value (string). the property value.
|
def get_allowed_operations(resource, subresouce_path):
    """Helper function to get the HTTP allowed methods.

    :param resource: ResourceBase instance from which the path is loaded.
    :param subresouce_path: JSON field to fetch the value from.
        Either a string, or a list of strings in case of a nested field.
        (Parameter name keeps the historical spelling for compatibility.)
    :returns: The value of the 'Allow' response header (allowed HTTP methods).
    """
    # Resolve the subresource URI, then read the Allow header from a GET.
    target_uri = get_subresource_path_by(resource, subresouce_path)
    return resource._conn.get(path=target_uri).headers['Allow']
|
Helper function to get the HTTP allowed methods.
:param resource: ResourceBase instance from which the path is loaded.
:param subresource_path: JSON field to fetch the value from.
Either a string, or a list of strings in case of a nested field.
:returns: A list of allowed HTTP methods.
|
def convert_to_dataset(obj, *, group="posterior", coords=None, dims=None):
    """Convert a supported object to an xarray dataset.

    Idempotent for xarray.Dataset inputs. Internally the object is first
    converted to an InferenceData (see `convert_to_inference_data` for the
    supported input types) and the requested group is then extracted.

    Parameters
    ----------
    obj : dict, str, np.ndarray, xr.Dataset, pystan fit, pymc3 trace
        A supported object to convert to InferenceData:
        InferenceData: returns unchanged
        str: Attempts to load the netcdf dataset from disk
        pystan fit: Automatically extracts data
        pymc3 trace: Automatically extracts data
        xarray.Dataset: adds to InferenceData as only group
        dict: creates an xarray dataset as the only group
        numpy array: creates an xarray dataset as the only group, gives the
        array an arbitrary name
    group : str
        If `obj` is a dict or numpy array, assigns the resulting xarray
        dataset to this group.
    coords : dict[str, iterable]
        A dictionary containing the values that are used as index. The key
        is the name of the dimension, the values are the index values.
    dims : dict[str, List(str)]
        A mapping from variables to a list of coordinate names for the variable.

    Returns
    -------
    xarray.Dataset

    Raises
    ------
    ValueError
        If the desired group can not be extracted from `obj`.
    """
    idata = convert_to_inference_data(obj, group=group, coords=coords, dims=dims)
    requested = getattr(idata, group, None)
    if requested is None:
        raise ValueError(
            "Can not extract {group} from {obj}! See (unknown) for other "
            "conversion utilities.".format(group=group, obj=obj, filename=__file__)
        )
    return requested
|
Convert a supported object to an xarray dataset.
This function is idempotent, in that it will return xarray.Dataset functions
unchanged. Raises `ValueError` if the desired group can not be extracted.
Note this goes through a DataInference object. See `convert_to_inference_data`
for more details. Raises ValueError if it can not work out the desired
conversion.
Parameters
----------
obj : dict, str, np.ndarray, xr.Dataset, pystan fit, pymc3 trace
A supported object to convert to InferenceData:
InferenceData: returns unchanged
str: Attempts to load the netcdf dataset from disk
pystan fit: Automatically extracts data
pymc3 trace: Automatically extracts data
xarray.Dataset: adds to InferenceData as only group
dict: creates an xarray dataset as the only group
numpy array: creates an xarray dataset as the only group, gives the
array an arbitrary name
group : str
If `obj` is a dict or numpy array, assigns the resulting xarray
dataset to this group.
coords : dict[str, iterable]
A dictionary containing the values that are used as index. The key
is the name of the dimension, the values are the index values.
dims : dict[str, List(str)]
A mapping from variables to a list of coordinate names for the variable
Returns
-------
xarray.Dataset
|
def thresholdForIdentity(identity, colors):
    """
    Get the best identity threshold for a specific identity value.

    @param identity: A C{float} nucleotide identity.
    @param colors: A C{list} of (threshold, color) tuples, where threshold is a
        C{float} and color is a C{str} to be used as a cell background. This
        is as returned by C{parseColors}.
    @return: The first C{float} threshold that the given identity is at least
        as big as.
    """
    # Thresholds arrive in descending order; take the first one that is met.
    match = next((threshold for threshold, _ in colors if identity >= threshold),
                 None)
    if match is None:
        raise ValueError('This should never happen! Last threshold is not 0.0?')
    return match
|
Get the best identity threshold for a specific identity value.
@param identity: A C{float} nucleotide identity.
@param colors: A C{list} of (threshold, color) tuples, where threshold is a
C{float} and color is a C{str} to be used as a cell background. This
is as returned by C{parseColors}.
@return: The first C{float} threshold that the given identity is at least
as big as.
|
def datetime_to_time(date, time):
    """Take the date and time 4-tuples and return the time in seconds since
    the epoch as a floating point number.

    A 255 ("any") component in either tuple makes the conversion
    impossible and raises RuntimeError.
    """
    if (255 in date) or (255 in time):
        raise RuntimeError("specific date and time required")
    # date[0] is years since 1900; day-of-week and hundredths components
    # of the 4-tuples are ignored, matching the original behaviour.
    year, month, day = date[0] + 1900, date[1], date[2]
    hour, minute, second = time[0], time[1], time[2]
    return _mktime((year, month, day, hour, minute, second, 0, 0, -1))
|
Take the date and time 4-tuples and return the time in seconds since
the epoch as a floating point number.
|
def del_character(self, name):
    """Remove the Character from the database entirely.

    This also deletes all its history. You'd better be sure.

    :param name: name of the character to delete
    """
    # Delete the persistent rows first, then the graph structure,
    # and finally drop the in-memory mapping entry.
    self.query.del_character(name)
    self.del_graph(name)
    del self.character[name]
|
Remove the Character from the database entirely.
This also deletes all its history. You'd better be sure.
|
def truncate_table(self, tablename):
    """
    SQLite3 doesn't support direct truncate, so we just use delete here.

    :param tablename: name of the table whose rows are all removed
    """
    # remove() deletes every row; the commit makes the deletion durable.
    self.get(tablename).remove()
    self.db.commit()
|
SQLite3 doesn't support direct truncate, so we just use delete here
|
def lookup(parser, var, context, resolve=True, apply_filters=True):
    """
    Try to resolve the variable in a context.

    If ``resolve`` is ``False``, only string variables are returned.
    """
    if not resolve:
        return var
    try:
        return Variable(var).resolve(context)
    except VariableDoesNotExist:
        # Fall back to a filter expression ("var|filter") when allowed.
        if apply_filters and var.find('|') > -1:
            return parser.compile_filter(var).resolve(context)
        return Constant(var)
    except TypeError:
        # already resolved
        return var
|
Try to resolve the variable in a context
If ``resolve`` is ``False``, only string variables are returned
|
def from_value(value):
    """
    Converts specified value into PagingParams.

    :param value: value to be converted
    :return: a newly created PagingParams.
    """
    if isinstance(value, PagingParams):
        return value
    if isinstance(value, AnyValueMap):
        return PagingParams.from_map(value)
    # Coerce anything else through AnyValueMap first.  Renamed from
    # `map` to avoid shadowing the builtin.
    value_map = AnyValueMap.from_value(value)
    return PagingParams.from_map(value_map)
|
Converts specified value into PagingParams.
:param value: value to be converted
:return: a newly created PagingParams.
|
def try_log_part(self, context=None, with_start_message=True):
    """
    Emit a progress log entry if the `part_log_time_minutes` window elapsed.

    :return: boolean True when a log record was written
    """
    context = {} if context is None else context
    self.__counter += 1
    if time.time() - self.__begin_time > self.__part_log_time_seconds:
        # Window elapsed: reset the timer and report progress counters.
        self.__begin_time = time.time()
        context['count'] = self.__counter
        if self.__total:
            self.__percent_done = int(self.__counter * 100 / self.__total)
            context['percentDone'] = self.__percent_done
            context['total'] = self.__total
        self.__log.info(msg=self.__log_message, context=context)
        return True
    if self.__counter == 1 and with_start_message:
        self.__log.info(u"Начали цикл: " + self.__log_message)
        return True
    return False
|
Залогировать, если пришло время из part_log_time_minutes
:return: boolean Возвращает True если лог был записан
|
def set_sample_weight(pipeline_steps, sample_weight=None):
    """Recursively iterates through all objects in the pipeline and sets sample weight.

    Parameters
    ----------
    pipeline_steps: array-like
        List of (str, obj) tuples from a scikit-learn pipeline or related object
    sample_weight: array-like
        List of sample weight
    Returns
    -------
    sample_weight_dict:
        A dictionary mapping '<step>__sample_weight' to `sample_weight` for
        every step whose `fit` accepts a `sample_weight` argument, or None
        when `sample_weight` is None or no step accepts it.
    """
    if sample_weight is None:
        return None
    # inspect.getargspec was deprecated and removed in Python 3.11;
    # getfullargspec is the drop-in replacement for this introspection.
    sample_weight_dict = {
        pname + '__sample_weight': sample_weight
        for pname, obj in pipeline_steps
        if 'sample_weight' in inspect.getfullargspec(obj.fit).args
    }
    return sample_weight_dict if sample_weight_dict else None
|
Recursively iterates through all objects in the pipeline and sets sample weight.
Parameters
----------
pipeline_steps: array-like
List of (str, obj) tuples from a scikit-learn pipeline or related object
sample_weight: array-like
List of sample weight
Returns
-------
sample_weight_dict:
A dictionary of sample_weight
|
def File(self, name, directory = None, create = 1):
    """Look up or create a File node with the specified name. If
    the name is a relative path (begins with ./, ../, or a file name),
    then it is looked up relative to the supplied directory node,
    or to the top level directory of the FS (supplied at construction
    time) if no directory is supplied.

    This method will raise TypeError if a directory is found at the
    specified path.
    """
    # NOTE(review): `File` in this call resolves to the module-level File
    # node class (the type to create/expect at the resolved path), not to
    # this method — confirm against the enclosing module.
    return self._lookup(name, directory, File, create)
|
Look up or create a File node with the specified name. If
the name is a relative path (begins with ./, ../, or a file name),
then it is looked up relative to the supplied directory node,
or to the top level directory of the FS (supplied at construction
time) if no directory is supplied.
This method will raise TypeError if a directory is found at the
specified path.
|
def mmatch(expr,
           delimiter,
           greedy,
           search_type,
           regex_match=False,
           exact_match=False,
           opts=None):
    '''
    Helper function to search for minions in master caches.

    If 'greedy', return accepted minions that matched by the condition or
    are absent in the cache.  If not 'greedy', return only the minions that
    have cache data and matched by the condition.
    '''
    # Fall back to the master configuration when no options were supplied.
    checker = salt.utils.minions.CkMinions(opts if opts else __opts__)
    return checker._check_cache_minions(expr, delimiter, greedy,
                                        search_type,
                                        regex_match=regex_match,
                                        exact_match=exact_match)
|
Helper function to search for minions in master caches
If 'greedy' return accepted minions that matched by the condition or absent in the cache.
If not 'greedy' return the only minions have cache data and matched by the condition.
|
def flavor_list(request):
    """Utility method to retrieve a list of flavors.

    Returns an empty list (after reporting the failure to the user via
    ``exceptions.handle``) when the Nova API call raises.
    """
    try:
        return api.nova.flavor_list(request)
    except Exception:
        exceptions.handle(request,
                          _('Unable to retrieve instance flavors.'))
        return []
|
Utility method to retrieve a list of flavors.
|
def getDetailInfo(self, CorpNum, ItemCode, MgtKey):
    """Fetch detailed information for an electronic statement.

    args
        CorpNum : Popbill member business registration number
        ItemCode : statement type code
            [121 - transaction statement], [122 - bill], [123 - estimate],
            [124 - purchase order], [125 - deposit slip], [126 - receipt]
        MgtKey : partner document management key
    return
        statement detail info object
    raise
        PopbillException
    """
    # Identity comparison with None (`is None`) instead of `== None`.
    if MgtKey is None or MgtKey == "":
        raise PopbillException(-99999999, "관리번호가 입력되지 않았습니다.")
    if ItemCode is None or ItemCode == "":
        raise PopbillException(-99999999, "명세서 종류 코드가 입력되지 않았습니다.")
    return self._httpget('/Statement/' + str(ItemCode) + '/' + MgtKey + '?Detail', CorpNum)
|
전자명세서 상세정보 확인
args
CorpNum : 팝빌회원 사업자번호
ItemCode : 명세서 종류 코드
[121 - 거래명세서], [122 - 청구서], [123 - 견적서],
[124 - 발주서], [125 - 입금표], [126 - 영수증]
MgtKey : 파트너 문서관리번호
return
문서 상세정보 object
raise
PopbillException
|
def find_file( self, folder_id, basename, limit = 500 ):
    '''
    Finds a file based on a box path.

    Returns a list of file IDs.
    Returns multiple file IDs if the file was split into parts with the
    extension '.partN' (where N is an integer).

    :param folder_id: Box folder ID to search in (non-recursive).
    :param basename: exact file name, or the common prefix of '.partN' pieces.
    :param limit: page size used when listing the folder contents.
    '''
    search_folder = self.client.folder( folder_id = folder_id )
    offset = 0
    search_items = search_folder.get_items( limit = limit, offset = offset )
    found_files = []
    # Page through the folder listing until an empty page comes back.
    while len(search_items) > 0:
        files = [ (f['id'], f['name']) for f in search_items if f['name'].startswith( basename ) and f['type'] == 'file' ]
        # Sort so multi-part files come back in part order.
        files.sort()
        # Guard against prefix collisions: every match must be either the
        # exact basename or a '.part' continuation of it.
        for f_id, f_name in files:
            assert(
                f_name == basename
                or
                ( f_name.startswith( basename ) and f_name[len(basename):len(basename)+5] == '.part' )
            )
        found_files.extend( files )
        offset += limit
        search_items = search_folder.get_items( limit = limit, offset = offset )
    return [f[0] for f in found_files]
|
Finds a file based on a box path
Returns a list of file IDs
Returns multiple file IDs if the file was split into parts with the extension '.partN' (where N is an integer)
|
def fetch_session_start_times(data_dir, pivot, session_dates):
    """
    Fetch session start times, persist them as CSV, and return them.

    :param data_dir: (str) directory in which the output file will be saved
    :param pivot: (int) congressperson document to use as a pivot for scraping the data
    :param session_dates: (list) datetime objects to fetch the start times for
    :return: pandas.DataFrame with the fetched session start times
    """
    session_start_times = SessionStartTimesDataset()
    df = session_start_times.fetch(pivot, session_dates)
    save_to_csv(df, data_dir, "session-start-times")
    # Bug fix: the original passed the counts as logging args with no
    # %-placeholder in the format string, so they were never rendered.
    log.info("Dates requested: %s", len(session_dates))
    found = pd.to_datetime(df['date'], format="%Y-%m-%d %H:%M:%S").dt.date.unique()
    log.info("Dates found: %s", len(found))
    return df
|
:param data_dir: (str) directory in which the output file will be saved
:param pivot: (int) congressperson document to use as a pivot for scraping the data
:param session_dates: (list) datetime objects to fetch the start times for
|
def generate_sinusoidal_lightcurve(
    times,
    mags=None,
    errs=None,
    paramdists={
        'period':sps.uniform(loc=0.04,scale=500.0),
        'fourierorder':[2,10],
        'amplitude':sps.uniform(loc=0.1,scale=0.9),
        'phioffset':0.0,
    },
    magsarefluxes=False
):
    '''This generates fake sinusoidal light curves.

    This can be used for a variety of sinusoidal variables, e.g. RRab, RRc,
    Cepheids, Miras, etc. The functions that generate these model LCs below
    implement the following table::

        ## FOURIER PARAMS FOR SINUSOIDAL VARIABLES
        #
        # type      fourier order dist   period [days] limits dist
        # RRab      8 to 10 uniform      0.45--0.80 uniform
        # RRc       3 to 6 uniform       0.10--0.40 uniform
        # HADS      7 to 9 uniform       0.04--0.10 uniform
        # rotator   2 to 5 uniform       0.80--120.0 uniform
        # LPV       2 to 5 uniform       250--500.0 uniform

    FIXME: for better model LCs, figure out how scipy.signal.butter works and
    low-pass filter using scipy.signal.filtfilt.

    Parameters
    ----------
    times : np.array
        This is an array of time values that will be used as the time base.
    mags,errs : np.array
        These arrays will have the model added to them. If either is
        None, `np.full_like(times, 0.0)` will used as a substitute and the model
        light curve will be centered around 0.0.
    paramdists : dict
        This is a dict containing parameter distributions to use for the
        model params, containing the following keys ::
            {'period', 'fourierorder', 'amplitude', 'phioffset'}
        The values of these keys should all be 'frozen' scipy.stats distribution
        objects, e.g.:
        https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions
        The variability epoch will be automatically chosen from a uniform
        distribution between `times.min()` and `times.max()`.
        The `amplitude` will be flipped automatically as appropriate if
        `magsarefluxes=True`.
    magsarefluxes : bool
        If the generated time series is meant to be a flux time-series, set this
        to True to get the correct sign of variability amplitude.

    Returns
    -------
    dict
        A dict of the form below is returned::
            {'vartype': 'sinusoidal',
             'params': {'period': generated value of period,
                        'epoch': generated value of epoch,
                        'amplitude': generated value of amplitude,
                        'fourierorder': generated value of fourier order,
                        'fourieramps': generated values of fourier amplitudes,
                        'fourierphases': generated values of fourier phases},
             'times': the model times,
             'mags': the model mags,
             'errs': the model errs,
             'varperiod': the generated period of variability == 'period'
             'varamplitude': the generated amplitude of
                             variability == 'amplitude'}
    '''
    # NOTE(review): the mutable default dict for `paramdists` is only read,
    # never mutated, so sharing it across calls is safe here; a None default
    # expanded inside the body would still be more conventional.
    if mags is None:
        mags = np.full_like(times, 0.0)
    if errs is None:
        errs = np.full_like(times, 0.0)
    # choose the epoch
    epoch = npr.random()*(times.max() - times.min()) + times.min()
    # choose the period, fourierorder, and amplitude
    # NOTE: period and amplitude are 1-element ndarrays (rvs(size=1)) and
    # are stored as such in the returned dict.
    period = paramdists['period'].rvs(size=1)
    # npr.randint's `high` is exclusive, so the largest order drawn is
    # paramdists['fourierorder'][1] - 1.
    fourierorder = npr.randint(paramdists['fourierorder'][0],
                               high=paramdists['fourierorder'][1])
    amplitude = paramdists['amplitude'].rvs(size=1)
    # fix the amplitude if it needs to be flipped
    if magsarefluxes and amplitude < 0.0:
        amplitude = -amplitude
    elif not magsarefluxes and amplitude > 0.0:
        amplitude = -amplitude
    # generate the amplitudes and phases of the Fourier components
    ampcomps = [abs(amplitude/2.0)/float(x)
                for x in range(1,fourierorder+1)]
    phacomps = [paramdists['phioffset']*float(x)
                for x in range(1,fourierorder+1)]
    # now that we have our amp and pha components, generate the light curve
    # (pmags is unused but kept for the tuple unpack)
    modelmags, phase, ptimes, pmags, perrs = sinusoidal.sine_series_sum(
        [period, epoch, ampcomps, phacomps],
        times,
        mags,
        errs
    )
    # resort in original time order
    timeind = np.argsort(ptimes)
    mtimes = ptimes[timeind]
    mmags = modelmags[timeind]
    merrs = perrs[timeind]
    mphase = phase[timeind]
    # return a dict with everything
    modeldict = {
        'vartype':'sinusoidal',
        'params':{x:y for x,y in zip(['period',
                                      'epoch',
                                      'amplitude',
                                      'fourierorder',
                                      'fourieramps',
                                      'fourierphases'],
                                     [period,
                                      epoch,
                                      amplitude,
                                      fourierorder,
                                      ampcomps,
                                      phacomps])},
        'times':mtimes,
        'mags':mmags,
        'errs':merrs,
        'phase':mphase,
        # these are standard keys that help with later characterization of
        # variability as a function period, variability amplitude, object mag,
        # ndet, etc.
        'varperiod':period,
        'varamplitude':amplitude
    }
    return modeldict
|
This generates fake sinusoidal light curves.
This can be used for a variety of sinusoidal variables, e.g. RRab, RRc,
Cepheids, Miras, etc. The functions that generate these model LCs below
implement the following table::
## FOURIER PARAMS FOR SINUSOIDAL VARIABLES
#
# type fourier period [days]
# order dist limits dist
# RRab 8 to 10 uniform 0.45--0.80 uniform
# RRc 3 to 6 uniform 0.10--0.40 uniform
# HADS 7 to 9 uniform 0.04--0.10 uniform
# rotator 2 to 5 uniform 0.80--120.0 uniform
# LPV 2 to 5 uniform 250--500.0 uniform
FIXME: for better model LCs, figure out how scipy.signal.butter works and
low-pass filter using scipy.signal.filtfilt.
Parameters
----------
times : np.array
This is an array of time values that will be used as the time base.
mags,errs : np.array
These arrays will have the model added to them. If either is
None, `np.full_like(times, 0.0)` will used as a substitute and the model
light curve will be centered around 0.0.
paramdists : dict
This is a dict containing parameter distributions to use for the
model params, containing the following keys ::
{'period', 'fourierorder', 'amplitude', 'phioffset'}
The values of these keys should all be 'frozen' scipy.stats distribution
objects, e.g.:
https://docs.scipy.org/doc/scipy/reference/stats.html#continuous-distributions
The variability epoch will be automatically chosen from a uniform
distribution between `times.min()` and `times.max()`.
The `amplitude` will be flipped automatically as appropriate if
`magsarefluxes=True`.
magsarefluxes : bool
If the generated time series is meant to be a flux time-series, set this
to True to get the correct sign of variability amplitude.
Returns
-------
dict
A dict of the form below is returned::
{'vartype': 'sinusoidal',
'params': {'period': generated value of period,
'epoch': generated value of epoch,
'amplitude': generated value of amplitude,
'fourierorder': generated value of fourier order,
'fourieramps': generated values of fourier amplitudes,
'fourierphases': generated values of fourier phases},
'times': the model times,
'mags': the model mags,
'errs': the model errs,
'varperiod': the generated period of variability == 'period'
'varamplitude': the generated amplitude of
variability == 'amplitude'}
|
def get_clamav_conf(filename):
    """Initialize clamav configuration.

    Returns a ClamavConfig for an existing file; a missing config file is
    tolerated with a warning and a None return.
    """
    if not os.path.isfile(filename):
        log.warn(LOG_PLUGIN, "No ClamAV config file found at %r.", filename)
        return None
    return ClamavConfig(filename)
|
Initialize clamav configuration.
|
def waitForSlotEvent(self, flags=0):
    """
    C_WaitForSlotEvent

    :param flags: 0 (default) or `CKF_DONT_BLOCK`
    :type flags: integer
    :return: slot
    :rtype: integer
    """
    # The second argument is a reserved placeholder required by the
    # low-level wrapper.
    rv, slot = self.lib.C_WaitForSlotEvent(flags, 0)
    if rv != CKR_OK:
        raise PyKCS11Error(rv)
    return slot
|
C_WaitForSlotEvent
:param flags: 0 (default) or `CKF_DONT_BLOCK`
:type flags: integer
:return: slot
:rtype: integer
|
def unique_id(self):
    """Creates a unique ID for the `Atom` based on its parents.

    Returns
    -------
    unique_id : (str, str, str)
        (polymer.id, residue.id, atom.id)
    """
    residue = self.parent
    chain = residue.parent
    # Chain id, then residue id, then this atom's own id.
    return chain.id, residue.id, self.id
|
Creates a unique ID for the `Atom` based on its parents.
Returns
-------
unique_id : (str, str, str)
(polymer.id, residue.id, atom.id)
|
def parse_ical(vcal):
    '''Parse Opencast schedule iCalendar file and return events as dict
    '''
    # Unfold wrapped lines and collapse blank lines before splitting.
    vcal = vcal.replace('\r\n ', '').replace('\r\n\r\n', '\r\n')
    events = []
    # The text before the first VEVENT marker is calendar header: skip it.
    for vevent in vcal.split('\r\nBEGIN:VEVENT\r\n')[1:]:
        event = {}
        for raw_line in vevent.split('\r\n'):
            parts = raw_line.split(':', 1)
            key = parts[0].lower()
            if len(parts) <= 1 or key == 'end':
                continue
            if key.startswith('dt'):
                # Timestamp fields become unix epoch seconds.
                event[key] = unix_ts(dateutil.parser.parse(parts[1]))
            elif not key.startswith('attach'):
                event[key] = parts[1]
            else:
                # Attachment: pick up fmttype / x-apple-filename parameters
                # plus the base64-decoded payload.
                attachment = {}
                for param in (p.split('=') for p in parts[0].split(';')):
                    if param[0].lower() in ['fmttype', 'x-apple-filename']:
                        attachment[param[0].lower()] = param[1]
                attachment['data'] = b64decode(parts[1]).decode('utf-8')
                event.setdefault('attach', []).append(attachment)
        events.append(event)
    return events
|
Parse Opencast schedule iCalendar file and return events as dict
|
def stage_http_request(self, conn_id, version, url, target, method, headers,
                       payload):
    """Record the details of an outgoing HTTP request on this object."""
    # pylint: disable=attribute-defined-outside-init
    # Stash each piece of the request under an _http_request_* attribute
    # so it can be reported later.
    self._http_request_conn_id = conn_id
    self._http_request_version = version
    self._http_request_url = url
    self._http_request_target = target
    self._http_request_method = method
    self._http_request_headers = headers
    self._http_request_payload = payload
|
Set request HTTP information including url, headers, etc.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.