def update_compaction(model):
"""Updates the compaction options for the given model if necessary.
:param model: The model to update.
:return: `True`, if the compaction options were modified in Cassandra,
`False` otherwise.
:rtype: bool
"""
logger.debug("Checking %s for compaction differences", model)
table = get_table_settings(model)
existing_options = table.options.copy()
existing_compaction_strategy = existing_options['compaction_strategy_class']
existing_options = json.loads(existing_options['compaction_strategy_options'])
desired_options = get_compaction_options(model)
desired_compact_strategy = desired_options.get('class', SizeTieredCompactionStrategy)
desired_options.pop('class', None)
do_update = False
if desired_compact_strategy not in existing_compaction_strategy:
do_update = True
for k, v in desired_options.items():
val = existing_options.pop(k, None)
if val != v:
do_update = True
# check compaction_strategy_options
if do_update:
options = get_compaction_options(model)
# jsonify
options = json.dumps(options).replace('"', "'")
cf_name = model.column_family_name()
query = "ALTER TABLE {} with compaction = {}".format(cf_name, options)
logger.debug(query)
execute(query)
return True
return False
|
Updates the compaction options for the given model if necessary.
:param model: The model to update.
:return: `True`, if the compaction options were modified in Cassandra,
`False` otherwise.
:rtype: bool
|
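For illustration, the ALTER TABLE statement assembled above looks like this (table name and option values are hypothetical; the json.dumps/replace step mirrors the code):

import json

options = {'class': 'SizeTieredCompactionStrategy', 'min_threshold': '4'}  # hypothetical values
options_str = json.dumps(options).replace('"', "'")  # jsonify, then requote for CQL
query = "ALTER TABLE {} with compaction = {}".format('my_ks.my_table', options_str)
# ALTER TABLE my_ks.my_table with compaction = {'class': 'SizeTieredCompactionStrategy', 'min_threshold': '4'}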
def load_history(self) -> List["IterationRecord"]:
"""
Load messaging history from disk to self.
:returns: List of iteration records comprising history.
"""
if path.isfile(self.history_filename):
with open(self.history_filename, "r") as f:
try:
dicts = json.load(f)
except json.decoder.JSONDecodeError as e:
self.log.error(f"Got error \n{e}\n decoding JSON history, overwriting it.\n"
f"Former history available in {self.history_filename}.bak")
copyfile(self.history_filename, f"{self.history_filename}.bak")
return []
history: List[IterationRecord] = []
for hdict_pre in dicts:
if "_type" in hdict_pre and hdict_pre["_type"] == IterationRecord.__name__:
# repair any corrupted entries
hdict = _repair(hdict_pre)
record = IterationRecord.from_dict(hdict)
history.append(record)
# Be sure to handle legacy tweetrecord-only histories.
# Assume anything without our new _type (which should have been there from the
# start, whoops) is a legacy history.
else:
item = IterationRecord()
# Lift extra keys up to upper record (if they exist).
extra_keys = hdict_pre.pop("extra_keys", {})
item.extra_keys = extra_keys
hdict_obj = TweetRecord.from_dict(hdict_pre)
# Lift timestamp up to upper record.
item.timestamp = hdict_obj.timestamp
item.output_records["birdsite"] = hdict_obj
history.append(item)
self.log.debug(f"Loaded history:\n {history}")
return history
else:
return []
|
Load messaging history from disk to self.
:returns: List of iteration records comprising history.
|
def safe_unicode(e):
"""unicode(e) with various fallbacks. Used for exceptions, which may not be
safe to call unicode() on.
"""
try:
return unicode(e)
except UnicodeError:
pass
try:
return py3compat.str_to_unicode(str(e))
except UnicodeError:
pass
try:
return py3compat.str_to_unicode(repr(e))
except UnicodeError:
pass
return u'Unrecoverably corrupt evalue'
|
unicode(e) with various fallbacks. Used for exceptions, which may not be
safe to call unicode() on.
|
def filter(self, **search_args):
"""
Get a filtered list of resources
:param search_args: To be translated into ?arg1=value1&arg2=value2...
:return: A list of resources
"""
search_args = search_args or {}
raw_resources = []
for url, paginator_params in self.paginator.get_urls(self.get_collection_endpoint()):
search_args.update(paginator_params)
response = self.paginator.process_response(self.send(url, "get", params=search_args))
data = self.client.get_response_data(response, self.Meta.parse_json)
raw_resources += data[self.json_collection_attribute] if self.json_collection_attribute is not None else data
resources = []
for raw_resource in raw_resources:
try:
resource = self.resource_class(self.client)
except (ValueError, TypeError):
continue
else:
resource.update_from_dict(raw_resource)
resources.append(resource)
return resources
|
Get a filtered list of resources
:param search_args: To be translated into ?arg1=value1&arg2=value2...
:return: A list of resources
|
def link_android(self, path, pkg):
""" Link's the android project to this library.
1. Includes this project's directory in the app's
android/settings.gradle
It adds:
include ':<project-name>'
project(':<project-name>').projectDir = new File(
rootProject.projectDir, '../packages/<project-name>/android')
2. Adds this project as a dependency to the android/app/build.gradle
It adds:
api project(':<project-name>')
to the dependencies.
3. If present, adds the import and package statement
to the android/app/src/main/java/<bundle/id>/MainApplication.java
"""
bundle_id = self.ctx['bundle_id']
pkg_root = join(path, pkg)
#: Check if it's already linked
with open(join('android', 'settings.gradle')) as f:
settings_gradle = f.read()
with open(join('android', 'app', 'build.gradle')) as f:
build_gradle = f.read()
#: Find the MainApplication.java
main_app_java_path = join('android', 'app', 'src', 'main', 'java',
join(*bundle_id.split(".")),
'MainApplication.java')
with open(main_app_java_path) as f:
main_application_java = f.read()
try:
#: Now link all the EnamlPackages we can find in the new "package"
new_packages = Link.find_packages(join(path, pkg))
if not new_packages:
print("[Android] {} No EnamlPackages found to link!".format(
pkg))
return
#: Link settings.gradle
if not Link.is_settings_linked(settings_gradle, pkg):
#: Add two statements
new_settings = settings_gradle.split("\n")
new_settings.append("") # Blank line
new_settings.append("include ':{name}'".format(name=pkg))
new_settings.append("project(':{name}').projectDir = "
"new File(rootProject.projectDir, "
"'../{path}/android/{name}')"
.format(name=pkg, path=self.package_dir))
with open(join('android', 'settings.gradle'), 'w') as f:
f.write("\n".join(new_settings))
print("[Android] {} linked in settings.gradle!".format(pkg))
else:
print("[Android] {} was already linked in "
"settings.gradle!".format(pkg))
#: Link app/build.gradle
if not Link.is_build_linked(build_gradle, pkg):
#: Add two statements
new_build = build_gradle.split("\n")
#: Find correct line number
found = False
for i, line in enumerate(new_build):
if re.match(r"dependencies\s*{", line):
found = True
continue
if found and "}" in line:
#: Hackish way to find line of the closing bracket after
#: the dependencies { block is found
break
if not found:
raise ValueError("Unable to find dependencies in "
"{pkg}/app/build.gradle!".format(pkg=pkg))
#: Insert before the closing bracket
new_build.insert(i, " api project(':{name}')".format(
name=pkg))
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write("\n".join(new_build))
print("[Android] {} linked in app/build.gradle!".format(pkg))
else:
print("[Android] {} was already linked in "
"app/build.gradle!".format(pkg))
new_app_java = []
for package in new_packages:
#: Add our import statement
javacls = os.path.splitext(package)[0].replace("/", ".")
if not Link.is_app_linked(main_application_java, pkg, javacls):
#: Reuse previous if available
new_app_java = (new_app_java or
main_application_java.split("\n"))
#: Find last import statement
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line, "import *;"):
j = i
new_app_java.insert(j+1, "import {};".format(javacls))
#: Add the package statement
j = 0
for i, line in enumerate(new_app_java):
if fnmatch.fnmatch(line.strip(), "new *Package()"):
j = i
if j == 0:
raise ValueError("Could not find the correct spot to "
"add package {}".format(javacls))
else:
#: Get indent and add to previous line
#: Add comma to previous line
new_app_java[j] = new_app_java[j] + ","
#: Insert new line
new_app_java.insert(j+1, " new {}()"
.format(javacls.split(".")[-1]))
else:
print("[Android] {} was already linked in {}!".format(
pkg, main_app_java_path))
if new_app_java:
with open(main_app_java_path, 'w') as f:
f.write("\n".join(new_app_java))
print(Colors.GREEN+"[Android] {} linked successfully!".format(
pkg)+Colors.RESET)
except Exception as e:
print(Colors.RED+"[Android] {} Failed to link. "
"Reverting due to error: "
"{}".format(pkg, e)+Colors.RESET)
#: Undo any changes
with open(join('android', 'settings.gradle'), 'w') as f:
f.write(settings_gradle)
with open(join('android', 'app', 'build.gradle'), 'w') as f:
f.write(build_gradle)
with open(main_app_java_path, 'w') as f:
f.write(main_application_java)
#: Now blow up
raise
|
Links the android project to this library.
1. Includes this project's directory in the app's
android/settings.gradle
It adds:
include ':<project-name>'
project(':<project-name>').projectDir = new File(
rootProject.projectDir, '../packages/<project-name>/android')
2. Adds this project as a dependency to the android/app/build.gradle
It adds:
api project(':<project-name>')
to the dependencies.
3. If present, adds the import and package statement
to the android/app/src/main/java/<bundle/id>/MainApplication.java
|
def clear_cache(self):
'''
Completely clear cache
'''
errors = []
for rdir in (self.cache_root, self.file_list_cachedir):
if os.path.exists(rdir):
try:
shutil.rmtree(rdir)
except OSError as exc:
errors.append(
'Unable to delete {0}: {1}'.format(rdir, exc)
)
return errors
|
Completely clear cache
|
def load_pdb(self, pdb_id, mapped_chains=None, pdb_file=None, file_type=None, is_experimental=True,
set_as_representative=False, representative_chain=None, force_rerun=False):
"""Load a structure ID and optional structure file into the structures attribute.
Args:
pdb_id (str): PDB ID
mapped_chains (str, list): Chain ID or list of IDs which you are interested in
pdb_file (str): Path to PDB file
file_type (str): Type of PDB file
is_experimental (bool): If this structure file is experimental
set_as_representative (bool): If this structure should be set as the representative structure
representative_chain (str): If ``set_as_representative`` is ``True``, provide the representative chain ID
force_rerun (bool): If the PDB should be reloaded if it is already in the list of structures
Returns:
PDBProp: The object that is now contained in the structures attribute
"""
if self.structures.has_id(pdb_id):
# Remove the structure if set to force rerun
if force_rerun:
existing = self.structures.get_by_id(pdb_id)
self.structures.remove(existing)
# Otherwise just retrieve it
else:
log.debug('{}: PDB ID already present in list of structures'.format(pdb_id))
pdb = self.structures.get_by_id(pdb_id)
if pdb_file:
pdb.load_structure_path(pdb_file, file_type)
if mapped_chains:
pdb.add_mapped_chain_ids(mapped_chains)
# Create a new StructProp entry
if not self.structures.has_id(pdb_id):
if is_experimental:
pdb = PDBProp(ident=pdb_id, mapped_chains=mapped_chains, structure_path=pdb_file, file_type=file_type)
else:
pdb = StructProp(ident=pdb_id, mapped_chains=mapped_chains, structure_path=pdb_file, file_type=file_type)
self.structures.append(pdb)
if set_as_representative:
# Parse structure so chains are stored before setting representative
pdb.parse_structure()
self._representative_structure_setter(structprop=pdb, keep_chain=representative_chain, force_rerun=force_rerun)
return self.structures.get_by_id(pdb_id)
|
Load a structure ID and optional structure file into the structures attribute.
Args:
pdb_id (str): PDB ID
mapped_chains (str, list): Chain ID or list of IDs which you are interested in
pdb_file (str): Path to PDB file
file_type (str): Type of PDB file
is_experimental (bool): If this structure file is experimental
set_as_representative (bool): If this structure should be set as the representative structure
representative_chain (str): If ``set_as_representative`` is ``True``, provide the representative chain ID
force_rerun (bool): If the PDB should be reloaded if it is already in the list of structures
Returns:
PDBProp: The object that is now contained in the structures attribute
|
def update_default_output_dir(self):
"""Update output dir if set to default."""
if self.scenario_directory_radio.isChecked():
self.output_directory.setText(self.source_directory.text())
|
Update output dir if set to default.
|
def handle_events(self):
"""
An event handler that processes events from stdin and calls the on_click
function of the respective object. This function is run in another
thread, so as to not stall the main thread.
"""
for event in sys.stdin:
if event.startswith('['):
continue
payload = json.loads(event.lstrip(','))
name = payload['name']
for obj in self.loader.objects:
    if obj.output_options['name'] == name:
        obj.on_click(payload)
|
An event handler that processes events from stdin and calls the on_click
function of the respective object. This function is run in another
thread, so as to not stall the main thread.
|
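A sketch of the input this handler expects, reconstructed from the parsing above (an opening `[` line is skipped, then one comma-prefixed JSON object per click; field names beyond `name` are illustrative):

import json

event = ',{"name": "clock", "button": 1}'   # one line of the stdin stream
payload = json.loads(event.lstrip(','))
assert payload['name'] == 'clock'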
def _ParsePlistKeyValue(self, knowledge_base, name, value):
"""Parses a plist key value.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
name (str): name of the plist key.
value (str): value of the plist key.
"""
if not knowledge_base.GetValue('keyboard_layout'):
if name in self._PLIST_KEYS:
if isinstance(value, (list, tuple)):
value = value[0]
_, _, keyboard_layout = value.rpartition('.')
knowledge_base.SetValue('keyboard_layout', keyboard_layout)
|
Parses a plist key value.
Args:
knowledge_base (KnowledgeBase): to fill with preprocessing information.
name (str): name of the plist key.
value (str): value of the plist key.
|
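The `rpartition('.')` step keeps only the text after the last dot, so a reverse-DNS style layout identifier reduces to its final component (the value below is illustrative):

value = 'com.apple.keylayout.US'             # illustrative plist value
_, _, keyboard_layout = value.rpartition('.')
assert keyboard_layout == 'US'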
def make_middleware_stack(middleware, base):
"""
Given a list of in-order middleware callable objects `middleware` and a base function `base`, chains them
together so each middleware is fed the function below, and returns the top level ready to call.
:param middleware: The middleware stack
:type middleware: iterable[callable]
:param base: The base callable that the lowest-order middleware wraps
:type base: callable
:return: The topmost middleware, which calls the next middleware ... which calls the lowest-order middleware,
which calls the `base` callable.
:rtype: callable
"""
for ware in reversed(middleware):
base = ware(base)
return base
|
Given a list of in-order middleware callable objects `middleware` and a base function `base`, chains them
together so each middleware is fed the function below, and returns the top level ready to call.
:param middleware: The middleware stack
:type middleware: iterable[callable]
:param base: The base callable that the lowest-order middleware wraps
:type base: callable
:return: The topmost middleware, which calls the next middleware ... which calls the lowest-order middleware,
which calls the `base` callable.
:rtype: callable
|
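A minimal usage sketch: with two toy middleware factories, the first entry in the list ends up outermost and therefore runs first on the way in:

def outer(next_fn):
    return lambda x: 'outer(' + next_fn(x) + ')'

def inner(next_fn):
    return lambda x: 'inner(' + next_fn(x) + ')'

stack = make_middleware_stack([outer, inner], base=lambda x: x)
assert stack('base') == 'outer(inner(base))'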
def _log_players(self, players):
"""
:param players: list of catan.game.Player objects
"""
self._logln('players: {0}'.format(len(players)))
for p in players:
self._logln('name: {0}, color: {1}, seat: {2}'.format(p.name, p.color, p.seat))
|
:param players: list of catan.game.Player objects
|
def property_schema(self, key):
"""
Lookup the schema for a specific property.
"""
schema = self.__class__.SCHEMA
# first try plain properties
plain_schema = schema.get("properties", {}).get(key)
if plain_schema is not None:
return plain_schema
# then try pattern properties
pattern_properties = schema.get("patternProperties", {})
for pattern, pattern_schema in pattern_properties.items():
if match(pattern, key):
return pattern_schema
# finally try additional properties (defaults to true per JSON Schema)
return schema.get("additionalProperties", True)
|
Lookup the schema for a specific property.
|
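A sketch of the three lookup tiers, assuming a hypothetical SCHEMA on the class:

SCHEMA = {
    "properties": {"name": {"type": "string"}},
    "patternProperties": {"^x-": {"type": "integer"}},
    "additionalProperties": False,
}
# "name"   -> {"type": "string"}   (plain property)
# "x-rate" -> {"type": "integer"}  (pattern property, via re.match)
# "other"  -> False                (falls through to additionalProperties)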
def write_collection_from_tmpfile(self, collection_id, tmpfi, parent_sha, auth_info, commit_msg=''):
"""Given a collection_id, temporary filename of content, branch and auth_info
"""
return self.write_doc_from_tmpfile(collection_id,
tmpfi,
parent_sha,
auth_info,
commit_msg,
doctype_display_name="collection")
|
Given a collection_id, a temporary filename of content, a parent SHA and auth_info, writes the collection document.
|
def rfdist_task(newick_string_a, newick_string_b, normalise, min_overlap=4, overlap_fail_value=0):
"""
Distributed version of tree_distance.rfdist
Parameters: two valid newick strings and a boolean
"""
tree_a = Tree(newick_string_a)
tree_b = Tree(newick_string_b)
return treedist.rfdist(tree_a, tree_b, normalise, min_overlap, overlap_fail_value)
|
Distributed version of tree_distance.rfdist
Parameters: two valid newick strings, a boolean normalise flag, and optional min_overlap / overlap_fail_value
|
def is_stopword(self, text):
"""
Determine whether a single word is a stopword, or whether a short
phrase is made entirely of stopwords, disregarding context.
Use of this function should be avoided; it's better to give the text
in context and let the process determine which words are the stopwords.
"""
found_content_word = False
for record in self.analyze(text):
if not self.is_stopword_record(record):
found_content_word = True
break
return not found_content_word
|
Determine whether a single word is a stopword, or whether a short
phrase is made entirely of stopwords, disregarding context.
Use of this function should be avoided; it's better to give the text
in context and let the process determine which words are the stopwords.
|
def get_license_manager(service_instance):
'''
Returns the license manager.
service_instance
The Service Instance Object from which to obtain the license manager.
'''
log.debug('Retrieving license manager')
try:
lic_manager = service_instance.content.licenseManager
except vim.fault.NoPermission as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(
'Not enough permissions. Required privilege: '
'{0}'.format(exc.privilegeId))
except vim.fault.VimFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareApiError(exc.msg)
except vmodl.RuntimeFault as exc:
log.exception(exc)
raise salt.exceptions.VMwareRuntimeError(exc.msg)
return lic_manager
|
Returns the license manager.
service_instance
The Service Instance Object from which to obtain the license manager.
|
def page(self, number):
"""
Returns a Page object for the given 1-based page number.
"""
number = self.validate_number(number)
bottom = (number - 1) * self.per_page
top = bottom + self.per_page
if top + self.orphans >= self.count:
top = self.count
return self._get_page(self.object_list[bottom:top], number, self)
|
Returns a Page object for the given 1-based page number.
|
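A worked example of the orphans rule: with 23 items, per_page=10 and orphans=3, page 2 absorbs the last 3 items instead of leaving a tiny page 3:

count, per_page, orphans, number = 23, 10, 3, 2
bottom = (number - 1) * per_page   # 10
top = bottom + per_page            # 20
if top + orphans >= count:         # 23 >= 23, so absorb the stragglers
    top = count
assert (bottom, top) == (10, 23)   # page 2 holds items 10..22 (13 items)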
def cli_wrapper(generator):
"""Given a wizard, implements an interactive command-line human-friendly
interface for it.
Parameters
----------
generator
A generator such as one created by calling
:py:func:`rig.wizard.wizard_generator`.
Returns
-------
dict or None
Returns a dictionary containing the results of the wizard or None if
the wizard failed.
"""
first = True
response = None
while True:
# Insert blank lines between prompts
if not first:
print()
first = False
try:
message = generator.send(response)
if isinstance(message, MultipleChoice):
print(message.question)
for num, choice in enumerate(message.options):
print(" {}: {}".format(num, choice))
option = input("Select an option 0-{}{}: ".format(
len(message.options) - 1,
" (default: {})".format(message.default)
if message.default is not None else ""))
if option == "" and message.default is not None:
option = message.default
try:
response = int(option)
except ValueError:
response = -1
if not (0 <= response < len(message.options)):
print("ERROR: {} is not a valid option.".format(option))
return None
elif isinstance(message, Text):
print(message.question)
response = input("> ")
elif isinstance(message, Prompt):
print(message.message)
input("<Press enter to continue>")
response = None
elif isinstance(message, Info): # pragma: no branch
print(message.message)
response = None
except Failure as f:
print("ERROR: {}".format(str(f)))
return None
except Success as s:
return s.data
|
Given a wizard, implements an interactive command-line human-friendly
interface for it.
Parameters
----------
generator
A generator such as one created by calling
:py:func:`rig.wizard.wizard_generator`.
Returns
-------
dict or None
Returns a dictionary containing the results of the wizard or None if
the wizard failed.
|
def _get_files(self, attrs=None):
""" Get a list of all files in this download; each entry has the
attributes C{path} (relative to root), C{size} (in bytes),
C{mtime}, C{prio} (0=off, 1=normal, 2=high), C{created},
and C{opened}.
This is UNCACHED, use C{fetch("files")} instead.
@param attrs: Optional list of additional attributes to fetch.
"""
try:
# Get info for all files
f_multicall = self._engine._rpc.f.multicall
f_params = [self._fields["hash"], 0,
"f.path=", "f.size_bytes=", "f.last_touched=",
"f.priority=", "f.is_created=", "f.is_open=",
]
for attr in (attrs or []):
f_params.append("f.%s=" % attr)
rpc_result = f_multicall(*tuple(f_params))
except xmlrpc.ERRORS as exc:
raise error.EngineError("While %s torrent #%s: %s" % (
"getting files for", self._fields["hash"], exc))
else:
#self._engine.LOG.debug("files result: %r" % rpc_result)
# Return results
result = [Bunch(
path=i[0], size=i[1], mtime=i[2] / 1000000.0,
prio=i[3], created=i[4], opened=i[5],
) for i in rpc_result]
if attrs:
for idx, attr in enumerate(attrs):
if attr.startswith("get_"):
attr = attr[4:]
for item, rpc_item in zip(result, rpc_result):
item[attr] = rpc_item[6+idx]
return result
|
Get a list of all files in this download; each entry has the
attributes C{path} (relative to root), C{size} (in bytes),
C{mtime}, C{prio} (0=off, 1=normal, 2=high), C{created},
and C{opened}.
This is UNCACHED, use C{fetch("files")} instead.
@param attrs: Optional list of additional attributes to fetch.
|
async def sendto(self, data, component):
"""
Send a datagram on the specified component.
If the connection is not established, a `ConnectionError` is raised.
"""
active_pair = self._nominated.get(component)
if active_pair:
await active_pair.protocol.send_data(data, active_pair.remote_addr)
else:
raise ConnectionError('Cannot send data, not connected')
|
Send a datagram on the specified component.
If the connection is not established, a `ConnectionError` is raised.
|
def query_alternative_short_name():
"""
Returns list of alternative short names by query parameters
---
tags:
- Query functions
parameters:
- name: name
in: query
type: string
required: false
description: Alternative short name
default: CVAP
- name: entry_name
in: query
type: string
required: false
description: UniProt entry name
default: A4_HUMAN
- name: limit
in: query
type: integer
required: false
description: limit on the number of results
default: 10
"""
args = get_args(
request_args=request.args,
allowed_str_args=['name', 'entry_name'],
allowed_int_args=['limit']
)
return jsonify(query.alternative_short_name(**args))
|
Returns list of alternative short names by query parameters
---
tags:
- Query functions
parameters:
- name: name
in: query
type: string
required: false
description: Alternative short name
default: CVAP
- name: entry_name
in: query
type: string
required: false
description: UniProt entry name
default: A4_HUMAN
- name: limit
in: query
type: integer
required: false
description: limit on the number of results
default: 10
|
def _addDatasetAction(self, dataset):
"""
Adds an action for the given dataset to the toolbar
:param dataset | <XChartDataset>
"""
# create the toolbar action
action = QAction(dataset.name(), self)
action.setIcon(XColorIcon(dataset.color()))
action.setCheckable(True)
action.setChecked(True)
action.setData(wrapVariant(dataset))
action.toggled.connect(self.toggleDataset)
self.uiDatasetTBAR.addAction(action)
|
Adds an action for the given dataset to the toolbar
:param dataset | <XChartDataset>
|
def add_hits_to_proteins(self, hmm_hit_list):
'''Add HMMER results to Protein objects'''
for org in self.organisms:
print "adding SearchIO hit objects for", org.accession
for hit in hmm_hit_list:
hit_org_id = hit.id.split(',')[0]
hit_prot_id = hit.id.split(',')[1]
if org.accession == hit_org_id:
for prot in org.proteins:
if prot.accession == hit_prot_id:
prot.hmm_hit_list.append(hit)
|
Add HMMER results to Protein objects
|
def myRank(grade, badFormat, year, length):
'''Rank of the given weighted-average grade within the year
Arguments:
grade {int} -- a weighted average for a specific candidate number and year
badFormat {dict} -- candNumber : [results for candidate]
year {int} -- year you are in
length {int} -- length of each row in badFormat divided by 2
Returns:
int -- rank of the grade within the year
'''
return int(sorted(everyonesAverage(year, badFormat, length), reverse=True).index(grade) + 1)
|
Rank of the given weighted-average grade within the year
Arguments:
grade {int} -- a weighted average for a specific candidate number and year
badFormat {dict} -- candNumber : [results for candidate]
year {int} -- year you are in
length {int} -- length of each row in badFormat divided by 2
Returns:
int -- rank of the grade within the year
|
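A worked example of the ranking arithmetic, with a hypothetical stand-in for the everyonesAverage result:

averages = [72.5, 68.0, 81.0, 72.5]   # hypothetical year-wide weighted averages
grade = 72.5
rank = sorted(averages, reverse=True).index(grade) + 1
assert rank == 2   # sorted desc: [81.0, 72.5, 72.5, 68.0]; ties share the higher rank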
def factory(cls, endpoint, timeout, *args, **kwargs):
"""
A factory function which returns connections which have
succeeded in connecting and are ready for service (or
raises an exception otherwise).
"""
start = time.time()
kwargs['connect_timeout'] = timeout
conn = cls(endpoint, *args, **kwargs)
elapsed = time.time() - start
conn.connected_event.wait(timeout - elapsed)
if conn.last_error:
if conn.is_unsupported_proto_version:
raise ProtocolVersionUnsupported(endpoint, conn.protocol_version)
raise conn.last_error
elif not conn.connected_event.is_set():
conn.close()
raise OperationTimedOut("Timed out creating connection (%s seconds)" % timeout)
else:
return conn
|
A factory function which returns connections which have
succeeded in connecting and are ready for service (or
raises an exception otherwise).
|
def _matrix_adjust(self, X):
"""Adjust all values in X to encode for NaNs and infinities in the data.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
X : array-like, shape=(n_samples, n_feature)
Input array without any NaNs or infinities.
"""
data_matrix = X.data if sparse.issparse(X) else X
# Shift all values to specially encode for NAN/infinity/OTHER and 0
# Old value New Value
# --------- ---------
# N (0..int_max) N + 3
# np.NaN 2
# infinity 2
# *other* 1
#
# A value of 0 is reserved, as that is specially handled in sparse
# matrices.
data_matrix += len(SPARSE_ENCODINGS) + 1
data_matrix[~np.isfinite(data_matrix)] = SPARSE_ENCODINGS['NAN']
return X
|
Adjust all values in X to encode for NaNs and infinities in the data.
Parameters
----------
X : array-like, shape=(n_samples, n_feature)
Input array of type int.
Returns
-------
X : array-like, shape=(n_samples, n_feature)
Input array without any NaNs or infinities.
|
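A small demonstration of the shift table, assuming SPARSE_ENCODINGS = {'OTHER': 1, 'NAN': 2} as the comment implies:

import numpy as np

X = np.array([0.0, 1.0, np.nan, np.inf])
X += 2 + 1                     # len(SPARSE_ENCODINGS) + 1
X[~np.isfinite(X)] = 2         # SPARSE_ENCODINGS['NAN']
assert list(X) == [3.0, 4.0, 2.0, 2.0]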
def diff(self, test_id_1, test_id_2, config=None, **kwargs):
"""
Create a diff report using test_id_1 as a baseline
:param test_id_1: test id to be used as baseline
:param test_id_2: test id to compare against baseline
:param config: config file for diff (optional)
:param kwargs: keyword arguments
"""
output_directory = os.path.join(self._output_directory, 'diff_' + str(test_id_1) + '_' + str(test_id_2))
if kwargs:
if 'output_directory' in kwargs.keys():
output_directory = kwargs['output_directory']
diff_report = Diff([NaaradReport(self._analyses[test_id_1].output_directory, None),
NaaradReport(self._analyses[test_id_2].output_directory, None)],
'diff', output_directory, os.path.join(output_directory, self._resource_path),
self._resource_path)
if config:
naarad.utils.extract_diff_sla_from_config_file(diff_report, config)
diff_report.generate()
if diff_report.sla_failures > 0:
return CONSTANTS.SLA_FAILURE
if diff_report.status != 'OK':
return CONSTANTS.ERROR
return CONSTANTS.OK
|
Create a diff report using test_id_1 as a baseline
:param test_id_1: test id to be used as baseline
:param test_id_2: test id to compare against baseline
:param config: config file for diff (optional)
:param kwargs: keyword arguments
|
def register_signals(self):
"""Register signals."""
from .models import Collection
from .receivers import CollectionUpdater
if self.app.config['COLLECTIONS_USE_PERCOLATOR']:
from .percolator import collection_inserted_percolator, \
collection_removed_percolator, \
collection_updated_percolator
# Register collection signals to update percolators
listen(Collection, 'after_insert',
collection_inserted_percolator)
listen(Collection, 'after_update',
collection_updated_percolator)
listen(Collection, 'after_delete',
collection_removed_percolator)
# Register Record signals to update record['_collections']
self.update_function = CollectionUpdater(app=self.app)
signals.before_record_insert.connect(self.update_function,
weak=False)
signals.before_record_update.connect(self.update_function,
weak=False)
|
Register signals.
|
def get_engine_from_session(dbsession: Session) -> Engine:
"""
Gets the SQLAlchemy :class:`Engine` from a SQLAlchemy :class:`Session`.
"""
engine = dbsession.bind
assert isinstance(engine, Engine)
return engine
|
Gets the SQLAlchemy :class:`Engine` from a SQLAlchemy :class:`Session`.
|
def computePerturbedExpectation(self, u_n, A_n, compute_uncertainty=True, uncertainty_method=None, warning_cutoff=1.0e-10, return_theta=False):
"""Compute the expectation of an observable of phase space function A(x) for a single new state.
Parameters
----------
u_n : np.ndarray, float, shape=(K, N_max)
u_n[n] = u(x_n) - the energy of the new state at all N samples previously sampled.
A_n : np.ndarray, float, shape=(K, N_max)
A_n[n] = A(x_n) - the phase space function of the new state at all N samples previously sampled. If this does NOT depend on state (e.g. position), it's simply the value of the observation. If it DOES depend on the current state, then the observables from the previous states need to be reevaluated at THIS state.
compute_uncertainty : bool, optional
If False, the uncertainties will not be computed (default: True)
uncertainty_method : string, optional
Choice of method used to compute asymptotic covariance method, or None to use default
See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None)
warning_cutoff : float, optional
Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10)
return_theta : bool, optional
Whether or not to return the theta matrix. Can be useful for complicated differences.
Returns
-------
A : float
A is the estimate for the expectation of A(x) for the specified state
dA : float
dA is uncertainty estimate for A
Notes
-----
See Section IV of [1].
# Compute estimators and uncertainty.
#A = sum(W_n[:,K] * A_n[:]) # Eq. 15 of [1]
#dA = abs(A) * np.sqrt(Theta_ij[K,K] + Theta_ij[K+1,K+1] - 2.0 * Theta_ij[K,K+1]) # Eq. 16 of [1]
"""
if len(np.shape(u_n)) == 2:
u_n = kn_to_n(u_n, N_k=self.N_k)
if len(np.shape(A_n)) == 2:
A_n = kn_to_n(A_n, N_k=self.N_k)
# Convert to np matrix.
A_n = np.array(A_n, dtype=np.float64)
# Retrieve N and K for convenience.
N = self.N
K = self.K
# Make A_k all positive so we can operate logarithmically for
# robustness
A_min = np.min(A_n)
A_n = A_n - (A_min - 1)
# Augment W_nk, N_k, and c_k for q_A(x) for the observable, with one
# extra row/column for the specified state (Eq. 13 of [1]).
# weight matrix
Log_W_nk = np.zeros([N, K + 2], dtype=np.float64)
N_k = np.zeros([K + 2], dtype=np.int32) # counts
f_k = np.zeros([K + 2], dtype=np.float64) # free energies
# Fill in first K states with existing q_k(x) from states.
Log_W_nk[:, 0:K] = self.Log_W_nk
N_k[0:K] = self.N_k
# compute the free energy of the additional state
log_w_n = self._computeUnnormalizedLogWeights(u_n)
# Compute free energies
f_k[K] = -_logsum(log_w_n)
Log_W_nk[:, K] = log_w_n + f_k[K]
# compute the observable at this state
Log_W_nk[:, K + 1] = np.log(A_n) + Log_W_nk[:, K]
f_k[K + 1] = -_logsum(Log_W_nk[:, K + 1])
Log_W_nk[:, K + 1] += f_k[K + 1] # normalize the row
A = np.exp(-f_k[K + 1])
if (compute_uncertainty or return_theta):
# Compute augmented asymptotic covariance matrix.
Theta_ij = self._computeAsymptoticCovarianceMatrix(
np.exp(Log_W_nk), N_k, method=uncertainty_method)
if (compute_uncertainty):
dA = np.abs(A) * np.sqrt(
Theta_ij[K + 1, K + 1] + Theta_ij[K, K] - 2.0 * Theta_ij[K, K + 1]) # Eq. 16 of [1]
# shift answers back with the offset now that variances are computed
A += (A_min - 1)
returns = []
returns.append(A)
if (compute_uncertainty):
returns.append(dA)
if (return_theta):
returns.append(Theta_ij)
# Return expectations and uncertainties.
return returns
|
Compute the expectation of an observable of phase space function A(x) for a single new state.
Parameters
----------
u_n : np.ndarray, float, shape=(K, N_max)
u_n[n] = u(x_n) - the energy of the new state at all N samples previously sampled.
A_n : np.ndarray, float, shape=(K, N_max)
A_n[n] = A(x_n) - the phase space function of the new state at all N samples previously sampled. If this does NOT depend on state (e.g. position), it's simply the value of the observation. If it DOES depend on the current state, then the observables from the previous states need to be reevaluated at THIS state.
compute_uncertainty : bool, optional
If False, the uncertainties will not be computed (default: True)
uncertainty_method : string, optional
Choice of method used to compute asymptotic covariance method, or None to use default
See help for computeAsymptoticCovarianceMatrix() for more information on various methods. (default: None)
warning_cutoff : float, optional
Warn if squared-uncertainty is negative and larger in magnitude than this number (default: 1.0e-10)
return_theta : bool, optional
Whether or not to return the theta matrix. Can be useful for complicated differences.
Returns
-------
A : float
A is the estimate for the expectation of A(x) for the specified state
dA : float
dA is uncertainty estimate for A
Notes
-----
See Section IV of [1].
# Compute estimators and uncertainty.
#A = sum(W_n[:,K] * A_n[:]) # Eq. 15 of [1]
#dA = abs(A) * np.sqrt(Theta_ij[K,K] + Theta_ij[K+1,K+1] - 2.0 * Theta_ij[K,K+1]) # Eq. 16 of [1]
|
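For reference, the core estimator is the weighted average <A> = sum_n W_n A_n evaluated in log space for robustness; a minimal numpy/scipy sketch of that step (shift, normalize, logsumexp, unshift), with uncertainties omitted:

import numpy as np
from scipy.special import logsumexp

def perturbed_expectation_sketch(log_w_n, A_n):
    A_min = A_n.min()
    A_pos = A_n - (A_min - 1)                # make A strictly positive
    log_W = log_w_n - logsumexp(log_w_n)     # normalized log weights
    A_hat = np.exp(logsumexp(log_W + np.log(A_pos)))
    return A_hat + (A_min - 1)               # undo the shift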
def logMsg(self, msg, printMsg=True):
"""
logs a message and prints it to the screen
"""
time = datetime.datetime.now().strftime('%I:%M %p')
self.log = '{0}\n{1} | {2}'.format(self.log, time, msg)
if printMsg:
print msg
if self.addLogsToArcpyMessages:
from arcpy import AddMessage
AddMessage(msg)
|
logs a message and prints it to the screen
|
def __get_all_child_accounts_as_array(self, account: Account) -> List[Account]:
""" Returns the whole tree of child accounts in a list """
result = []
# ignore placeholders ? - what if a brokerage account has cash/stocks division?
# if not account.placeholder:
# continue
result.append(account)
for child in account.children:
sub_accounts = self.__get_all_child_accounts_as_array(child)
result += sub_accounts
return result
|
Returns the whole tree of child accounts in a list
|
def syncScrollbars(self):
"""
Synchronizes the various scrollbars within this chart.
"""
chart_hbar = self.uiChartVIEW.horizontalScrollBar()
chart_vbar = self.uiChartVIEW.verticalScrollBar()
x_hbar = self.uiXAxisVIEW.horizontalScrollBar()
x_vbar = self.uiXAxisVIEW.verticalScrollBar()
y_hbar = self.uiYAxisVIEW.horizontalScrollBar()
y_vbar = self.uiYAxisVIEW.verticalScrollBar()
x_hbar.setRange(chart_hbar.minimum(), chart_hbar.maximum())
x_hbar.setValue(chart_hbar.value())
x_vbar.setValue(0)
chart_vbar.setRange(y_vbar.minimum(), y_vbar.maximum())
chart_vbar.setValue(y_vbar.value())
y_hbar.setValue(4)
|
Synchronizes the various scrollbars within this chart.
|
def call_requests(
requests: Union[Request, Iterable[Request]], methods: Methods, debug: bool
) -> Response:
"""
Takes a request or list of Requests and calls them.
Args:
requests: Request object, or a collection of them.
methods: The list of methods that can be called.
debug: Include more information in error responses.
"""
if isinstance(requests, collections.Iterable):
return BatchResponse(safe_call(r, methods, debug=debug) for r in requests)
return safe_call(requests, methods, debug=debug)
|
Takes a request or list of Requests and calls them.
Args:
requests: Request object, or a collection of them.
methods: The list of methods that can be called.
debug: Include more information in error responses.
|
def deprecated(operation=None):
"""
Mark an operation deprecated.
"""
def inner(o):
o.deprecated = True
return o
return inner(operation) if operation else inner
|
Mark an operation deprecated.
|
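The operation=None default lets the decorator be applied bare or with parentheses; both forms set deprecated = True on the target:

@deprecated
def old_op(): pass

@deprecated()
def older_op(): pass

assert old_op.deprecated and older_op.deprecated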
def start_msstitch(exec_drivers, sysargs):
"""Passed all drivers of executable, checks which command is passed to
the executable and then gets the options for a driver, parses them from
command line and runs the driver"""
parser = populate_parser(exec_drivers)
args = parser.parse_args(sysargs[1:])
args.func(**vars(args))
|
Passed all drivers of executable, checks which command is passed to
the executable and then gets the options for a driver, parses them from
command line and runs the driver
|
def export_dist(self, args):
"""Copies a created dist to an output dir.
This makes it easy to navigate to the dist to investigate it
or call build.py, though you do not in general need to do this
and can use the apk command instead.
"""
ctx = self.ctx
dist = dist_from_args(ctx, args)
if dist.needs_build:
raise BuildInterruptingException(
'You asked to export a dist, but there is no dist '
'with suitable recipes available. For now, you must '
'create one first with the create argument.')
if args.symlink:
shprint(sh.ln, '-s', dist.dist_dir, args.output_dir)
else:
shprint(sh.cp, '-r', dist.dist_dir, args.output_dir)
|
Copies a created dist to an output dir.
This makes it easy to navigate to the dist to investigate it
or call build.py, though you do not in general need to do this
and can use the apk command instead.
|
def register_blueprint(self, blueprint: Blueprint, url_prefix: Optional[str]=None) -> None:
"""Register a blueprint on the app.
This results in the blueprint's routes, error handlers
etc... being added to the app.
Arguments:
blueprint: The blueprint to register.
url_prefix: Optional prefix to apply to all paths.
"""
first_registration = False
if blueprint.name in self.blueprints and self.blueprints[blueprint.name] is not blueprint:
raise RuntimeError(
f"Blueprint name '{blueprint.name}' "
f"is already registered by {self.blueprints[blueprint.name]}. "
"Blueprints must have unique names",
)
else:
self.blueprints[blueprint.name] = blueprint
first_registration = True
blueprint.register(self, first_registration, url_prefix=url_prefix)
|
Register a blueprint on the app.
This results in the blueprint's routes, error handlers
etc... being added to the app.
Arguments:
blueprint: The blueprint to register.
url_prefix: Optional prefix to apply to all paths.
|
def version_cli(ctx, porcelain):
# type: (click.Context, bool) -> None
""" Show project version. Has sub commands.
For this command to work you must specify where the project version is
stored. You can do that with version_file conf variable. peltak supports
multiple ways to store the project version. Right now you can store it in a
python file using built-in __version__ variable. You can use node.js
package.json and keep the version there or you can just use a plain text
file that just holds the raw project version. The appropriate storage is
guessed based on the file type and name.
Example Configuration::
version_file: 'src/mypackage/__init__.py'
Examples:
\b
$ peltak version # Pretty print current version
$ peltak version --porcelain # Print version as raw string
$ peltak version bump patch # Bump patch version component
$ peltak version bump minor # Bump minor version component
$ peltak version bump major # Bump major version component
$ peltak version bump release # same as version bump patch
$ peltak version bump --exact=1.2.1 # Set project version to 1.2.1
"""
if ctx.invoked_subcommand:
return
from peltak.core import log
from peltak.core import versioning
current = versioning.current()
if porcelain:
print(current)
else:
log.info("Version: <35>{}".format(current))
|
Show project version. Has sub commands.
For this command to work you must specify where the project version is
stored. You can do that with version_file conf variable. peltak supports
multiple ways to store the project version. Right now you can store it in a
python file using built-in __version__ variable. You can use node.js
package.json and keep the version there or you can just use a plain text
file that just holds the raw project version. The appropriate storage is
guessed based on the file type and name.
Example Configuration::
version_file: 'src/mypackage/__init__.py'
Examples:
\b
$ peltak version # Pretty print current version
$ peltak version --porcelain # Print version as raw string
$ peltak version bump patch # Bump patch version component
$ peltak version bump minor # Bump minor version component
$ peltak version bump major # Bump major version component
$ peltak version bump release # same as version bump patch
$ peltak version bump --exact=1.2.1 # Set project version to 1.2.1
|
def bigquery_schema(table):
"""Infer the schema of a BigQuery `table` object."""
fields = OrderedDict((el.name, dt.dtype(el)) for el in table.schema)
partition_info = table._properties.get('timePartitioning', None)
# We have a partitioned table
if partition_info is not None:
partition_field = partition_info.get('field', NATIVE_PARTITION_COL)
# Only add a new column if it's not already a column in the schema
fields.setdefault(partition_field, dt.timestamp)
return sch.schema(fields)
|
Infer the schema of a BigQuery `table` object.
|
def read_roi(fileobj):
'''
points = read_roi(fileobj)
Read ImageJ's ROI format. Points are returned in a nx2 array. Each row
is in [row, column] -- that is, (y,x) -- order.
'''
# This is based on:
# http://rsbweb.nih.gov/ij/developer/source/ij/io/RoiDecoder.java.html
# http://rsbweb.nih.gov/ij/developer/source/ij/io/RoiEncoder.java.html
SPLINE_FIT = 1
DOUBLE_HEADED = 2
OUTLINE = 4
OVERLAY_LABELS = 8
OVERLAY_NAMES = 16
OVERLAY_BACKGROUNDS = 32
OVERLAY_BOLD = 64
SUB_PIXEL_RESOLUTION = 128
DRAW_OFFSET = 256
class RoiType:
POLYGON = 0
RECT = 1
OVAL = 2
LINE = 3
FREELINE = 4
POLYLINE = 5
NOROI = 6
FREEHAND = 7
TRACED = 8
ANGLE = 9
POINT = 10
def get8():
s = fileobj.read(1)
if not s:
raise IOError('readroi: Unexpected EOF')
return ord(s)
def get16():
b0 = get8()
b1 = get8()
return (b0 << 8) | b1
def get32():
s0 = get16()
s1 = get16()
return (s0 << 16) | s1
def getfloat():
v = np.int32(get32())
return v.view(np.float32)
#===========================================================================
#Read Header data
magic = fileobj.read(4)
if magic != b'Iout':
raise ValueError('Magic number not found')
version = get16()
# It seems that the roi type field occupies 2 Bytes, but only one is used
roi_type = get8()
# Discard second Byte:
get8()
top = get16()
left = get16()
bottom = get16()
right = get16()
n_coordinates = get16()
x1 = getfloat()
y1 = getfloat()
x2 = getfloat()
y2 = getfloat()
stroke_width = get16()
shape_roi_size = get32()
stroke_color = get32()
fill_color = get32()
subtype = get16()
options = get16()
arrow_style = get8()
arrow_head_size = get8()
rect_arc_size = get16()
position = get32()
header2offset = get32()
# End Header data
#===========================================================================
#RoiDecoder.java checks the version when setting sub-pixel resolution, therefore so do we
subPixelResolution = ((options&SUB_PIXEL_RESOLUTION)!=0) and (version>=222)
# Check exceptions
if roi_type not in [RoiType.FREEHAND, RoiType.TRACED, RoiType.POLYGON, RoiType.RECT, RoiType.POINT]:
raise NotImplementedError('roireader: ROI type %s not supported' % roi_type)
if subtype != 0:
raise NotImplementedError('roireader: ROI subtype %s not supported (!= 0)' % subtype)
if roi_type == RoiType.RECT:
if subPixelResolution:
return np.array(
[[y1, x1], [y1, x1+x2], [y1+y2, x1+x2], [y1+y2, x1]],
dtype=np.float32)
else:
return np.array(
[[top, left], [top, right], [bottom, right], [bottom, left]],
dtype=np.int16)
if subPixelResolution:
getc = getfloat
points = np.empty((n_coordinates, 2), dtype=np.float32)
fileobj.seek(4*n_coordinates, 1)
else:
getc = get16
points = np.empty((n_coordinates, 2), dtype=np.int16)
points[:, 1] = [getc() for i in range(n_coordinates)]
points[:, 0] = [getc() for i in range(n_coordinates)]
if not subPixelResolution:
points[:, 1] += left
points[:, 0] += top
return points
|
points = read_roi(fileobj)
Read ImageJ's ROI format. Points are returned in a nx2 array. Each row
is in [row, column] -- that is, (y,x) -- order.
|
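The get8/get16/get32/getfloat helpers are hand-rolled big-endian reads; a sketch of equivalent reads with the standard struct module:

import struct

def get16(fileobj):
    return struct.unpack('>H', fileobj.read(2))[0]   # big-endian unsigned 16-bit

def get32(fileobj):
    return struct.unpack('>I', fileobj.read(4))[0]   # big-endian unsigned 32-bit

def getfloat(fileobj):
    return struct.unpack('>f', fileobj.read(4))[0]   # same 4 bytes viewed as float32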
def _RemoveAuthorizedKeys(self, user):
"""Remove a Linux user account's authorized keys file to prevent login.
Args:
user: string, the Linux user account to remove access.
"""
pw_entry = self._GetUser(user)
if not pw_entry:
return
home_dir = pw_entry.pw_dir
authorized_keys_file = os.path.join(home_dir, '.ssh', 'authorized_keys')
if os.path.exists(authorized_keys_file):
try:
os.remove(authorized_keys_file)
except OSError as e:
message = 'Could not remove authorized keys for user %s. %s.'
self.logger.warning(message, user, str(e))
|
Remove a Linux user account's authorized keys file to prevent login.
Args:
user: string, the Linux user account to remove access.
|
def get_session_token(self):
''' get session token '''
# HTTP POST query to session authenticate API
try:
response = requests.post(self.__session_url__ + 'sessionauth/v1/authenticate',
cert=(self.__crt__, self.__key__), verify=True)
except requests.exceptions.RequestException as err:
self.logger.error(err)
raise
if response.status_code == 200:
# load json response as list
data = json.loads(response.text)
self.logger.debug(data)
# grab token from list
session_token = data['token']
else:
raise Exception('BAD HTTP STATUS: %s' % str(response.status_code))
# return the token
self.logger.debug(session_token)
return session_token
|
get session token
|
def close_monomers(self, group, cutoff=4.0):
"""Returns a list of Monomers from within a cut off distance of the Monomer
Parameters
----------
group: BaseAmpal or Subclass
Group to be searched for Monomers that are close to this Monomer.
cutoff: float
Distance cut off.
Returns
-------
nearby_residues: [Monomers]
List of Monomers within cut off distance.
"""
nearby_residues = []
for self_atom in self.atoms.values():
nearby_atoms = group.is_within(cutoff, self_atom)
for res_atom in nearby_atoms:
if res_atom.parent not in nearby_residues:
nearby_residues.append(res_atom.parent)
return nearby_residues
|
Returns a list of Monomers from within a cut off distance of the Monomer
Parameters
----------
group: BaseAmpal or Subclass
Group to be searched for Monomers that are close to this Monomer.
cutoff: float
Distance cut off.
Returns
-------
nearby_residues: [Monomers]
List of Monomers within cut off distance.
|
def convert(area_um, deform, emodulus,
channel_width_in, channel_width_out,
flow_rate_in, flow_rate_out,
viscosity_in, viscosity_out,
inplace=False):
"""convert area-deformation-emodulus triplet
The conversion formula is described in :cite:`Mietke2015`.
Parameters
----------
area_um: ndarray
Convex cell area [µm²]
deform: ndarray
Deformation
emodulus: ndarray
Young's Modulus [kPa]
channel_width_in: float
Original channel width [µm]
channel_width_out: float
Target channel width [µm]
flow_rate_in: float
Original flow rate [µl/s]
flow_rate_out: float
Target flow rate [µl/s]
viscosity_in: float
Original viscosity [mPa*s]
viscosity_out: float
Target viscosity [mPa*s]
inplace: bool
If True, override input arrays with corrected data
Returns
-------
area_um_corr: ndarray
Corrected cell area [µm²]
deform_corr: ndarray
Deformation (a copy if `inplace` is False)
emodulus_corr: ndarray
Corrected emodulus [kPa]
"""
copy = not inplace
# make sure area_um_corr is not an integer array
area_um_corr = np.array(area_um, dtype=float, copy=copy)
deform_corr = np.array(deform, copy=copy)
emodulus_corr = np.array(emodulus, copy=copy)
if channel_width_in != channel_width_out:
area_um_corr *= (channel_width_out / channel_width_in)**2
if (flow_rate_in != flow_rate_out or
viscosity_in != viscosity_out or
channel_width_in != channel_width_out):
emodulus_corr *= (flow_rate_out / flow_rate_in) \
* (viscosity_out / viscosity_in) \
* (channel_width_in / channel_width_out)**3
return area_um_corr, deform_corr, emodulus_corr
|
convert area-deformation-emodulus triplet
The conversion formula is described in :cite:`Mietke2015`.
Parameters
----------
area_um: ndarray
Convex cell area [µm²]
deform: ndarray
Deformation
emodulus: ndarray
Young's Modulus [kPa]
channel_width_in: float
Original channel width [µm]
channel_width_out: float
Target channel width [µm]
flow_rate_in: float
Original flow rate [µl/s]
flow_rate_out: float
Target flow rate [µl/s]
viscosity_in: float
Original viscosity [mPa*s]
viscosity_out: float
Target viscosity [mPa*s]
inplace: bool
If True, override input arrays with corrected data
Returns
-------
area_um_corr: ndarray
Corrected cell area [µm²]
deform_corr: ndarray
Deformation (a copy if `inplace` is False)
emodulus_corr: ndarray
Corrected emodulus [kPa]
|
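A worked example of the scaling: halving the channel width (flow rate and viscosity fixed) quarters the area and multiplies the emodulus by (channel_width_in/channel_width_out)**3 = 8:

import numpy as np

area, deform, emod = convert(
    area_um=np.array([100.0]), deform=np.array([0.1]), emodulus=np.array([1.5]),
    channel_width_in=20, channel_width_out=10,
    flow_rate_in=0.04, flow_rate_out=0.04,
    viscosity_in=15, viscosity_out=15)
assert area[0] == 25.0    # 100 * (10/20)**2
assert emod[0] == 12.0    # 1.5 * (20/10)**3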
def create_roles(apps, schema_editor):
"""Create the enterprise roles if they do not already exist."""
SystemWideEnterpriseRole = apps.get_model('enterprise', 'SystemWideEnterpriseRole')
SystemWideEnterpriseRole.objects.update_or_create(name=ENTERPRISE_ADMIN_ROLE)
SystemWideEnterpriseRole.objects.update_or_create(name=ENTERPRISE_LEARNER_ROLE)
|
Create the enterprise roles if they do not already exist.
|
def gettrace(self, burn=0, thin=1, chain=-1, slicing=None):
"""Return the trace (last by default).
:Parameters:
burn : integer
The number of transient steps to skip.
thin : integer
Keep one in thin.
chain : integer
The index of the chain to fetch. If None, return all chains. The
default is to return the last chain.
slicing : slice object
A slice overriding burn and thin assignment.
"""
if chain is not None:
tables = [self.db._gettables()[chain], ]
else:
tables = self.db._gettables()
for i, table in enumerate(tables):
if slicing is not None:
burn, stop, thin = slicing.start, slicing.stop, slicing.step
if slicing is None or stop is None:
stop = table.nrows
col = table.read(start=burn, stop=stop, step=thin, field=self.name)
if i == 0:
data = np.asarray(col)
else:
data = np.append(data, col, axis=0)
return data
|
Return the trace (last by default).
:Parameters:
burn : integer
The number of transient steps to skip.
thin : integer
Keep one in thin.
chain : integer
The index of the chain to fetch. If None, return all chains. The
default is to return the last chain.
slicing : slice object
A slice overriding burn and thin assignment.
|
def tocimxmlstr(value, indent=None):
"""
Return the CIM-XML representation of the CIM object or CIM data type,
as a :term:`unicode string`.
*New in pywbem 0.9.*
The returned CIM-XML representation is consistent with :term:`DSP0201`.
Parameters:
value (:term:`CIM object` or :term:`CIM data type` or :term:`Element`):
The CIM object or CIM data type to be converted to CIM-XML, or an
:term:`Element` object that already is the CIM-XML representation.
indent (:term:`string` or :term:`integer`):
`None` indicates that a single-line version of the XML should be
returned, without any whitespace between the XML elements.
Other values indicate that a prettified, multi-line version of the XML
should be returned. A string value specifies the indentation string to
be used for each level of nested XML elements. An integer value
specifies an indentation string of so many blanks.
Returns:
The CIM-XML representation of the value, as a :term:`unicode string`.
"""
if isinstance(value, Element):
xml_elem = value
else:
xml_elem = tocimxml(value)
if indent is None:
xml_str = xml_elem.toxml()
else:
if isinstance(indent, six.string_types):
pass # use indent, as specified
elif isinstance(indent, six.integer_types):
indent = ' ' * indent
else:
raise TypeError(
_format("Type of indent must be string or integer, but is: {0}",
type(indent)))
xml_str = xml_elem.toprettyxml(indent=indent)
# xml_str is a unicode string if required based upon its content.
return _ensure_unicode(xml_str)
|
Return the CIM-XML representation of the CIM object or CIM data type,
as a :term:`unicode string`.
*New in pywbem 0.9.*
The returned CIM-XML representation is consistent with :term:`DSP0201`.
Parameters:
value (:term:`CIM object` or :term:`CIM data type` or :term:`Element`):
The CIM object or CIM data type to be converted to CIM-XML, or an
:term:`Element` object that already is the CIM-XML representation.
indent (:term:`string` or :term:`integer`):
`None` indicates that a single-line version of the XML should be
returned, without any whitespace between the XML elements.
Other values indicate that a prettified, multi-line version of the XML
should be returned. A string value specifies the indentation string to
be used for each level of nested XML elements. An integer value
specifies an indentation string of so many blanks.
Returns:
The CIM-XML representation of the value, as a :term:`unicode string`.
|
def is_entailed_by(self, other):
""" If the other is as or more specific than self"""
other = BoolCell.coerce(other)
if self.value == U or other.value == self.value:
return True
return False
|
Return True if the other is as or more specific than self.
|
def dial(self, number, timeout=5, callStatusUpdateCallbackFunc=None):
""" Calls the specified phone number using a voice phone call
:param number: The phone number to dial
:param timeout: Maximum time to wait for the call to be established
:param callStatusUpdateCallbackFunc: Callback function that is executed if the call's status changes due to
remote events (i.e. when it is answered, the call is ended by the remote party)
:return: The outgoing call
:rtype: gsmmodem.modem.Call
"""
if self._waitForCallInitUpdate:
# Wait for the "call originated" notification message
self._dialEvent = threading.Event()
try:
self.write('ATD{0};'.format(number), timeout=timeout, waitForResponse=self._waitForAtdResponse)
except Exception:
self._dialEvent = None # Cancel the thread sync lock
raise
else:
# Don't wait for a call init update - base the call ID on the number of active calls
self.write('ATD{0};'.format(number), timeout=timeout, waitForResponse=self._waitForAtdResponse)
self.log.debug("Not waiting for outgoing call init update message")
callId = len(self.activeCalls) + 1
callType = 0 # Assume voice
call = Call(self, callId, callType, number, callStatusUpdateCallbackFunc)
self.activeCalls[callId] = call
return call
if self._mustPollCallStatus:
# Fake a call notification by polling call status until the status indicates that the call is being dialed
threading.Thread(target=self._pollCallStatus, kwargs={'expectedState': 0, 'timeout': timeout}).start()
if self._dialEvent.wait(timeout):
self._dialEvent = None
callId, callType = self._dialResponse
call = Call(self, callId, callType, number, callStatusUpdateCallbackFunc)
self.activeCalls[callId] = call
return call
else: # Call establishing timed out
self._dialEvent = None
raise TimeoutException()
|
Calls the specified phone number using a voice phone call
:param number: The phone number to dial
:param timeout: Maximum time to wait for the call to be established
:param callStatusUpdateCallbackFunc: Callback function that is executed if the call's status changes due to
remote events (i.e. when it is answered, the call is ended by the remote party)
:return: The outgoing call
:rtype: gsmmodem.modem.Call
|
def get(self, key):
"""
Get parsed result.
After :func:`parse` the argv, we can get the parsed results::
# command.option('-f', 'description of -f')
command.get('-f')
command.get('verbose')
# we can also get ``verbose``: command.verbose
"""
value = self._results.get(key)
if value is not None:
return value
# get from option default value
option = list(filter(lambda o: o.key == key, self._option_list))
if not option:
raise ValueError('No such option: %s' % key)
option = option[0]
return option.default
|
Get parsed result.
After :func:`parse` the argv, we can get the parsed results::
# command.option('-f', 'description of -f')
command.get('-f')
command.get('verbose')
# we can also get ``verbose``: command.verbose
|
def ProgramScanner(**kw):
"""Return a prototype Scanner instance for scanning executable
files for static-lib dependencies"""
kw['path_function'] = SCons.Scanner.FindPathDirs('LIBPATH')
ps = SCons.Scanner.Base(scan, "ProgramScanner", **kw)
return ps
|
Return a prototype Scanner instance for scanning executable
files for static-lib dependencies
|
def inverse_transform(self, y, lengths=None):
"""Return label strings.
Args:
y: label id matrix.
lengths: sentences length.
Returns:
list: list of list of strings.
"""
y = np.argmax(y, -1)
inverse_y = [self._label_vocab.id2doc(ids) for ids in y]
if lengths is not None:
inverse_y = [iy[:l] for iy, l in zip(inverse_y, lengths)]
return inverse_y
|
Return label strings.
Args:
y: label id matrix.
lengths: sentences length.
Returns:
list: list of list of strings.
|
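A small standalone sketch of the decoding step above, assuming a vocabulary object with an id2doc method as the code uses; the two-label tag set is made up.
import numpy as np

class _StubVocab(object):
    # Hypothetical stand-in for self._label_vocab.
    _tags = ['O', 'B-PER']
    def id2doc(self, ids):
        return [self._tags[i] for i in ids]

# One sentence, three tokens, scores over two labels.
y = np.array([[[0.9, 0.1], [0.2, 0.8], [0.7, 0.3]]])
ids = np.argmax(y, -1)                  # -> [[0, 1, 0]]
print(_StubVocab().id2doc(ids[0]))      # -> ['O', 'B-PER', 'O']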
def patchproperty(*cls, **kwargs):
"""
    Decorator that patches a getter property onto the given class(es).
EX)
class B(A):
pass
@patchproperty(B)
def prop(self):
return 'hello'
:param cls:
:param kwargs:
"""
def _patch(fun):
m = kwargs.pop('property', None) or fun.__name__
p = property(fun)
for c in cls:
setattr(c, m, p)
def wrap(fun):
_patch(fun)
return fun
return wrap
|
Decorator that patches a getter property onto the given class(es).
EX)
class B(A):
pass
@patchproperty(B)
def prop(self):
return 'hello'
:param cls:
:param kwargs:
|
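Because the decorator is defined in full above, the docstring example can be made runnable as-is; the class names mirror the docstring.
class A(object):
    pass

class B(A):
    pass

@patchproperty(B)
def prop(self):
    return 'hello'

assert B().prop == 'hello'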
def text(self, selector):
"""Return text result that executed by given css selector
:param selector: `str` css selector
:return: `list` or `None`
"""
result = self.__bs4.select(selector)
        return [r.get_text() for r in result] \
            if len(result) > 1 else \
            result[0].get_text() if len(result) > 0 else None
|
Return the text result(s) produced by the given css selector
:param selector: `str` css selector
:return: `list` or `None`
|
def num_samples(self):
"""
Return the total number of samples.
"""
with self.container.open_if_needed(mode='r') as cnt:
return cnt.get(self.key)[0].shape[0]
|
Return the total number of samples.
|
def element_exists(self, element):
"""
Checks if given element exists.
Usage::
>>> plist_file_parser = PlistFileParser("standard.plist")
>>> plist_file_parser.parse()
True
>>> plist_file_parser.element_exists("String A")
True
>>> plist_file_parser.element_exists("String Nemo")
False
:param element: Element to check existence.
:type element: unicode
:return: Element existence.
:rtype: bool
"""
if not self.__elements:
return False
for item in foundations.walkers.dictionaries_walker(self.__elements):
path, key, value = item
if key == element:
LOGGER.debug("> '{0}' attribute exists.".format(element))
return True
LOGGER.debug("> '{0}' element doesn't exists.".format(element))
return False
|
Checks if given element exists.
Usage::
>>> plist_file_parser = PlistFileParser("standard.plist")
>>> plist_file_parser.parse()
True
>>> plist_file_parser.element_exists("String A")
True
>>> plist_file_parser.element_exists("String Nemo")
False
:param element: Element to check existence.
:type element: unicode
:return: Element existence.
:rtype: bool
|
def _run__http(self, action, replace):
"""More complex HTTP query."""
query = action['query']
# self._debug = True
url = '{type}://{host}{path}'.format(path=query['path'], **action)
content = None
method = query.get('method', "get").lower()
self.debug("{} {} url={}\n", action['type'], method, url)
if method == "post":
content = query['content']
headers = query.get('headers', {})
if replace and action.get('template'):
            url = self.rfxcfg.macro_expand(url, replace)
if content:
if isinstance(content, dict):
for key, value in content.items():
content[key] = self.rfxcfg.macro_expand(value, replace)
else:
content = self.rfxcfg.macro_expand(content, replace)
newhdrs = dict()
for key, value in headers.items():
newhdrs[key.lower()] = self.rfxcfg.macro_expand(value, replace)
headers = newhdrs
self.debug("{} headers={}\n", action['type'], headers)
self.debug("{} content={}\n", action['type'], content)
if content and isinstance(content, dict):
content = json.dumps(content)
self.logf("Action {name} {type}\n", **action)
result = getattr(requests, method)(url, headers=headers, data=content, timeout=action.get('timeout', 5))
expect = action.get('expect', {})
expected_codes = expect.get("response-codes", (200, 201, 202, 204))
self.debug("{} expect codes={}\n", action['type'], expected_codes)
self.debug("{} status={} content={}\n", action['type'], result.status_code, result.text)
if result.status_code not in expected_codes:
self.die("Unable to make {} call, unexpected result ({})",
action['type'], result.status_code)
if 'content' in expect:
self.debug("{} expect content={}\n", action['type'], expect['content'])
if expect['content'] not in result.text:
self.die("{} call to {} failed\nExpected: {}\nReceived:\n{}",
action['type'], url, expect['content'], result.text)
if 'regex' in expect:
self.debug("{} expect regex={}\n", action['type'], expect['regex'])
if not re.search(expect['regex'], result.text):
self.die("{} call to {} failed\nRegex: {}\nDid not match:\n{}",
action['type'], url, expect['regex'], result.text)
self.log(result.text, level=common.log_msg)
self.logf("Success, status={}\n", result.status_code, level=common.log_good)
return True
|
More complex HTTP query.
|
def hmmalign_sequences(self, hmm, sequences, output_file):
'''Run hmmalign and convert output to aligned fasta format
Parameters
----------
hmm: str
path to hmm file
sequences: str
path to file of sequences to be aligned
output_file: str
write sequences to this file
Returns
-------
nothing
'''
cmd = 'hmmalign --trim %s %s' % (hmm, sequences)
output = extern.run(cmd)
with open(output_file, 'w') as f:
SeqIO.write(SeqIO.parse(StringIO(output), 'stockholm'), f, 'fasta')
|
Run hmmalign and convert output to aligned fasta format
Parameters
----------
hmm: str
path to hmm file
sequences: str
path to file of sequences to be aligned
output_file: str
write sequences to this file
Returns
-------
nothing
|
def get_write_fields(self):
"""
Get the list of fields used to write the header, separating
record and signal specification fields. Returns the default
required fields, the user defined fields,
and their dependencies.
Does NOT include `d_signal` or `e_d_signal`.
Returns
-------
rec_write_fields : list
Record specification fields to be written. Includes
'comment' if present.
sig_write_fields : dict
Dictionary of signal specification fields to be written,
with values equal to the channels that need to be present
for each field.
"""
# Record specification fields
rec_write_fields = self.get_write_subset('record')
# Add comments if any
        if self.comments is not None:
rec_write_fields.append('comments')
# Get required signal fields if signals are present.
self.check_field('n_sig')
if self.n_sig > 0:
sig_write_fields = self.get_write_subset('signal')
else:
sig_write_fields = None
return rec_write_fields, sig_write_fields
|
Get the list of fields used to write the header, separating
record and signal specification fields. Returns the default
required fields, the user defined fields,
and their dependencies.
Does NOT include `d_signal` or `e_d_signal`.
Returns
-------
rec_write_fields : list
Record specification fields to be written. Includes
'comment' if present.
sig_write_fields : dict
Dictionary of signal specification fields to be written,
with values equal to the channels that need to be present
for each field.
|
def _group(self, group_data):
"""Return previously stored group or new group.
Args:
            group_data (dict|obj): A Group dict or an instance of a Group object.
Returns:
dict|obj: The new Group dict/object or the previously stored dict/object.
"""
if isinstance(group_data, dict):
# get xid from dict
xid = group_data.get('xid')
else:
# get xid from object
xid = group_data.xid
if self.groups.get(xid) is not None:
# return existing group from memory
group_data = self.groups.get(xid)
elif self.groups_shelf.get(xid) is not None:
# return existing group from shelf
group_data = self.groups_shelf.get(xid)
else:
# store new group
self.groups[xid] = group_data
return group_data
|
Return previously stored group or new group.
Args:
group_data (dict|obj): A Group dict or an instance of a Group object.
Returns:
dict|obj: The new Group dict/object or the previously stored dict/object.
|
def args(self, *args, **kwargs):
"""
        Creates an ArgumentsExpectationRule and adds it to the expectation
"""
self._any_args = False
self._arguments_rule.set_args(*args, **kwargs)
return self
|
Creates an ArgumentsExpectationRule and adds it to the expectation
|
def watch(models, criterion=None, log="gradients", log_freq=100):
"""
Hooks into the torch model to collect gradients and the topology. Should be extended
to accept arbitrary ML models.
:param (torch.Module) models: The model to hook, can be a tuple
:param (torch.F) criterion: An optional loss value being optimized
:param (str) log: One of "gradients", "parameters", "all", or None
:param (int) log_freq: log gradients and parameters every N batches
:return: (wandb.Graph) The graph object that will populate after the first backward pass
"""
global watch_called
if run is None:
raise ValueError(
"You must call `wandb.init` before calling watch")
if watch_called:
raise ValueError(
"You can only call `wandb.watch` once per process. If you want to watch multiple models, pass them in as a tuple."
)
watch_called = True
log_parameters = False
log_gradients = True
if log == "all":
log_parameters = True
elif log == "parameters":
log_parameters = True
log_gradients = False
elif log is None:
log_gradients = False
if not isinstance(models, (tuple, list)):
models = (models,)
graphs = []
prefix = ''
for idx, model in enumerate(models):
if idx > 0:
prefix = "graph_%i" % idx
run.history.torch.add_log_hooks_to_pytorch_module(
model, log_parameters=log_parameters, log_gradients=log_gradients, prefix=prefix, log_freq=log_freq)
graph = wandb_torch.TorchGraph.hook_torch(model, criterion, graph_idx=idx)
graphs.append(graph)
# NOTE: the graph is set in run.summary by hook_torch on the backward pass
return graphs
|
Hooks into the torch model to collect gradients and the topology. Should be extended
to accept arbitrary ML models.
:param (torch.Module) models: The model to hook, can be a tuple
:param (torch.F) criterion: An optional loss value being optimized
:param (str) log: One of "gradients", "parameters", "all", or None
:param (int) log_freq: log gradients and parameters every N batches
:return: (wandb.Graph) The graph object that will populate after the first backward pass
|
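A short usage sketch for watch, assuming a typical PyTorch setup; the project name and model are placeholders.
import torch.nn as nn
import wandb

wandb.init(project='demo')    # watch requires a prior wandb.init call
model = nn.Linear(10, 2)      # placeholder model
wandb.watch(model, log='all', log_freq=50)
# ... training loop: gradients and parameters are logged every 50 batches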
async def handle_action(self, action_type, payload, **kwds):
"""
The default action Handler has no action.
"""
# if there is a service attached to the action handler
if hasattr(self, 'service'):
# handle roll calls
await roll_call_handler(self.service, action_type, payload, **kwds)
|
The default action Handler has no action.
|
def get_cluster(
self,
project_id,
region,
cluster_name,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Gets the resource representation for a cluster in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `cluster_name`:
>>> cluster_name = ''
>>>
>>> response = client.get_cluster(project_id, region, cluster_name)
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types.Cluster` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "get_cluster" not in self._inner_api_calls:
self._inner_api_calls[
"get_cluster"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.get_cluster,
default_retry=self._method_configs["GetCluster"].retry,
default_timeout=self._method_configs["GetCluster"].timeout,
client_info=self._client_info,
)
request = clusters_pb2.GetClusterRequest(
project_id=project_id, region=region, cluster_name=cluster_name
)
return self._inner_api_calls["get_cluster"](
request, retry=retry, timeout=timeout, metadata=metadata
)
|
Gets the resource representation for a cluster in a project.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.ClusterControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `cluster_name`:
>>> cluster_name = ''
>>>
>>> response = client.get_cluster(project_id, region, cluster_name)
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the cluster
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
cluster_name (str): Required. The cluster name.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types.Cluster` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
|
def read(self, output_tile):
"""
Read from written process output.
Parameters
----------
output_tile : BufferedTile or tile index tuple
Member of the output tile pyramid (not necessarily the process
pyramid, if output has a different metatiling setting)
Returns
-------
data : NumPy array or features
process output
"""
if self.config.mode not in ["readonly", "continue", "overwrite"]:
raise ValueError("process mode must be readonly, continue or overwrite")
if isinstance(output_tile, tuple):
output_tile = self.config.output_pyramid.tile(*output_tile)
elif isinstance(output_tile, BufferedTile):
pass
else:
raise TypeError("output_tile must be tuple or BufferedTile")
return self.config.output.read(output_tile)
|
Read from written process output.
Parameters
----------
output_tile : BufferedTile or tile index tuple
Member of the output tile pyramid (not necessarily the process
pyramid, if output has a different metatiling setting)
Returns
-------
data : NumPy array or features
process output
|
def download(self, content, filename=None,
media_type=None, charset='UTF-8'):
"""Send content as attachment (downloadable file).
The *content* is sent after setting Content-Disposition header
such that the client prompts the user to save the content
locally as a file. An HTTP response status code may be specified
as *content*. If the status code is not ``200``, then this
method does nothing and returns the status code.
The filename used for the download is determined according to
the following rules. The rules are followed in the specified
order.
1. If *filename* is specified, then the base name from this
argument, i.e. ``os.path.basename(filename)``, is used as the
filename for the download.
2. If *filename* is not specified or specified as ``None``
(the default), then the base name from the file path
specified to a previous :meth:`static` call made while
handling the current request is used.
3. If *filename* is not specified and there was no
:meth:`static` call made previously for the current
request, then the base name from the current HTTP request
path is used.
4. As a result of the above steps, if the resultant *filename*
turns out to be empty, then :exc:`ice.LogicError` is raised.
The *media_type* and *charset* arguments are used in the same
manner as they are used in :meth:`static`.
Arguments:
content (str, bytes or int): Content to be sent as download or
HTTP status code of the response to be returned.
filename (str): Filename to use for saving the content
media_type (str, optional): Media type of file.
charset (str, optional): Character set of file.
Returns:
content, i.e. the first argument passed to this method.
Raises:
LogicError: When filename cannot be determined.
"""
if isinstance(content, int) and content != 200:
return content
if filename is not None:
filename = os.path.basename(filename)
elif 'filename' in self.response.state:
filename = self.response.state['filename']
else:
filename = os.path.basename(self.request.path)
if filename == '':
raise LogicError('Cannot determine filename for download')
if media_type is not None:
self.response.media_type = media_type
else:
self.response.media_type = mimetypes.guess_type(filename)[0]
self.response.charset = charset
self.response.add_header('Content-Disposition', 'attachment; '
'filename="{}"'.format(filename))
return content
|
Send content as attachment (downloadable file).
The *content* is sent after setting Content-Disposition header
such that the client prompts the user to save the content
locally as a file. An HTTP response status code may be specified
as *content*. If the status code is not ``200``, then this
method does nothing and returns the status code.
The filename used for the download is determined according to
the following rules. The rules are followed in the specified
order.
1. If *filename* is specified, then the base name from this
argument, i.e. ``os.path.basename(filename)``, is used as the
filename for the download.
2. If *filename* is not specified or specified as ``None``
(the default), then the base name from the file path
specified to a previous :meth:`static` call made while
handling the current request is used.
3. If *filename* is not specified and there was no
:meth:`static` call made previously for the current
request, then the base name from the current HTTP request
path is used.
4. As a result of the above steps, if the resultant *filename*
turns out to be empty, then :exc:`ice.LogicError` is raised.
The *media_type* and *charset* arguments are used in the same
manner as they are used in :meth:`static`.
Arguments:
content (str, bytes or int): Content to be sent as download or
HTTP status code of the response to be returned.
filename (str): Filename to use for saving the content
media_type (str, optional): Media type of file.
charset (str, optional): Character set of file.
Returns:
content, i.e. the first argument passed to this method.
Raises:
LogicError: When filename cannot be determined.
|
def sendEmail(sender,
recipients,
subject,
body,
attachments=None,
cc=None,
bcc=None,
contentType='text/html',
server=None,
useMSExchange=None,
encoding='utf-8',
raiseErrors=False):
"""
Sends an email from the inputted email address to the
list of given recipients with the inputted subject and
body. This will also attach the inputted list of
attachments to the email. The server value will default
to mail.<sender_domain> and you can use a ':' to specify
a port for the server.
:param sender <str>
:param recipients <list> [ <str>, .. ]
:param subject <str>
:param body <str>
:param attachments <list> [ <str>, .. ]
:param cc <list> [ <str>, .. ]
:param bcc <list> [ <str>, .. ]
:param contentType <str>
:param server <str>
:return <bool> success
"""
if attachments is None:
attachments = []
if cc is None:
cc = []
if bcc is None:
bcc = []
if server is None:
server = NOTIFY_SERVER
if useMSExchange is None:
useMSExchange = NOTIFY_SERVER_MSX
# normalize the data
sender = nstr(sender)
    recipients = list(map(nstr, recipients))
# make sure we have valid information
if not isEmail(sender):
err = errors.NotifyError('%s is not a valid email address' % sender)
logger.error(err)
return False
# make sure there are recipients
if not recipients:
err = errors.NotifyError('No recipients were supplied.')
logger.error(err)
return False
# build the server domain
if not server:
err = errors.NotifyError('No email server specified')
logger.error(err)
return False
# create the email
msg = MIMEMultipart(_subtype='related')
msg['Subject'] = projex.text.toUtf8(subject)
msg['From'] = sender
msg['To'] = ','.join(recipients)
msg['Cc'] = ','.join([nstr(addr) for addr in cc if isEmail(addr)])
msg['Bcc'] = ','.join([nstr(addr) for addr in bcc if isEmail(addr)])
msg['Date'] = nstr(datetime.datetime.now())
msg['Content-type'] = 'Multipart/mixed'
msg.preamble = 'This is a multi-part message in MIME format.'
msg.epilogue = ''
# build the body
bodyhtml = projex.text.toUtf8(body)
eattach = []
# include inline images
    filepaths = re.findall(r'<img\s+src="(file:///[^"]+)"[^/>]*/?>', bodyhtml)
for filepath in filepaths:
filename = filepath.replace('file:///', '')
if os.path.exists(filename) and filename not in attachments:
# replace with the attachment id
cid = 'cid:%s' % os.path.basename(filename)
bodyhtml = bodyhtml.replace(filename, cid)
# add the image to the attachments
fp = open(nstr(filename), 'rb')
msgImage = MIMEImage(fp.read())
fp.close()
# add the msg image to the msg
content_id = '<%s>' % os.path.basename(filename)
inline_link = 'inline; filename="%s"' % os.path.basename(filename)
msgImage.add_header('Content-ID', content_id)
msgImage.add_header('Content-Disposition', inline_link)
eattach.append(msgImage)
attachments.append(filename)
# create the body text
msgText = MIMEText(bodyhtml, contentType, encoding)
msgText['Content-type'] = contentType
# include attachments
for attach in attachments:
fp = open(nstr(attach), 'rb')
txt = MIMEBase('application', 'octet-stream')
txt.set_payload(fp.read())
fp.close()
encode_base64(txt)
attachment = 'attachment; filename="%s"' % os.path.basename(attach)
txt.add_header('Content-Disposition', attachment)
eattach.append(txt)
eattach.insert(0, msgText)
# add the attachments to the message
for attach in eattach:
msg.attach(attach)
# create the connection to the email server
try:
smtp_server = smtplib.SMTP(nstr(server))
    except socket.gaierror as err:
logger.error(err)
if raiseErrors:
raise
return False
    except Exception as err:
logger.error(err)
if raiseErrors:
raise
return False
# connect to a microsoft exchange server if specified
if useMSExchange:
success, response = connectMSExchange(smtp_server)
if not success:
logger.debug('Could not connect to MS Exchange: ' + response)
try:
smtp_server.sendmail(sender, recipients, msg.as_string())
smtp_server.close()
    except Exception as err:
logger.error(err)
if raiseErrors:
raise
return False
return True
|
Sends an email from the inputted email address to the
list of given recipients with the inputted subject and
body. This will also attach the inputted list of
attachments to the email. The server value will default
to mail.<sender_domain> and you can use a ':' to specify
a port for the server.
:param sender <str>
:param recipients <list> [ <str>, .. ]
:param subject <str>
:param body <str>
:param attachments <list> [ <str>, .. ]
:param cc <list> [ <str>, .. ]
:param bcc <list> [ <str>, .. ]
:param contentType <str>
:param server <str>
:return <bool> success
|
def stopall(self, sudo=False, quiet=True):
'''stop ALL instances. This command is only added to the command group
as it doesn't make sense to call from a single instance
Parameters
==========
    sudo: if the command should be done with sudo (exposes a different set of
instances)
'''
from spython.utils import run_command, check_install
check_install()
subgroup = 'instance.stop'
if 'version 3' in self.version():
subgroup = ["instance", "stop"]
cmd = self._init_command(subgroup)
cmd = cmd + ['--all']
output = run_command(cmd, sudo=sudo, quiet=quiet)
        if output['return_code'] != 0:
            message = '%s : return code %s' % (output['message'],
                                               output['return_code'])
            bot.error(message)
        return output['return_code']
|
stop ALL instances. This command is only added to the command group
as it doesn't make sense to call from a single instance
Parameters
==========
sudo: if the command should be done with sudo (exposes a different set of
instances)
|
def mstmap(args):
"""
%prog mstmap bcffile/vcffile > matrixfile
Convert bcf/vcf format to mstmap input.
"""
from jcvi.assembly.geneticmap import MSTMatrix
p = OptionParser(mstmap.__doc__)
p.add_option("--dh", default=False, action="store_true",
help="Double haploid population, no het [default: %default]")
p.add_option("--freq", default=.2, type="float",
help="Allele must be above frequency [default: %default]")
p.add_option("--mindepth", default=3, type="int",
help="Only trust genotype calls with depth [default: %default]")
p.add_option("--missing_threshold", default=.25, type="float",
help="Fraction missing must be below")
p.add_option("--noheader", default=False, action="store_true",
help="Do not print MSTmap run parameters [default: %default]")
p.add_option("--pv4", default=False, action="store_true",
help="Enable filtering strand-bias, tail distance bias, etc. "
"[default: %default]")
p.add_option("--freebayes", default=False, action="store_true",
help="VCF output from freebayes")
p.set_sep(sep=".", help="Use separator to simplify individual names")
p.set_outfile()
opts, args = p.parse_args(args)
if len(args) != 1:
sys.exit(not p.print_help())
vcffile, = args
if vcffile.endswith(".bcf"):
bcffile = vcffile
vcffile = bcffile.rsplit(".", 1)[0] + ".vcf"
cmd = "bcftools view {0}".format(bcffile)
cmd += " | vcfutils.pl varFilter"
if not opts.pv4:
cmd += " -1 0 -2 0 -3 0 -4 0 -e 0"
if need_update(bcffile, vcffile):
sh(cmd, outfile=vcffile)
freq = opts.freq
sep = opts.sep
depth_index = 1 if opts.freebayes else 2
ptype = "DH" if opts.dh else "RIL6"
nohet = ptype == "DH"
fp = open(vcffile)
genotypes = []
for row in fp:
if row[:2] == "##":
continue
atoms = row.split()
if row[0] == '#':
ind = [x.split(sep)[0] for x in atoms[9:]]
nind = len(ind)
mh = ["locus_name"] + ind
continue
marker = "{0}.{1}".format(*atoms[:2])
geno = atoms[9:]
geno = [encode_genotype(x, mindepth=opts.mindepth,
depth_index=depth_index,
nohet=nohet) for x in geno]
assert len(geno) == nind
f = 1. / nind
if geno.count("A") * f < freq:
continue
if geno.count("B") * f < freq:
continue
if geno.count("-") * f > opts.missing_threshold:
continue
genotype = [marker] + geno
genotypes.append(genotype)
mm = MSTMatrix(genotypes, mh, ptype, opts.missing_threshold)
mm.write(opts.outfile, header=(not opts.noheader))
|
%prog mstmap bcffile/vcffile > matrixfile
Convert bcf/vcf format to mstmap input.
|
def nearest_neighbor(x, tSet):
"""[summary]
Implements the nearest neighbor algorithm
Arguments:
        x {[tuple]} -- [vector]
        tSet {[dict]} -- [training set]
    Returns:
        [type] -- [label of the nearest training vector, e.g. the output of the AND function]
"""
assert isinstance(x, tuple) and isinstance(tSet, dict)
current_key = ()
min_d = float('inf')
for key in tSet:
d = distance(x, key)
if d < min_d:
min_d = d
current_key = key
return tSet[current_key]
|
[summary]
Implements the nearest neighbor algorithm
Arguments:
x {[tuple]} -- [vector]
tSet {[dict]} -- [training set]
Returns:
[type] -- [label of the nearest training vector, e.g. the output of the AND function]
|
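A worked example for the function above. The distance helper is not shown in this entry, so a Euclidean version is assumed here; the training set encodes the AND function mentioned in the docstring.
from math import sqrt

def distance(a, b):
    # Assumed Euclidean distance; the original helper is not shown above.
    return sqrt(sum((p - q) ** 2 for p, q in zip(a, b)))

t_set = {(0, 0): 0, (0, 1): 0, (1, 0): 0, (1, 1): 1}  # AND function
print(nearest_neighbor((0.9, 0.8), t_set))  # -> 1 (nearest to (1, 1))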
def loadFromTemplate(template, stim=None):
"""Initialized this stimulus from a saved *template*
:param template: doc from a previously stored stimulus via :class:`templateDoc`
:type template: dict
"""
stim = StimulusModel.loadFromTemplate(template, stim=stim)
qstim = QStimulusModel(stim)
qstim.setEditor(template['testtype'])
return qstim
|
Initializes this stimulus from a saved *template*
:param template: doc from a previously stored stimulus via :class:`templateDoc`
:type template: dict
|
def acceptedUser(self, logType):
        '''Verify the entered user name is on the accepted MCC logbook list.'''
from urllib2 import urlopen, URLError, HTTPError
import json
isApproved = False
userName = str(self.logui.userName.text())
if userName == "":
return False # Must have a user name to submit entry
if logType == "MCC":
networkFault = False
data = []
log_url = "https://mccelog.slac.stanford.edu/elog/dev/mgibbs/dev_json_user_list.php/?username=" + userName
try:
data = urlopen(log_url, None, 5).read()
data = json.loads(data)
except URLError as error:
print("URLError: " + str(error.reason))
networkFault = True
except HTTPError as error:
print("HTTPError: " + str(error.reason))
networkFault = True
# If network fails, ask user to verify
if networkFault:
msgBox = QMessageBox()
msgBox.setText("Cannot connect to MCC Log Server!")
msgBox.setInformativeText("Use entered User name anyway?")
msgBox.setStandardButtons(QMessageBox.Ok | QMessageBox.Cancel)
msgBox.setDefaultButton(QMessageBox.Ok)
if msgBox.exec_() == QMessageBox.Ok:
isApproved = True
if data != [] and (data is not None):
isApproved = True
else:
isApproved = True
return isApproved
|
Verify the entered user name is on the accepted MCC logbook list.
|
def allocate_ids(self, partial_keys):
"""
Allocate IDs for incomplete keys.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/allocateIds
:param partial_keys: a list of partial keys.
:type partial_keys: list
:return: a list of full keys.
:rtype: list
"""
conn = self.get_conn()
resp = (conn
.projects()
.allocateIds(projectId=self.project_id, body={'keys': partial_keys})
.execute(num_retries=self.num_retries))
return resp['keys']
|
Allocate IDs for incomplete keys.
.. seealso::
https://cloud.google.com/datastore/docs/reference/rest/v1/projects/allocateIds
:param partial_keys: a list of partial keys.
:type partial_keys: list
:return: a list of full keys.
:rtype: list
|
def _prm_write_shared_table(self, key, hdf5_group, fullname, **kwargs):
"""Creates a new empty table"""
first_row = None
description = None
if 'first_row' in kwargs:
first_row = kwargs.pop('first_row')
            if 'description' not in kwargs:
description = {}
for colname in first_row:
data = first_row[colname]
column = self._all_get_table_col(key, [data], fullname)
description[colname] = column
if 'description' in kwargs:
description = kwargs.pop('description')
if 'filters' in kwargs:
filters = kwargs.pop('filters')
else:
filters = self._all_get_filters(kwargs)
table = self._hdf5file.create_table(where=hdf5_group, name=key,
description=description,
filters=filters,
**kwargs)
table.flush()
if first_row is not None:
            row = table.row
            for colname in description:
                row[colname] = first_row[colname]
            row.append()
table.flush()
|
Creates a new empty table
|
def text_(s, encoding='latin-1', errors='strict'):
'''
If ``s`` is an instance of ``binary_type``, return
``s.decode(encoding, errors)``, otherwise return ``s``
'''
return s.decode(encoding, errors) if isinstance(s, binary_type) else s
|
If ``s`` is an instance of ``binary_type``, return
``s.decode(encoding, errors)``, otherwise return ``s``
|
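A quick sketch of the helper above; binary_type is assumed to alias bytes, as is typical on Python 3.
binary_type = bytes  # assumption: the module aliases bytes on Python 3

print(text_(b'caf\xe9'))       # -> 'café', decoded as latin-1
print(text_('already text'))   # non-bytes input is returned unchanged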
def input_value(self, locator, text):
"""Sets the given value into text field identified by `locator`. This is an IOS only keyword, input value makes use of set_value
See `introduction` for details about locating elements.
"""
self._info("Setting text '%s' into text field '%s'" % (text, locator))
self._element_input_value_by_locator(locator, text)
|
Sets the given value into the text field identified by `locator`. This is an iOS-only keyword; input value makes use of set_value.
See `introduction` for details about locating elements.
|
def download(sess_id_or_alias, files, dest):
"""
Download files from a running container.
\b
SESSID: Session ID or its alias given when creating the session.
FILES: Paths inside container.
"""
if len(files) < 1:
return
with Session() as session:
try:
print_wait('Downloading file(s) from {}...'
.format(sess_id_or_alias))
kernel = session.Kernel(sess_id_or_alias)
kernel.download(files, dest, show_progress=True)
print_done('Downloaded to {}.'.format(dest.resolve()))
except Exception as e:
print_error(e)
sys.exit(1)
|
Download files from a running container.
\b
SESSID: Session ID or its alias given when creating the session.
FILES: Paths inside container.
|
def format_block(block, nlspaces=0):
"""
Format the given block of text, trimming leading/trailing
empty lines and any leading whitespace that is common to all lines.
The purpose is to let us list a code block as a multiline,
triple-quoted Python string, taking care of
indentation concerns.
http://code.activestate.com/recipes/145672/
"""
# separate block into lines
lines = smart_text(block).split('\n')
# remove leading/trailing empty lines
while lines and not lines[0]:
del lines[0]
while lines and not lines[-1]:
del lines[-1]
# look at first line to see how much indentation to trim
ws = re.match(r'\s*', lines[0]).group(0)
if ws:
        lines = [line.replace(ws, '', 1) for line in lines]
# remove leading/trailing blank lines (after leading ws removal)
# we do this again in case there were pure-whitespace lines
while lines and not lines[0]:
del lines[0]
while lines and not lines[-1]:
del lines[-1]
# account for user-specified leading spaces
flines = ['%s%s' % (' ' * nlspaces, line) for line in lines]
return '\n'.join(flines) + '\n'
|
Format the given block of text, trimming leading/trailing
empty lines and any leading whitespace that is common to all lines.
The purpose is to let us list a code block as a multiline,
triple-quoted Python string, taking care of
indentation concerns.
http://code.activestate.com/recipes/145672/
|
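A usage sketch for format_block, assuming smart_text behaves like a plain str conversion.
snippet = """
    def hello():
        print('hi')
"""
print(format_block(snippet))
# def hello():
#     print('hi')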
def _walk_through(job_dir, display_progress=False):
'''
Walk through the job dir and return jobs
'''
serial = salt.payload.Serial(__opts__)
for top in os.listdir(job_dir):
t_path = os.path.join(job_dir, top)
for final in os.listdir(t_path):
            load_path = os.path.join(t_path, final, '.load.p')
            if not os.path.isfile(load_path):
                continue
            with salt.utils.files.fopen(load_path, 'rb') as rfh:
                job = serial.load(rfh)
jid = job['jid']
if display_progress:
__jid_event__.fire_event(
{'message': 'Found JID {0}'.format(jid)},
'progress'
)
yield jid, job, t_path, final
|
Walk through the job dir and return jobs
|
def dump_data_peek(data, base = 0,
separator = ' ',
width = 16,
bits = None):
"""
Dump data from pointers guessed within the given binary data.
    @type data: dict
@param data: Dictionary mapping offsets to the data they point to.
@type base: int
@param base: Base offset.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging.
"""
if data is None:
return ''
pointers = compat.keys(data)
pointers.sort()
result = ''
for offset in pointers:
dumped = HexDump.hexline(data[offset], separator, width)
address = HexDump.address(base + offset, bits)
result += '%s -> %s\n' % (address, dumped)
return result
|
Dump data from pointers guessed within the given binary data.
@type data: dict
@param data: Dictionary mapping offsets to the data they point to.
@type base: int
@param base: Base offset.
@type bits: int
@param bits:
(Optional) Number of bits of the target architecture.
The default is platform dependent. See: L{HexDump.address_size}
@rtype: str
@return: Text suitable for logging.
|
def as_list(self, key):
"""
A convenience method which fetches the specified value, guaranteeing
that it is a list.
>>> a = ConfigObj()
>>> a['a'] = 1
>>> a.as_list('a')
[1]
>>> a['a'] = (1,)
>>> a.as_list('a')
[1]
>>> a['a'] = [1]
>>> a.as_list('a')
[1]
"""
result = self[key]
if isinstance(result, (tuple, list)):
return list(result)
return [result]
|
A convenience method which fetches the specified value, guaranteeing
that it is a list.
>>> a = ConfigObj()
>>> a['a'] = 1
>>> a.as_list('a')
[1]
>>> a['a'] = (1,)
>>> a.as_list('a')
[1]
>>> a['a'] = [1]
>>> a.as_list('a')
[1]
|
def cli(env, account_id, content_url, type, cname):
"""Create an origin pull mapping."""
manager = SoftLayer.CDNManager(env.client)
manager.add_origin(account_id, type, content_url, cname)
|
Create an origin pull mapping.
|
def split(self):
"""Split the phase.
When a phase is exhausted, it gets split into a pair of phases to be
further solved. The split happens like so:
1) Select the first unsolved package scope.
2) Find some common dependency in the first N variants of the scope.
3) Split the scope into two: [:N] and [N:].
4) Create two copies of the phase, containing each half of the split
scope.
The result of this split is that we have a new phase (the first phase),
which contains a package scope with a common dependency. This
dependency can now be intersected with the current resolve, thus
progressing it.
Returns:
A 2-tuple of _ResolvePhase objects, where the first phase is the
best contender for resolving.
"""
assert(self.status == SolverStatus.exhausted)
scopes = []
next_scopes = []
split_i = None
for i, scope in enumerate(self.scopes):
if split_i is None:
r = scope.split()
if r is not None:
scope_, next_scope = r
scopes.append(scope_)
next_scopes.append(next_scope)
split_i = i
continue
scopes.append(scope)
next_scopes.append(scope)
assert split_i is not None
phase = copy.copy(self)
phase.scopes = scopes
phase.status = SolverStatus.pending
phase.changed_scopes_i = set([split_i])
# because a scope was narrowed by a split, other scopes need to be
# reduced against it
#for i in range(len(phase.scopes)):
# if i != split_i:
# phase.pending_reducts.add((i, split_i))
next_phase = copy.copy(phase)
next_phase.scopes = next_scopes
return (phase, next_phase)
|
Split the phase.
When a phase is exhausted, it gets split into a pair of phases to be
further solved. The split happens like so:
1) Select the first unsolved package scope.
2) Find some common dependency in the first N variants of the scope.
3) Split the scope into two: [:N] and [N:].
4) Create two copies of the phase, containing each half of the split
scope.
The result of this split is that we have a new phase (the first phase),
which contains a package scope with a common dependency. This
dependency can now be intersected with the current resolve, thus
progressing it.
Returns:
A 2-tuple of _ResolvePhase objects, where the first phase is the
best contender for resolving.
|
def remove_pane(self, pane):
"""
Remove a :class:`.Pane`. (Look in all windows.)
"""
assert isinstance(pane, Pane)
for w in self.windows:
w.remove_pane(pane)
# No panes left in this window?
if not w.has_panes:
# Focus next.
for app, active_w in self._active_window_for_cli.items():
if w == active_w:
with set_app(app):
self.focus_next_window()
self.windows.remove(w)
|
Remove a :class:`.Pane`. (Look in all windows.)
|
def step(self, path=None, peer_table=None):
"""
Run one step of this algorithm:
* find the set of missing zonefiles
* try to fetch each of them
* store them
* update our zonefile database
Fetch rarest zonefiles first, but batch
whenever possible.
Return the number of zonefiles fetched
"""
# if os.environ.get("BLOCKSTACK_TEST", None) == "1":
# log.debug("%s: %s step" % (self.hostport, self.__class__.__name__))
if path is None:
path = self.atlasdb_path
num_fetched = 0
        missing_zfinfo = None
peer_hostports = None
with AtlasPeerTableLocked(peer_table) as ptbl:
missing_zfinfo = atlas_find_missing_zonefile_availability( peer_table=ptbl, path=path )
peer_hostports = ptbl.keys()[:]
# ask for zonefiles in rarest-first order
zonefile_ranking = [ (missing_zfinfo[zfhash]['popularity'], zfhash) for zfhash in missing_zfinfo.keys() ]
zonefile_ranking.sort()
zonefile_hashes = list(set([zfhash for (_, zfhash) in zonefile_ranking]))
zonefile_names = dict([(zfhash, missing_zfinfo[zfhash]['names']) for zfhash in zonefile_hashes])
zonefile_txids = dict([(zfhash, missing_zfinfo[zfhash]['txid']) for zfhash in zonefile_hashes])
zonefile_block_heights = dict([(zfhash, missing_zfinfo[zfhash]['block_heights']) for zfhash in zonefile_hashes])
zonefile_origins = self.find_zonefile_origins( missing_zfinfo, peer_hostports )
# filter out the ones that are already cached
for i in xrange(0, len(zonefile_hashes)):
# is this zonefile already cached?
zfhash = zonefile_hashes[i]
present = is_zonefile_cached( zfhash, self.zonefile_dir, validate=True )
if present:
log.debug("%s: zonefile %s already cached. Marking present" % (self.hostport, zfhash))
zonefile_hashes[i] = None
# mark it as present
self.set_zonefile_present(zfhash, min(zonefile_block_heights[zfhash]), path=path)
zonefile_hashes = filter( lambda zfh: zfh is not None, zonefile_hashes )
if len(zonefile_hashes) > 0:
log.debug("%s: missing %s unique zonefiles" % (self.hostport, len(zonefile_hashes)))
while len(zonefile_hashes) > 0 and self.running:
zfhash = zonefile_hashes[0]
zfnames = zonefile_names[zfhash]
zftxid = zonefile_txids[zfhash]
peers = missing_zfinfo[zfhash]['peers']
# try this zonefile's hosts in order by perceived availability
peers = atlas_rank_peers_by_health( peer_list=peers, with_zero_requests=True )
if len(peers) > 0:
log.debug("%s: zonefile %s available from %s peers (%s...)" % (self.hostport, zfhash, len(peers), ",".join(peers[:min(5, len(peers))])))
for peer_hostport in peers:
if zfhash not in zonefile_origins[peer_hostport]:
# not available
log.debug("%s not available from %s" % (zfhash, peer_hostport))
continue
# what other zonefiles can we get?
# only ask for the ones we don't have
peer_zonefile_hashes = []
for zfh in zonefile_origins[peer_hostport]:
if zfh in zonefile_hashes:
# can ask for this one too
peer_zonefile_hashes.append( zfh )
if len(peer_zonefile_hashes) == 0:
log.debug("%s: No zonefiles available from %s" % (self.hostport, peer_hostport))
continue
# get them all
log.debug("%s: get %s zonefiles from %s" % (self.hostport, len(peer_zonefile_hashes), peer_hostport))
zonefiles = atlas_get_zonefiles( self.hostport, peer_hostport, peer_zonefile_hashes, peer_table=peer_table )
if zonefiles is not None:
# got zonefiles!
stored_zfhashes = self.store_zonefiles( zonefile_names, zonefiles, zonefile_txids, zonefile_block_heights, peer_zonefile_hashes, peer_hostport, path )
# don't ask again
log.debug("Stored %s zonefiles" % len(stored_zfhashes))
for zfh in stored_zfhashes:
if zfh in peer_zonefile_hashes:
peer_zonefile_hashes.remove(zfh)
if zfh in zonefile_hashes:
zonefile_hashes.remove(zfh)
num_fetched += 1
else:
log.debug("%s: no data received from %s" % (self.hostport, peer_hostport))
with AtlasPeerTableLocked() as ptbl:
# if the node didn't actually have these zonefiles, then
# update their inventories so we don't ask for them again.
# TODO: do NOT ban nodes that repeatedly lie to us, since the "node" could be a load-balancer for a set of nodes that may or may not have the zonefile
for zfh in peer_zonefile_hashes:
log.debug("%s: %s did not have %s" % (self.hostport, peer_hostport, zfh))
atlas_peer_set_zonefile_status( peer_hostport, zfh, False, zonefile_bits=missing_zfinfo[zfh]['indexes'], peer_table=ptbl )
if zfh in zonefile_origins[peer_hostport]:
zonefile_origins[peer_hostport].remove( zfh )
# done with this zonefile
if zfhash in zonefile_hashes:
zonefile_hashes.remove(zfhash)
if len(zonefile_hashes) > 0 or num_fetched > 0:
log.debug("%s: fetched %s zonefiles" % (self.hostport, num_fetched))
return num_fetched
|
Run one step of this algorithm:
* find the set of missing zonefiles
* try to fetch each of them
* store them
* update our zonefile database
Fetch rarest zonefiles first, but batch
whenever possible.
Return the number of zonefiles fetched
|
def namedb_get_all_revealed_namespace_ids( self, current_block ):
"""
Get all non-expired revealed namespaces.
"""
query = "SELECT namespace_id FROM namespaces WHERE op = ? AND reveal_block < ?;"
args = (NAMESPACE_REVEAL, current_block + NAMESPACE_REVEAL_EXPIRE )
    cur = self.db.cursor()  # assumed: 'cur' is not defined anywhere in this snippet
    namespace_rows = namedb_query_execute( cur, query, args )
ret = []
for namespace_row in namespace_rows:
ret.append( namespace_row['namespace_id'] )
return ret
|
Get all non-expired revealed namespaces.
|
def epilogue(app_name):
"""
Return the epilogue for the help command.
"""
app_name = clr.stringc(app_name, "bright blue")
command = clr.stringc("command", "cyan")
help = clr.stringc("--help", "green")
return "\n%s %s %s for more info on a command\n" % (app_name,
command, help)
|
Return the epilogue for the help command.
|
def fqscreen_plot (self):
""" Makes a fancy custom plot which replicates the plot seen in the main
FastQ Screen program. Not useful if lots of samples as gets too wide. """
categories = list()
getCats = True
data = list()
p_types = OrderedDict()
p_types['multiple_hits_multiple_libraries'] = {'col': '#7f0000', 'name': 'Multiple Hits, Multiple Genomes' }
p_types['one_hit_multiple_libraries'] = {'col': '#ff0000', 'name': 'One Hit, Multiple Genomes' }
p_types['multiple_hits_one_library'] = {'col': '#00007f', 'name': 'Multiple Hits, One Genome' }
p_types['one_hit_one_library'] = {'col': '#0000ff', 'name': 'One Hit, One Genome' }
for k, t in p_types.items():
first = True
for s in sorted(self.fq_screen_data.keys()):
thisdata = list()
if len(categories) > 0:
getCats = False
for org in sorted(self.fq_screen_data[s]):
if org == 'total_reads':
continue
try:
thisdata.append(self.fq_screen_data[s][org]['percentages'][k])
except KeyError:
thisdata.append(None)
if getCats:
categories.append(org)
td = {
'name': t['name'],
'stack': s,
'data': thisdata,
'color': t['col']
}
if first:
first = False
else:
td['linkedTo'] = ':previous'
data.append(td)
html = '<div id="fq_screen_plot" class="hc-plot"></div> \n\
<script type="text/javascript"> \n\
fq_screen_data = {};\n\
fq_screen_categories = {};\n\
$(function () {{ \n\
$("#fq_screen_plot").highcharts({{ \n\
chart: {{ type: "column", backgroundColor: null }}, \n\
title: {{ text: "FastQ Screen Results" }}, \n\
xAxis: {{ categories: fq_screen_categories }}, \n\
yAxis: {{ \n\
max: 100, \n\
min: 0, \n\
title: {{ text: "Percentage Aligned" }} \n\
}}, \n\
tooltip: {{ \n\
formatter: function () {{ \n\
return "<b>" + this.series.stackKey.replace("column","") + " - " + this.x + "</b><br/>" + \n\
this.series.name + ": " + this.y + "%<br/>" + \n\
"Total Alignment: " + this.point.stackTotal + "%"; \n\
}}, \n\
}}, \n\
plotOptions: {{ \n\
column: {{ \n\
pointPadding: 0, \n\
groupPadding: 0.02, \n\
stacking: "normal" }} \n\
}}, \n\
series: fq_screen_data \n\
}}); \n\
}}); \n\
</script>'.format(json.dumps(data), json.dumps(categories))
return html
|
Makes a fancy custom plot which replicates the plot seen in the main
FastQ Screen program. Not useful with many samples, as the plot gets too wide.
|
def compile_and_process(self, in_path):
"""compile a file, save it to the ouput file if the inline flag true"""
out_path = self.path_mapping[in_path]
if not self.embed:
pdebug("[%s::%s] %s -> %s" % (
self.compiler_name,
self.name,
os.path.relpath(in_path),
os.path.relpath(out_path)),
groups=["build_task"],
autobreak=True)
else:
pdebug("[%s::%s] %s -> <cache>" % (
self.compiler_name,
self.name,
os.path.relpath(in_path)),
groups=["build_task"],
autobreak=True)
compiled_string = self.compile_file(in_path)
if not self.embed:
if compiled_string != "":
with open(out_path, "w") as f:
f.write(compiled_string)
return compiled_string
|
Compile a file and save it to the output file unless the embed flag is set
|
def h2o_explained_variance_score(y_actual, y_predicted, weights=None):
"""
Explained variance regression score function.
:param y_actual: H2OFrame of actual response.
:param y_predicted: H2OFrame of predicted response.
:param weights: (Optional) sample weights
:returns: the explained variance score.
"""
ModelBase._check_targets(y_actual, y_predicted)
_, numerator = _mean_var(y_actual - y_predicted, weights)
_, denominator = _mean_var(y_actual, weights)
if denominator == 0.0:
return 1. if numerator == 0 else 0. # 0/0 => 1, otherwise, 0
return 1 - numerator / denominator
|
Explained variance regression score function.
:param y_actual: H2OFrame of actual response.
:param y_predicted: H2OFrame of predicted response.
:param weights: (Optional) sample weights
:returns: the explained variance score.
|
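The score above is 1 - Var(y - y_pred) / Var(y); a plain NumPy sketch of the unweighted case, independent of H2O, for illustration.
import numpy as np

def explained_variance(y_actual, y_predicted):
    # Unweighted sketch of the same score: 1 - Var(residuals) / Var(y).
    numerator = np.var(y_actual - y_predicted)
    denominator = np.var(y_actual)
    if denominator == 0.0:
        return 1.0 if numerator == 0 else 0.0
    return 1.0 - numerator / denominator

print(explained_variance(np.array([3., -0.5, 2., 7.]),
                         np.array([2.5, 0.0, 2., 8.])))  # ~0.957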
def _get_and_count_containers(self, custom_cgroups=False, healthchecks=False):
"""List all the containers from the API, filter and count them."""
# Querying the size of containers is slow, we don't do it at each run
must_query_size = self.collect_container_size and self._latest_size_query == 0
self._latest_size_query = (self._latest_size_query + 1) % SIZE_REFRESH_RATE
running_containers_count = Counter()
all_containers_count = Counter()
try:
containers = self.docker_util.client.containers(all=True, size=must_query_size)
except Exception as e:
message = "Unable to list Docker containers: {0}".format(e)
self.service_check(SERVICE_CHECK_NAME, AgentCheck.CRITICAL,
message=message, tags=self.custom_tags)
raise Exception(message)
else:
self.service_check(SERVICE_CHECK_NAME, AgentCheck.OK, tags=self.custom_tags)
# Create a set of filtered containers based on the exclude/include rules
# and cache these rules in docker_util
self._filter_containers(containers)
containers_by_id = {}
for container in containers:
container_name = DockerUtil.container_name_extractor(container)[0]
container_status_tags = self._get_tags(container, CONTAINER)
all_containers_count[tuple(sorted(container_status_tags))] += 1
if self._is_container_running(container):
running_containers_count[tuple(sorted(container_status_tags))] += 1
# Check if the container is included/excluded via its tags
if self._is_container_excluded(container):
self.log.debug("Container {0} is excluded".format(container_name))
continue
containers_by_id[container['Id']] = container
# grab pid via API if custom cgroups - otherwise we won't find process when
# crawling for pids.
if custom_cgroups or healthchecks:
try:
inspect_dict = self.docker_util.client.inspect_container(container_name)
container['_pid'] = inspect_dict['State']['Pid']
container['health'] = inspect_dict['State'].get('Health', {})
except Exception as e:
self.log.debug("Unable to inspect Docker container: %s", e)
total_count = 0
# TODO: deprecate these 2, they should be replaced by _report_container_count
for tags, count in running_containers_count.iteritems():
total_count += count
self.gauge("docker.containers.running", count, tags=list(tags))
self.gauge("docker.containers.running.total", total_count, tags=self.custom_tags)
total_count = 0
for tags, count in all_containers_count.iteritems():
stopped_count = count - running_containers_count[tags]
total_count += stopped_count
self.gauge("docker.containers.stopped", stopped_count, tags=list(tags))
self.gauge("docker.containers.stopped.total", total_count, tags=self.custom_tags)
return containers_by_id
|
List all the containers from the API, filter and count them.
|
def _create_chrome_options(self):
"""Create and configure a chrome options object
:returns: chrome options object
"""
# Create Chrome options
options = webdriver.ChromeOptions()
if self.config.getboolean_optional('Driver', 'headless'):
self.logger.debug("Running Chrome in headless mode")
options.add_argument('--headless')
if os.name == 'nt': # Temporarily needed if running on Windows.
options.add_argument('--disable-gpu')
# Add Chrome preferences, mobile emulation options and chrome arguments
self._add_chrome_options(options, 'prefs')
self._add_chrome_options(options, 'mobileEmulation')
self._add_chrome_arguments(options)
return options
|
Create and configure a chrome options object
:returns: chrome options object
|
def _cldf2wld(dataset):
"""Make lingpy-compatible dictinary out of cldf main data."""
header = [f for f in dataset.dataset.lexeme_class.fieldnames() if f != 'ID']
D = {0: ['lid'] + [h.lower() for h in header]}
for idx, row in enumerate(dataset.objects['FormTable']):
row = deepcopy(row)
row['Segments'] = ' '.join(row['Segments'])
D[idx + 1] = [row['ID']] + [row[h] for h in header]
return D
|
Make lingpy-compatible dictionary out of cldf main data.
|
def build_mine_matrix(self, w, h, minenum):
"""random fill cells with mines and increments nearest mines num in adiacent cells"""
self.minecount = 0
matrix = [[Cell(30, 30, x, y, self) for x in range(w)] for y in range(h)]
for i in range(0, minenum):
x = random.randint(0, w - 1)
y = random.randint(0, h - 1)
if matrix[y][x].has_mine:
continue
self.minecount += 1
matrix[y][x].has_mine = True
for coord in [[-1, -1], [-1, 0], [-1, 1], [0, -1], [0, 1], [1, -1], [1, 0], [1, 1]]:
_x, _y = coord
if not self.coord_in_map(x + _x, y + _y, w, h):
continue
matrix[y + _y][x + _x].add_nearest_mine()
return matrix
|
Randomly fill cells with mines and increment the nearest-mine count in adjacent cells
|
def _l_cv_weight(self, donor_catchment):
"""
Return L-CV weighting for a donor catchment.
Methodology source: Science Report SC050050, eqn. 6.18 and 6.22a
"""
try:
dist = donor_catchment.similarity_dist
except AttributeError:
dist = self._similarity_distance(self.catchment, donor_catchment)
b = 0.0047 * sqrt(dist) + 0.0023 / 2
c = 0.02609 / (donor_catchment.record_length - 1)
return 1 / (b + c)
|
Return L-CV weighting for a donor catchment.
Methodology source: Science Report SC050050, eqn. 6.18 and 6.22a
|
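A standalone numeric sketch of the weighting formula above (eqns. 6.18 and 6.22a); the distance and record length are illustrative values.
from math import sqrt

dist = 0.5            # illustrative similarity distance
record_length = 30    # illustrative donor record length, in years
b = 0.0047 * sqrt(dist) + 0.0023 / 2
c = 0.02609 / (record_length - 1)
print(1 / (b + c))    # -> ~186.1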
def bait(self, maskmiddle='f', k='19'):
"""
Use bbduk to perform baiting
:param maskmiddle: boolean argument treat the middle base of a kmer as a wildcard; increases sensitivity
in the presence of errors.
:param k: keyword argument for length of kmers to use in the analyses
"""
logging.info('Performing kmer baiting of fastq files with {at} targets'.format(at=self.analysistype))
# There seems to be some sort of issue with java incorrectly calculating the total system memory on certain
# computers. For now, calculate the memory, and feed it into the bbduk call
if self.kmer_size is None:
kmer = k
else:
kmer = self.kmer_size
with progressbar(self.runmetadata) as bar:
for sample in bar:
if sample.general.bestassemblyfile != 'NA' and sample[self.analysistype].runanalysis:
# Create the folder (if necessary)
make_path(sample[self.analysistype].outputdir)
# Make the system call
if len(sample.general.fastqfiles) == 2:
# Create the command to run the baiting - paired inputs and a single, zipped output
sample[self.analysistype].bbdukcmd = \
'bbduk.sh -Xmx{mem} ref={ref} in1={in1} in2={in2} k={kmer} maskmiddle={mm} ' \
'threads={c} outm={om}' \
.format(mem=self.mem,
ref=sample[self.analysistype].baitfile,
in1=sample.general.trimmedcorrectedfastqfiles[0],
in2=sample.general.trimmedcorrectedfastqfiles[1],
kmer=kmer,
mm=maskmiddle,
c=str(self.cpus),
om=sample[self.analysistype].baitedfastq)
else:
sample[self.analysistype].bbdukcmd = \
'bbduk.sh -Xmx{mem} ref={ref} in={in1} k={kmer} maskmiddle={mm} ' \
'threads={cpus} outm={outm}' \
.format(mem=self.mem,
ref=sample[self.analysistype].baitfile,
in1=sample.general.trimmedcorrectedfastqfiles[0],
kmer=kmer,
mm=maskmiddle,
cpus=str(self.cpus),
outm=sample[self.analysistype].baitedfastq)
# Run the system call (if necessary)
if not os.path.isfile(sample[self.analysistype].baitedfastq):
out, err = run_subprocess(sample[self.analysistype].bbdukcmd)
write_to_logfile(sample[self.analysistype].bbdukcmd,
sample[self.analysistype].bbdukcmd,
self.logfile, sample.general.logout, sample.general.logerr,
sample[self.analysistype].logout, sample[self.analysistype].logerr)
write_to_logfile(out,
err,
self.logfile, sample.general.logout, sample.general.logerr,
sample[self.analysistype].logout, sample[self.analysistype].logerr)
|
Use bbduk to perform baiting
:param maskmiddle: boolean argument treat the middle base of a kmer as a wildcard; increases sensitivity
in the presence of errors.
:param k: keyword argument for length of kmers to use in the analyses
|
def split_param_vec(param_vec, rows_to_alts, design, return_all_types=False):
"""
Parameters
----------
param_vec : 1D ndarray.
Elements should all be ints, floats, or longs. Should have as many
elements as there are parameters being estimated.
rows_to_alts : 2D scipy sparse matrix.
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset. All
elements should be zeros or ones.
design : 2D ndarray.
There should be one row per observation per available alternative.
There should be one column per utility coefficient being estimated. All
elements should be ints, floats, or longs.
return_all_types : bool, optional.
Determines whether or not a tuple of 4 elements will be returned (with
one element for the nest, shape, intercept, and index parameters for
this model). If False, a tuple of 3 elements will be returned, as
described below.
Returns
-------
`(None, intercepts, betas)` : tuple.
The first element will be None since the clog-log model has no shape
parameters. The second element will either be a 1D array of "outside"
intercept parameters for this model or None, depending on whether
outside intercepts are being estimated or not. The third element will
be a 1D array of the index coefficients.
Note
----
If `return_all_types == True` then the function will return a tuple of four
objects. In order, these objects will either be None or the arrays
representing the arrays corresponding to the nest, shape, intercept, and
index parameters.
"""
# Figure out how many parameters are in the index
num_index_coefs = design.shape[1]
# Isolate the initial shape parameters from the betas
betas = param_vec[-1 * num_index_coefs:]
# Get the remaining outside intercepts if there are any
remaining_idx = param_vec.shape[0] - num_index_coefs
if remaining_idx > 0:
intercepts = param_vec[:remaining_idx]
else:
intercepts = None
if return_all_types:
return None, None, intercepts, betas
else:
return None, intercepts, betas
|
Parameters
----------
param_vec : 1D ndarray.
Elements should all be ints, floats, or longs. Should have as many
elements as there are parameters being estimated.
rows_to_alts : 2D scipy sparse matrix.
There should be one row per observation per available alternative and
one column per possible alternative. This matrix maps the rows of the
design matrix to the possible alternatives for this dataset. All
elements should be zeros or ones.
design : 2D ndarray.
There should be one row per observation per available alternative.
There should be one column per utility coefficient being estimated. All
elements should be ints, floats, or longs.
return_all_types : bool, optional.
Determines whether or not a tuple of 4 elements will be returned (with
one element for the nest, shape, intercept, and index parameters for
this model). If False, a tuple of 3 elements will be returned, as
described below.
Returns
-------
`(None, intercepts, betas)` : tuple.
The first element will be None since the clog-log model has no shape
parameters. The second element will either be a 1D array of "outside"
intercept parameters for this model or None, depending on whether
outside intercepts are being estimated or not. The third element will
be a 1D array of the index coefficients.
Note
----
If `return_all_types == True` then the function will return a tuple of four
objects. In order, these objects will either be None or the arrays
representing the arrays corresponding to the nest, shape, intercept, and
index parameters.
|
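A small sketch of the splitting logic above; the design-matrix shape and parameter values are illustrative, and rows_to_alts may be None here since it is unused in the function body.
import numpy as np

design = np.ones((6, 3))                           # 3 index coefficients
param_vec = np.array([0.5, -0.2, 1.0, 2.0, 3.0])   # 2 intercepts + 3 betas
shapes, intercepts, betas = split_param_vec(param_vec, None, design)
print(intercepts)  # -> [ 0.5 -0.2]
print(betas)       # -> [1. 2. 3.]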
def seq_site_length(self):
"""Calculate length of a single sequence site based upon relative positions specified in peak descriptions.
:return: Length of sequence site.
:rtype: :py:class:`int`
"""
relative_positions_set = set()
for peak_descr in self:
relative_positions_set.update(peak_descr.relative_positions)
return len(relative_positions_set)
|
Calculate length of a single sequence site based upon relative positions specified in peak descriptions.
:return: Length of sequence site.
:rtype: :py:class:`int`
|