| text (string, length 78–104k) | score (float64, 0–0.18) |
|---|---|
def get_selected_submissions(self, course, filter_type, selected_tasks, users, aggregations, stype):
    """
    Returns the submissions that have been selected by the admin.

    :param course: course object
    :param filter_type: "users" to filter by usernames; any other value filters by aggregation ids
    :param selected_tasks: selected task ids
    :param users: selected usernames (used when filter_type == "users")
    :param aggregations: selected aggregation ids (used otherwise)
    :param stype: "single" to keep only each user's stored evaluated submission,
        otherwise all "done"/"error" submissions
    :return: tuple ``(submissions, aggregations)`` where ``aggregations`` maps each
        username to its aggregation document, or to None when classrooms are
        disabled and the user is not in the aggregation's first group
    """
    if filter_type == "users":
        self._validate_list(users)
        # Fetch every aggregation of this course containing at least one selected user.
        aggregations = list(self.database.aggregations.find({"courseid": course.get_id(),
                                                             "students": {"$in": users}}))
        # Tweak if not using classrooms : classroom['students'] may content ungrouped users
        # NOTE(review): this pairs every username with every fetched aggregation; with
        # several aggregations, later ones overwrite earlier dict entries — confirm
        # each user belongs to at most one aggregation per course.
        aggregations = dict([(username,
                              aggregation if course.use_classrooms() or (
                                  len(aggregation['groups']) and
                                  username in aggregation['groups'][0]["students"]
                              ) else None
                              ) for aggregation in aggregations for username in users])
    else:
        self._validate_list(aggregations)
        aggregations = list(
            self.database.aggregations.find({"_id": {"$in": [ObjectId(cid) for cid in aggregations]}}))
        # Tweak if not using classrooms : classroom['students'] may content ungrouped users
        aggregations = dict([(username,
                              aggregation if course.use_classrooms() or (
                                  len(aggregation['groups']) and
                                  username in aggregation['groups'][0]["students"]
                              ) else None
                              ) for aggregation in aggregations for username in aggregation["students"]])
    if stype == "single":
        # Only the submission recorded as the user's evaluated one for each task.
        user_tasks = list(self.database.user_tasks.find({"username": {"$in": list(aggregations.keys())},
                                                         "taskid": {"$in": selected_tasks},
                                                         "courseid": course.get_id()}))
        submissionsid = [user_task['submissionid'] for user_task in user_tasks if user_task['submissionid'] is not None]
        submissions = list(self.database.submissions.find({"_id": {"$in": submissionsid}}))
    else:
        submissions = list(self.database.submissions.find({"username": {"$in": list(aggregations.keys())},
                                                           "taskid": {"$in": selected_tasks},
                                                           "courseid": course.get_id(),
                                                           "status": {"$in": ["done", "error"]}}))
    return submissions, aggregations
def parse_at_root(
        self,
        root,   # type: ET.Element
        state   # type: _ProcessorState
):
    # type: (...) -> Any
    """Parse ``root`` as the document root, then run the after-parse hooks."""
    parsed = self._processor.parse_at_root(root, state)
    return _hooks_apply_after_parse(self._hooks, state, parsed)
def fit(self, X, y=None):
    '''
    Learn the linear transformation that flips negative eigenvalues.

    Parameters
    ----------
    X : array, shape [n, n]
        The *symmetric* input similarities. If X is asymmetric, it will be
        treated as if it were symmetric based on its lower-triangular part.
    '''
    n_samples = X.shape[0]
    if X.shape != (n_samples, n_samples):
        raise TypeError("Input must be a square matrix.")
    # TODO: only get negative eigs somehow?
    memory = get_memory(self.memory)
    eigvals, eigvecs = memory.cache(scipy.linalg.eigh, ignore=['overwrite_a'])(
        X, overwrite_a=not self.copy)
    # Column vector of eigenvalue signs scales each eigenvector row of V.T.
    signs = np.sign(eigvals[:, None])
    self.flip_ = np.dot(eigvecs, signs * eigvecs.T)
    return self
def _set_mpls_traffic_bypass(self, v, load=False):
    """
    Setter method for mpls_traffic_bypass, mapped from YANG variable /telemetry/profile/mpls_traffic_bypass (list)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_mpls_traffic_bypass is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_mpls_traffic_bypass() directly.

    NOTE: auto-generated pyangbind code; the YANGDynClass argument list below
    mirrors the YANG model and should not be edited by hand.
    """
    # Normalize values that know their own YANG type before wrapping.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        t = YANGDynClass(v,base=YANGListType("name",mpls_traffic_bypass.mpls_traffic_bypass, yang_name="mpls-traffic-bypass", rest_name="mpls-traffic-bypass", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'MplstrafficbypassProfile', u'info': u'MPLS LSP profile'}}), is_container='list', yang_name="mpls-traffic-bypass", rest_name="mpls-traffic-bypass", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'MplstrafficbypassProfile', u'info': u'MPLS LSP profile'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)
    except (TypeError, ValueError):
        # Re-raise as a structured error describing the expected YANG type.
        raise ValueError({
            'error-string': """mpls_traffic_bypass must be of a type compatible with list""",
            'defined-type': "list",
            'generated-type': """YANGDynClass(base=YANGListType("name",mpls_traffic_bypass.mpls_traffic_bypass, yang_name="mpls-traffic-bypass", rest_name="mpls-traffic-bypass", parent=self, is_container='list', user_ordered=False, path_helper=self._path_helper, yang_keys='name', extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'MplstrafficbypassProfile', u'info': u'MPLS LSP profile'}}), is_container='list', yang_name="mpls-traffic-bypass", rest_name="mpls-traffic-bypass", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'cli-suppress-list-no': None, u'callpoint': u'MplstrafficbypassProfile', u'info': u'MPLS LSP profile'}}, namespace='urn:brocade.com:mgmt:brocade-telemetry', defining_module='brocade-telemetry', yang_type='list', is_config=True)""",
        })
    self.__mpls_traffic_bypass = t
    if hasattr(self, '_set'):
        self._set()
def start_exp():
    """Serves up the experiment applet.

    Validates the hitId/assignmentId/workerId/mode query args, creates a new
    Participant on the first visit (choosing condition/counterbalance), and on
    later visits enforces the no-repeat / no-restart rules before rendering
    exp.html.

    :raises ExperimentError: when required args are missing, the worker has
        already started this experiment or completed another HIT, the database
        holds duplicate records, or the ad-server lookup fails.
    """
    if not (('hitId' in request.args) and ('assignmentId' in request.args) and
            ('workerId' in request.args) and ('mode' in request.args)):
        raise ExperimentError('hit_assign_worker_id_not_set_in_exp')
    hit_id = request.args['hitId']
    assignment_id = request.args['assignmentId']
    worker_id = request.args['workerId']
    mode = request.args['mode']
    app.logger.info("Accessing /exp: %(h)s %(a)s %(w)s " % {
        "h": hit_id,
        "a": assignment_id,
        "w": worker_id
    })
    # hitIds starting with "debug" allow restarting an in-progress experiment.
    if hit_id[:5] == "debug":
        debug_mode = True
    else:
        debug_mode = False
    # Check first to see if this hitId or assignmentId exists. If so, check to
    # see if inExp is set
    allow_repeats = CONFIG.getboolean('HIT Configuration', 'allow_repeats')
    if allow_repeats:
        # Only prior records for this same assignment can block the worker.
        matches = Participant.query.\
            filter(Participant.workerid == worker_id).\
            filter(Participant.assignmentid == assignment_id).\
            all()
    else:
        matches = Participant.query.\
            filter(Participant.workerid == worker_id).\
            all()
    numrecs = len(matches)
    if numrecs == 0:
        # First visit: choose condition and counterbalance.
        subj_cond, subj_counter = get_random_condcount(mode)
        worker_ip = "UNKNOWN" if not request.remote_addr else \
            request.remote_addr
        browser = "UNKNOWN" if not request.user_agent.browser else \
            request.user_agent.browser
        platform = "UNKNOWN" if not request.user_agent.platform else \
            request.user_agent.platform
        language = "UNKNOWN" if not request.user_agent.language else \
            request.user_agent.language
        # Set condition here and insert into database.
        participant_attributes = dict(
            assignmentid=assignment_id,
            workerid=worker_id,
            hitid=hit_id,
            cond=subj_cond,
            counterbalance=subj_counter,
            ipaddress=worker_ip,
            browser=browser,
            platform=platform,
            language=language,
            mode=mode
        )
        part = Participant(**participant_attributes)
        db_session.add(part)
        db_session.commit()
    else:
        # A couple possible problems here:
        # 1: They've already done an assignment, then we should tell them they
        #    can't do another one
        # 2: They've already worked on this assignment, and got too far to
        #    start over.
        # 3: They're in the database twice for the same assignment, that should
        #    never happen.
        # 4: They're returning and all is well.
        nrecords = 0
        # BUGFIX: initialize the flag once, BEFORE the loop. It was previously
        # reset on every iteration, so a record for a different assignment was
        # forgotten whenever a later record matched the current assignment.
        other_assignment = False
        for record in matches:
            if record.assignmentid != assignment_id:
                other_assignment = True
            else:
                nrecords += 1
        if nrecords <= 1 and not other_assignment:
            part = matches[0]
            # In experiment (or later) can't restart at this point
            if part.status >= STARTED and not debug_mode:
                raise ExperimentError('already_started_exp')
        else:
            if nrecords > 1:
                app.logger.error("Error, hit/assignment appears in database \
                    more than once (serious problem)")
                raise ExperimentError(
                    'hit_assign_appears_in_database_more_than_once'
                )
            if other_assignment:
                raise ExperimentError('already_did_exp_hit')
    use_psiturk_ad_server = CONFIG.getboolean('Shell Parameters', 'use_psiturk_ad_server')
    if use_psiturk_ad_server and (mode == 'sandbox' or mode == 'live'):
        # If everything goes ok here relatively safe to assume we can lookup
        # the ad.
        ad_id = get_ad_via_hitid(hit_id)
        if ad_id != "error":
            if mode == "sandbox":
                ad_server_location = 'https://sandbox.ad.psiturk.org/complete/'\
                    + str(ad_id)
            elif mode == "live":
                ad_server_location = 'https://ad.psiturk.org/complete/' +\
                    str(ad_id)
        else:
            raise ExperimentError('hit_not_registered_with_ad_server')
    else:
        ad_server_location = '/complete'
    return render_template(
        'exp.html', uniqueId=part.uniqueid,
        condition=part.cond,
        counterbalance=part.counterbalance,
        adServerLoc=ad_server_location,
        mode=mode,
        contact_address=CONFIG.get('HIT Configuration', 'contact_email_on_error')
    )
def _auth(profile=None):
    '''
    Set up neutron credentials.

    Reads the ``keystone.*`` options either from the named config profile or,
    when no profile is given, from the top-level minion/master configuration.

    :param profile: optional name of a configuration profile
    :return: an authenticated ``suoneu.SaltNeutron`` client
    '''
    if profile:
        credentials = __salt__['config.option'](profile)
        user = credentials['keystone.user']
        password = credentials['keystone.password']
        tenant = credentials['keystone.tenant']
        auth_url = credentials['keystone.auth_url']
        region_name = credentials.get('keystone.region_name', None)
        service_type = credentials.get('keystone.service_type', 'network')
        os_auth_system = credentials.get('keystone.os_auth_system', None)
        use_keystoneauth = credentials.get('keystone.use_keystoneauth', False)
        verify = credentials.get('keystone.verify', True)
    else:
        user = __salt__['config.option']('keystone.user')
        password = __salt__['config.option']('keystone.password')
        tenant = __salt__['config.option']('keystone.tenant')
        auth_url = __salt__['config.option']('keystone.auth_url')
        region_name = __salt__['config.option']('keystone.region_name')
        service_type = __salt__['config.option']('keystone.service_type')
        os_auth_system = __salt__['config.option']('keystone.os_auth_system')
        use_keystoneauth = __salt__['config.option']('keystone.use_keystoneauth')
        verify = __salt__['config.option']('keystone.verify')
    if use_keystoneauth is True:
        # BUGFIX: the domain names were previously read from `credentials`
        # unconditionally, which raised NameError when no profile was given
        # (credentials is only defined in the profile branch).
        if profile:
            project_domain_name = credentials['keystone.project_domain_name']
            user_domain_name = credentials['keystone.user_domain_name']
        else:
            project_domain_name = __salt__['config.option']('keystone.project_domain_name')
            user_domain_name = __salt__['config.option']('keystone.user_domain_name')
        kwargs = {
            'username': user,
            'password': password,
            'tenant_name': tenant,
            'auth_url': auth_url,
            'region_name': region_name,
            'service_type': service_type,
            'os_auth_plugin': os_auth_system,
            'use_keystoneauth': use_keystoneauth,
            'verify': verify,
            'project_domain_name': project_domain_name,
            'user_domain_name': user_domain_name
        }
    else:
        kwargs = {
            'username': user,
            'password': password,
            'tenant_name': tenant,
            'auth_url': auth_url,
            'region_name': region_name,
            'service_type': service_type,
            'os_auth_plugin': os_auth_system
        }
    return suoneu.SaltNeutron(**kwargs)
def entropy_from_samples(samples, vec):
    """
    Estimate H(x|s) ~= -E_{x \\sim P(x|s)}[\\log Q(x|s)], where x are samples,
    and Q is parameterized by vec. Returns [categorical NLL, uniform NLL].
    """
    cat_samples = tf.argmax(samples[:, :NUM_CLASS], axis=1, output_type=tf.int32)
    uniform_samples = samples[:, NUM_CLASS:]
    cat_dist, uniform_dist = get_distributions(vec[:, :NUM_CLASS], vec[:, NUM_CLASS:])

    def _mean_nll(dist, sample, name):
        # Negative log-likelihood, averaged over the batch axis then summed.
        per_sample = -dist.log_prob(sample)
        return tf.reduce_sum(tf.reduce_mean(per_sample, axis=0), name=name)

    return [_mean_nll(cat_dist, cat_samples, 'nll_cat'),
            _mean_nll(uniform_dist, uniform_samples, 'nll_uniform')]
def show_bandwidth_limit_rule(self, rule, policy, body=None):
    """Fetches information of a certain bandwidth limit rule."""
    path = self.qos_bandwidth_limit_rule_path % (policy, rule)
    return self.get(path, body=body)
def read_anchors(ac, qorder, sorder, minsize=0):
    """
    anchors file are just (geneA, geneB) pairs (with possible deflines)
    """
    all_anchors = defaultdict(list)
    anchor_to_block = {}
    total = 0
    for gene_a, gene_b, block_idx in ac.iter_pairs(minsize=minsize):
        # Skip pairs where either gene is not placed in its genome order.
        if gene_a not in qorder or gene_b not in sorder:
            continue
        qi, q = qorder[gene_a]
        si, s = sorder[gene_b]
        pair = (qi, si)
        all_anchors[(q.seqid, s.seqid)].append(pair)
        anchor_to_block[pair] = block_idx
        total += 1
    logging.debug("A total of {0} anchors imported.".format(total))
    assert total == len(anchor_to_block)
    return all_anchors, anchor_to_block
def _is_auth_info_available():
    """Check if user auth info has been set in environment variables."""
    env = os.environ
    if _ENDPOINTS_USER_INFO in env:
        return True
    if _ENV_AUTH_EMAIL in env and _ENV_AUTH_DOMAIN in env:
        return True
    return _ENV_USE_OAUTH_SCOPE in env
def _init(creds, bucket, multiple_env, environment, prefix, s3_cache_expire):
    '''
    Connect to S3 and download the metadata for each file in all buckets
    specified and cache the data to disk.
    '''
    cache_file = _get_buckets_cache_filename(bucket, prefix)
    expiry_threshold = time.time() - s3_cache_expire
    # mtime of 0 (epoch) means "no cache file" and always reads as expired.
    mtime = os.path.getmtime(cache_file) if os.path.isfile(cache_file) else 0
    expired = mtime <= expiry_threshold
    log.debug(
        'S3 bucket cache file %s is %sexpired, mtime_diff=%ss, expiration=%ss',
        cache_file,
        '' if expired else 'not ',
        mtime - expiry_threshold,
        s3_cache_expire
    )
    if expired:
        pillars = _refresh_buckets_cache_file(creds, cache_file, multiple_env,
                                              environment, prefix)
    else:
        pillars = _read_buckets_cache_file(cache_file)
    log.debug('S3 bucket retrieved pillars %s', pillars)
    return pillars
def list_audit_sink(self, **kwargs):
    """
    list or watch objects of kind AuditSink.

    Synchronous by default; pass ``async_req=True`` for an asynchronous
    request, in which case the request thread is returned instead of data:
    >>> thread = api.list_audit_sink(async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param str pretty: If 'true', then the output is pretty printed.
    :param str _continue: continue token from a previous paginated response.
    :param str field_selector: restrict results by object fields.
    :param str label_selector: restrict results by object labels.
    :param int limit: maximum number of responses for a list call.
    :param str resource_version: show changes after this resource version.
    :param int timeout_seconds: timeout for the list/watch call.
    :param bool watch: stream add/update/remove notifications.
    :return: V1alpha1AuditSinkList, or the request thread when async_req=True.
    """
    kwargs['_return_http_data_only'] = True
    # Both the sync and async paths delegate to the same call: the async path
    # returns the request thread, the sync path returns the unwrapped data.
    return self.list_audit_sink_with_http_info(**kwargs)
def _labeledInput(activeInputs, cellsPerCol=32):
"""Print the list of [column, cellIdx] indices for each of the active
cells in activeInputs.
"""
if cellsPerCol == 0:
cellsPerCol = 1
cols = activeInputs.size / cellsPerCol
activeInputs = activeInputs.reshape(cols, cellsPerCol)
(cols, cellIdxs) = activeInputs.nonzero()
if len(cols) == 0:
return "NONE"
items = ["(%d): " % (len(cols))]
prevCol = -1
for (col,cellIdx) in zip(cols, cellIdxs):
if col != prevCol:
if prevCol != -1:
items.append("] ")
items.append("Col %d: [" % col)
prevCol = col
items.append("%d," % cellIdx)
items.append("]")
return " ".join(items) | 0.023392 |
def _get_params(self, rdata):
"""
Returns a list of jsonrpc request's method parameters.
"""
if 'params' in rdata:
if isinstance(rdata['params'], dict) \
or isinstance(rdata['params'], list) \
or rdata['params'] is None:
return rdata['params']
else:
# wrong type
raise InvalidRequestError
else:
return None | 0.004255 |
def _clear(self):
"""
Clear the current image.
"""
self._plain_image = [" " * self._width for _ in range(self._height)]
self._colour_map = [[(None, 0, 0) for _ in range(self._width)]
for _ in range(self._height)] | 0.007143 |
def visit_assignment(self, node, children):
    """
    Create parser rule for assignments and register attribute types
    on metaclass.

    :param node: the parse-tree node of the assignment (used for positions)
    :param children: visited children: [attr_name, op, (rhs_rule, modifiers)]
    :returns: the Arpeggio parser rule implementing this assignment
    :raises TextXSemanticError: '?=' used on a multiply-assigned attribute
    :raises TextXSyntaxError: modifiers used with '=' or '?='
    """
    attr_name = children[0]
    op = children[1]
    rhs_rule, modifiers = children[2]
    cls = self._current_cls
    target_cls = None
    if self.debug:
        self.dprint("Processing assignment {}{}..."
                    .format(attr_name, op))
    if self.debug:
        self.dprint("Creating attribute {}:{}".format(cls.__name__,
                                                      attr_name))
        self.dprint("Assignment operation = {}".format(op))
    if attr_name in cls._tx_attrs:
        # If attribute already exists in the metamodel it is
        # multiple assignment to the same attribute.
        # Cannot use operator ?= on multiple assignments
        if op == '?=':
            line, col = self.grammar_parser.pos_to_linecol(node.position)
            raise TextXSemanticError(
                'Cannot use "?=" operator on multiple'
                ' assignments for attribute "{}" at {}'
                .format(attr_name, (line, col)), line, col)
        cls_attr = cls._tx_attrs[attr_name]
    else:
        cls_attr = self.metamodel._new_cls_attr(cls, name=attr_name,
                                                position=node.position)
    # Keep track of metaclass references and containments
    if type(rhs_rule) is tuple and rhs_rule[0] == "obj_ref":
        # Right-hand side is a reference to another object, not a containment.
        cls_attr.cont = False
        cls_attr.ref = True
        # Override rhs by its PEG rule for further processing
        rhs_rule = rhs_rule[1]
        # Target class is not the same as target rule
        target_cls = rhs_rule.cls
    base_rule_name = rhs_rule.rule_name
    # Map the assignment operator to the corresponding Arpeggio rule and
    # record the attribute multiplicity.
    if op == '+=':
        assignment_rule = OneOrMore(
            nodes=[rhs_rule],
            rule_name='__asgn_oneormore', root=True)
        cls_attr.mult = MULT_ONEORMORE
    elif op == '*=':
        assignment_rule = ZeroOrMore(
            nodes=[rhs_rule],
            rule_name='__asgn_zeroormore', root=True)
        # ONEORMORE from an earlier assignment is stronger; do not weaken it.
        if cls_attr.mult is not MULT_ONEORMORE:
            cls_attr.mult = MULT_ZEROORMORE
    elif op == '?=':
        assignment_rule = Optional(
            nodes=[rhs_rule],
            rule_name='__asgn_optional', root=True)
        cls_attr.mult = MULT_OPTIONAL
        base_rule_name = 'BOOL'
        # ?= assigment should have default value of False.
        # so we shall mark it as such.
        cls_attr.bool_assignment = True
    else:
        assignment_rule = Sequence(
            nodes=[rhs_rule],
            rule_name='__asgn_plain', root=True)
    # Modifiers
    if modifiers:
        modifiers, position = modifiers
        # Sanity check. Modifiers do not make
        # sense for ?= and = operator at the moment.
        if op == '?=' or op == '=':
            line, col = self.grammar_parser.pos_to_linecol(position)
            raise TextXSyntaxError(
                'Modifiers are not allowed for "{}" operator at {}'
                .format(op, text((line, col))), line, col)
        # Separator modifier
        assignment_rule.sep = modifiers.get('sep', None)
        # End of line termination modifier
        if 'eolterm' in modifiers:
            assignment_rule.eolterm = True
    if target_cls:
        attr_type = target_cls
    else:
        # Use STRING as default attr class
        attr_type = base_rule_name if base_rule_name else 'STRING'
    if not cls_attr.cls:
        cls_attr.cls = ClassCrossRef(cls_name=attr_type,
                                     position=node.position)
    else:
        # cls cross ref might already be set in case of multiple assignment
        # to the same attribute. If types are not the same we shall use
        # OBJECT as generic type.
        if cls_attr.cls.cls_name != attr_type:
            cls_attr.cls.cls_name = 'OBJECT'
    if self.debug:
        self.dprint("Created attribute {}:{}[cls={}, cont={}, "
                    "ref={}, mult={}, pos={}]"
                    .format(cls.__name__, attr_name, cls_attr.cls.cls_name,
                            cls_attr.cont, cls_attr.ref, cls_attr.mult,
                            cls_attr.position))
    assignment_rule._attr_name = attr_name
    assignment_rule._exp_str = attr_name  # For nice error reporting
    return assignment_rule
def get_varval_from_locals(key, locals_, strict=False):
    """
    Returns a variable value from locals.
    Different from locals()['varname'] because
    get_varval_from_locals('varname.attribute', locals())
    is allowed
    """
    assert isinstance(key, six.string_types), 'must have parsed key into a string already'
    if key in locals_:
        return locals_[key]
    base_key, dot, attr_path = key.partition('.')
    if not dot:
        # Plain name not present in locals at all.
        raise AssertionError('%s = NameError' % (key))
    try:
        baseval = locals_[base_key]  # NOQA
        # Evaluate the attribute chain against the looked-up base value.
        return eval('baseval.' + attr_path)
    except Exception as ex:
        if strict:
            raise
        return ex
def depopulate(self, is_update):
    """Collect all the fields that need to be saved.

    :param is_update: bool, True for an update query, False for an insert
    :returns: dict mapping field_name to the value to be saved
    :raises KeyError: on insert, when a required field is missing
    """
    schema = self.schema
    fields = {}
    for name, field in schema.fields.items():
        modified = name in self.modified_fields
        original_value = getattr(self, name)
        value = field.iset(
            self,
            original_value,
            is_update=is_update,
            is_modified=modified
        )
        # Only persist fields that changed or carry a non-None value.
        if not modified and value is None:
            continue
        # Skip an unchanged primary key on updates.
        if is_update and field.is_pk() and value == original_value:
            continue
        fields[name] = value
    if not is_update:
        for field_name in schema.required_fields.keys():
            if field_name not in fields:
                raise KeyError("Missing required field {}".format(field_name))
    return fields
def mod_watch(name,
              sfun=None,
              sig=None,
              full_restart=False,
              init_delay=None,
              force=False,
              **kwargs):
    '''
    The service watcher, called to invoke the watch command.
    When called, it will restart or reload the named service.

    .. note::
        This state exists to support special handling of the ``watch``
        :ref:`requisite <requisites>`. It should not be called directly.
        Parameters for this function should be set by the watching service.
        (i.e. ``service.running``)

    name
        The name of the init or rc script used to manage the service

    sfun
        The original function which triggered the mod_watch call
        (`service.running`, for example).

    sig
        The string to search for when looking for the service process with ps

    reload
        If True, use reload instead of the default restart. If the value is a
        list of requisites, reload only when every watched change is contained
        in the reload list; otherwise restart.

    full_restart
        Perform a full stop/start of the service by using
        service.full_restart instead of restart. This option is ignored if
        ``reload`` applies and is supported by only a few
        :py:func:`service modules <salt.modules.service>`.

    force
        Use service.force_reload instead of reload (needs reload to be set to True)

    init_delay
        Add a sleep command (in seconds) before the service is restarted/reloaded
    '''
    # `reload` shadows the builtin, so it arrives via kwargs.
    reload_ = kwargs.pop('reload', False)
    ret = {'name': name,
           'changes': {},
           'result': True,
           'comment': ''}
    past_participle = None
    if sfun == 'dead':
        verb = 'stop'
        past_participle = verb + 'ped'
        if __salt__['service.status'](name, sig):
            func = __salt__['service.stop']
        else:
            ret['result'] = True
            ret['comment'] = 'Service is already {0}'.format(past_participle)
            return ret
    elif sfun == 'running':
        if __salt__['service.status'](name, sig):
            # Service is up: decide between reload / force_reload /
            # full_restart / restart based on the watched changes.
            if 'service.reload' in __salt__ and reload_:
                if isinstance(reload_, list):
                    # Reload only if every watched change matches an entry
                    # in the reload requisite list.
                    only_reload_needed = True
                    for watch_item in kwargs['__reqs__']['watch']:
                        if __running__[_gen_tag(watch_item)]['changes']:
                            match_found = False
                            for this_reload in reload_:
                                for state, id_ in six.iteritems(this_reload):
                                    if state == watch_item['state'] \
                                            and id_ == watch_item['__id__']:
                                        match_found = True
                            if not match_found:
                                only_reload_needed = False
                    if only_reload_needed:
                        if 'service.force_reload' in __salt__ and force:
                            func = __salt__['service.force_reload']
                            verb = 'forcefully reload'
                        else:
                            func = __salt__['service.reload']
                            verb = 'reload'
                    else:
                        if 'service.full_restart' in __salt__ and full_restart:
                            func = __salt__['service.full_restart']
                            verb = 'fully restart'
                        else:
                            func = __salt__['service.restart']
                            verb = 'restart'
                else:
                    if 'service.force_reload' in __salt__ and force:
                        func = __salt__['service.force_reload']
                        verb = 'forcefully reload'
                    else:
                        func = __salt__['service.reload']
                        verb = 'reload'
            elif 'service.full_restart' in __salt__ and full_restart:
                func = __salt__['service.full_restart']
                verb = 'fully restart'
            else:
                func = __salt__['service.restart']
                verb = 'restart'
        else:
            func = __salt__['service.start']
            verb = 'start'
        if not past_participle:
            past_participle = verb + 'ed'
    else:
        ret['comment'] = 'Unable to trigger watch for service.{0}'.format(sfun)
        ret['result'] = False
        return ret
    if __opts__['test']:
        ret['result'] = None
        ret['comment'] = 'Service is set to be {0}'.format(past_participle)
        return ret
    if verb == 'start' and 'service.stop' in __salt__:
        # stop service before start
        __salt__['service.stop'](name)
    func_kwargs, warnings = _get_systemd_only(func, kwargs)
    if warnings:
        ret.setdefault('warnings', []).extend(warnings)
    try:
        result = func(name, **func_kwargs)
    except CommandExecutionError as exc:
        ret['result'] = False
        ret['comment'] = exc.strerror
        return ret
    if init_delay:
        time.sleep(init_delay)
    ret['changes'] = {name: result}
    ret['result'] = result
    ret['comment'] = 'Service {0}'.format(past_participle) if result else \
        'Failed to {0} the service'.format(verb)
    return ret
def _format(color, style=''):
    """Return a QTextCharFormat with the given attributes.

    :param color: colour name understood by QColor.setNamedColor
    :param style: substring flags; may contain 'bold' and/or 'italic'
    """
    fg = QColor()
    fg.setNamedColor(color)
    char_format = QTextCharFormat()
    char_format.setForeground(fg)
    if 'bold' in style:
        char_format.setFontWeight(QFont.Bold)
    if 'italic' in style:
        char_format.setFontItalic(True)
    return char_format
def plot(x, y, z, ax=None, **kwargs):
    r"""
    Plot iso-probability mass function, converted to sigmas.

    Parameters
    ----------
    x, y, z : numpy arrays
        Same as arguments to :func:`matplotlib.pyplot.contour`
    ax: axes object, optional
        :class:`matplotlib.axes._subplots.AxesSubplot` to plot the contours
        onto. If unsupplied, then :func:`matplotlib.pyplot.gca()` is used to
        get the last axis used, or create a new one.
    colors: color scheme, optional
        :class:`matplotlib.colors.LinearSegmentedColormap`
        Color scheme to plot with. Recommend plotting in reverse
        (Default: :class:`matplotlib.pyplot.cm.Reds_r`)
    smooth: float, optional
        Percentage by which to smooth the contours.
        (Default: no smoothing)
    contour_line_levels: List[float], optional
        Contour lines to be plotted. (Default: [1, 2, 3])
    linewidths: float, optional
        Thickness of contour lines. (Default: 0.3)
    contour_color_levels: List[float], optional
        Contour color levels.
        (Default: `numpy.arange(0, contour_line_levels[-1] + 1, fineness)`)
    fineness: float, optional
        Spacing of contour color levels. (Default: 0.5)
    lines: bool, optional
        (Default: True)
    rasterize_contours: bool, optional
        Rasterize the contours while keeping the lines, text etc in vector
        format. Useful for reducing file size bloat and making printing
        easier when you have dense contours.
        (Default: False)

    Returns
    -------
    cbar: color bar
        :class:`matplotlib.contour.QuadContourSet`
        Colors to create a global colour bar
    """
    if ax is None:
        ax = matplotlib.pyplot.gca()
    # Get inputs
    colors = kwargs.pop('colors', matplotlib.pyplot.cm.Reds_r)
    smooth = kwargs.pop('smooth', False)
    linewidths = kwargs.pop('linewidths', 0.3)
    contour_line_levels = kwargs.pop('contour_line_levels', [1, 2, 3])
    fineness = kwargs.pop('fineness', 0.5)
    default_color_levels = numpy.arange(0, contour_line_levels[-1] + 1,
                                        fineness)
    contour_color_levels = kwargs.pop('contour_color_levels',
                                      default_color_levels)
    rasterize_contours = kwargs.pop('rasterize_contours', False)
    lines = kwargs.pop('lines', True)
    # Reject typos/unknown options rather than silently ignoring them.
    if kwargs:
        raise TypeError('Unexpected **kwargs: %r' % kwargs)
    # Convert probability mass to sigma values via the inverse error function.
    z = numpy.sqrt(2) * scipy.special.erfinv(1 - z)
    # Gaussian filter if desired the sigmas by a factor of smooth%
    if smooth:
        sigma = smooth*numpy.array(z.shape)/100.0
        z = scipy.ndimage.gaussian_filter(z, sigma=sigma, order=0)
    # Plot the filled contours onto the axis ax
    cbar = ax.contourf(x, y, z, cmap=colors, levels=contour_color_levels)
    # Rasterize contours (the rest of the figure stays in vector format)
    if rasterize_contours:
        for c in cbar.collections:
            c.set_rasterized(True)
    # Remove those annoying white lines
    for c in cbar.collections:
        c.set_edgecolor("face")
    # Plot some sigma-based contour lines
    if lines:
        ax.contour(x, y, z, colors='k', linewidths=linewidths,
                   levels=contour_line_levels)
    # Return the contours for use as a colourbar later
    return cbar
def zip_dict(a: Dict[str, A], b: Dict[str, B]) \
        -> Dict[str, Tuple[Optional[A], Optional[B]]]:
    """
    Combine the values within two dictionaries by key.

    :param a: The first dictionary.
    :param b: The second dictionary.
    :return: A dictionary containing all keys that appear in the union of a
        and b. Values are pairs: a's value for the key first, b's second;
        a missing value is None.
    """
    combined = {}
    for key in a.keys() | b.keys():
        combined[key] = (a.get(key), b.get(key))
    return combined
def _make_sentence(txt):
"""Make a sentence from a piece of text."""
#Make sure first letter is capitalized
txt = txt.strip(' ')
txt = txt[0].upper() + txt[1:] + '.'
return txt | 0.010204 |
def _construct_stage(self, deployment, swagger):
    """Constructs and returns the ApiGateway Stage.

    :param model.apigateway.ApiGatewayDeployment deployment: the Deployment for this Stage
    :returns: the Stage to which this SAM Api corresponds
    :rtype: model.apigateway.ApiGatewayStage
    """
    # If StageName is some intrinsic function, don't prefix the Stage's
    # logical ID.  This will NOT create duplicates because we allow only
    # ONE stage per API resource.
    prefix = self.stage_name if isinstance(self.stage_name, string_types) else ""
    stage = ApiGatewayStage(self.logical_id + prefix + 'Stage',
                            attributes=self.passthrough_resource_attributes)
    stage.RestApiId = ref(self.logical_id)
    stage.update_deployment_ref(deployment.logical_id)
    # Copy the user-facing stage settings straight off this Api resource.
    for attribute, value in (('StageName', self.stage_name),
                             ('CacheClusterEnabled', self.cache_cluster_enabled),
                             ('CacheClusterSize', self.cache_cluster_size),
                             ('Variables', self.variables),
                             ('MethodSettings', self.method_settings),
                             ('AccessLogSetting', self.access_log_setting),
                             ('CanarySetting', self.canary_setting),
                             ('TracingEnabled', self.tracing_enabled)):
        setattr(stage, attribute, value)
    if swagger is not None:
        deployment.make_auto_deployable(stage, swagger)
    return stage
def outfile(self, p):
    """Path for an output file.

    If :attr:`outdir` is set then the path is
    ``outdir/basename(p)`` else just ``p``
    """
    if self.outdir is None:
        return p
    return os.path.join(self.outdir, os.path.basename(p))
def resample(self,N,**kwargs):
    """Random resampling of the doublegauss distribution

    Each of the N samples is drawn from either the lower half-Gaussian
    (width ``siglo``) or the upper half-Gaussian (width ``sighi``),
    the upper branch being chosen with probability
    ``sighi / (sighi + siglo)``.
    """
    # Candidate draws from each one-sided Gaussian around the mode.
    lovals = self.mu - np.absolute(rand.normal(size=N)*self.siglo)
    hivals = self.mu + np.absolute(rand.normal(size=N)*self.sighi)
    # Branch selection, weighted by the relative widths.
    u = rand.random(size=N)
    p_hi = float(self.sighi)/(self.sighi + self.siglo)
    return np.where(u < p_hi, hivals, lovals)
def info(self, cloud=None, api_key=None, version=None, **kwargs):
    """
    Return the current state of the model associated with a given collection
    """
    # Non-batch metadata request routed through the generic custom-API handler.
    params = dict(batch=False, api_key=api_key, version=version, method="info")
    return self._api_handler(
        None, cloud=cloud, api="custom", url_params=params, **kwargs)
def reload(self):
    """Reload server configuration.

    Runs ``pg_ctl reload`` against the cluster's data directory, which
    signals the postmaster to re-read its configuration files.

    Raises:
        ClusterError: if the cluster is not running, or if ``pg_ctl``
            exits with a non-zero status.
    """
    status = self.get_status()
    if status != 'running':
        raise ClusterError('cannot reload: cluster is not running')
    process = subprocess.run(
        [self._pg_ctl, 'reload', '-D', self._data_dir],
        stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stderr = process.stderr
    if process.returncode != 0:
        # Bug fix: the original message said "pg_ctl stop" even though this
        # is the reload path; report the command that actually failed.
        raise ClusterError(
            'pg_ctl reload exited with status {:d}: {}'.format(
                process.returncode, stderr.decode()))
def Page_setDeviceMetricsOverride(self, width, height, deviceScaleFactor,
        mobile, **kwargs):
    """
    Function path: Page.setDeviceMetricsOverride
            Domain: Page
            Method name: setDeviceMetricsOverride

            WARNING: This function is marked 'Experimental'!

            Parameters:
                    Required arguments:
                            'width' (type: integer) -> Overriding width value in pixels (minimum 0, maximum 10000000). 0 disables the override.
                            'height' (type: integer) -> Overriding height value in pixels (minimum 0, maximum 10000000). 0 disables the override.
                            'deviceScaleFactor' (type: number) -> Overriding device scale factor value. 0 disables the override.
                            'mobile' (type: boolean) -> Whether to emulate mobile device. This includes viewport meta tag, overlay scrollbars, text autosizing and more.
                    Optional arguments:
                            'scale' (type: number) -> Scale to apply to resulting view image.
                            'screenWidth' (type: integer) -> Overriding screen width value in pixels (minimum 0, maximum 10000000).
                            'screenHeight' (type: integer) -> Overriding screen height value in pixels (minimum 0, maximum 10000000).
                            'positionX' (type: integer) -> Overriding view X position on screen in pixels (minimum 0, maximum 10000000).
                            'positionY' (type: integer) -> Overriding view Y position on screen in pixels (minimum 0, maximum 10000000).
                            'dontSetVisibleSize' (type: boolean) -> Do not set visible view size, rely upon explicit setVisibleSize call.
                            'screenOrientation' (type: Emulation.ScreenOrientation) -> Screen orientation override.
            No return value.

            Description: Overrides the values of device screen dimensions (window.screen.width, window.screen.height, window.innerWidth, window.innerHeight, and "device-width"/"device-height"-related CSS media query results).
    """
    # Validate the four required arguments; the assertion messages are kept
    # identical to the generated originals ("Argument '<name>' must be ...").
    for arg_name, value, py_types, label in (
            ('width', width, (int,), "['int']"),
            ('height', height, (int,), "['int']"),
            ('deviceScaleFactor', deviceScaleFactor, (float, int), "['float', 'int']"),
            ('mobile', mobile, (bool,), "['bool']"),
            ):
        assert isinstance(value, py_types), (
            "Argument '%s' must be of type '%s'. Received type: '%s'" % (
                arg_name, label, type(value)))
    # Validate any supplied optional arguments the same way.
    for opt_name, py_types, label in (
            ('scale', (float, int), "['float', 'int']"),
            ('screenWidth', (int,), "['int']"),
            ('screenHeight', (int,), "['int']"),
            ('positionX', (int,), "['int']"),
            ('positionY', (int,), "['int']"),
            ('dontSetVisibleSize', (bool,), "['bool']"),
            ):
        if opt_name in kwargs:
            assert isinstance(kwargs[opt_name], py_types), (
                "Optional argument '%s' must be of type '%s'. Received type: '%s'" % (
                    opt_name, label, type(kwargs[opt_name])))
    # Reject unknown keyword arguments outright.
    expected = ['scale', 'screenWidth', 'screenHeight', 'positionX',
        'positionY', 'dontSetVisibleSize', 'screenOrientation']
    passed_keys = list(kwargs.keys())
    assert all(key in expected for key in passed_keys), (
        "Allowed kwargs are ['scale', 'screenWidth', 'screenHeight', 'positionX', 'positionY', 'dontSetVisibleSize', 'screenOrientation']. Passed kwargs: %s" % passed_keys)
    subdom_funcs = self.synchronous_command('Page.setDeviceMetricsOverride',
        width=width, height=height, deviceScaleFactor=deviceScaleFactor,
        mobile=mobile, **kwargs)
    return subdom_funcs
def propagate_name_down(self, col_name, df_name, verbose=False):
    """
    Put the data for "col_name" into dataframe with df_name
    Used to add 'site_name' to specimen table, for example.

    Walks up the table ancestry (bottom -> child -> parent -> grandparent)
    and performs up to three left-merges so that df_name ends up with
    col_name.  Returns the (possibly updated) dataframe, or None when
    df_name cannot be read in.
    """
    if df_name not in self.tables:
        table = self.add_magic_table(df_name)[1]
        if is_null(table):
            return
    df = self.tables[df_name].df
    if col_name in df.columns:
        if all(df[col_name].apply(not_null)):
            # column already present and fully populated; nothing to do
            #print('{} already in {}'.format(col_name, df_name))
            return df
    # otherwise, do necessary merges to get col_name into df
    # get names for each level
    grandparent_table_name = col_name.split('_')[0] + "s"
    grandparent_name = grandparent_table_name[:-1]
    ind = self.ancestry.index(grandparent_table_name) - 1
    #
    parent_table_name, parent_name = self.get_table_name(ind)
    child_table_name, child_name = self.get_table_name(ind - 1)
    bottom_table_name, bottom_name = self.get_table_name(ind - 2)
    # merge in bottom level
    if child_name not in df.columns:
        # add child table if missing
        if bottom_table_name not in self.tables:
            result = self.add_magic_table(bottom_table_name)[1]
            if not isinstance(result, MagicDataFrame):
                if verbose:
                    print("-W- Couldn't read in {} data for data propagation".format(bottom_table_name))
                return df
        # add child_name to df
        add_df = self.tables[bottom_table_name].df
        # drop duplicate names
        add_df = add_df.drop_duplicates(subset=bottom_name)
        # NOTE(review): this re-checks df.columns (already known False from
        # the enclosing `if`), so the else-branch always runs; presumably the
        # intent was to check `child_name not in add_df.columns` -- confirm.
        if child_name not in df.columns:
            if verbose:
                print("-W- Cannot complete propagation, {} table is missing {} column".format(df_name, child_name))
        else:
            add_df = stringify_col(add_df, child_name)
            df = stringify_col(df, bottom_name)
            df = df.merge(add_df[[child_name]],
                          left_on=[bottom_name],
                          right_index=True, how="left")
            self.tables[df_name].df = df
    # merge in one level above
    if parent_name not in df.columns:
        # add parent_table if missing
        if child_table_name not in self.tables:
            result = self.add_magic_table(child_table_name)[1]
            if not isinstance(result, MagicDataFrame):
                if verbose:
                    print("-W- Couldn't read in {} data".format(child_table_name))
                    print("-I- Make sure you've provided the correct file name")
                return df
        # add parent_name to df
        add_df = self.tables[child_table_name].df
        # drop duplicate names
        add_df = add_df.drop_duplicates(subset=child_name)
        if parent_name not in add_df:
            if verbose:
                print('-W- could not finish propagating names: {} table is missing {} column'.format(child_table_name, parent_name))
        elif parent_name not in df:
            if verbose:
                print('-W- could not finish propagating names: {} table is missing {} column'.format(df_name, parent_name))
        else:
            add_df = stringify_col(add_df, parent_name)
            df = stringify_col(df, child_name)
            df = df.merge(add_df[[parent_name]],
                          left_on=[child_name],
                          right_index=True, how="left")
            self.tables[df_name].df = df
    # merge in two levels above
    if grandparent_name not in df.columns:
        # add grandparent table if it is missing
        if parent_table_name not in self.tables:
            result = self.add_magic_table(parent_table_name)[1]
            if not isinstance(result, MagicDataFrame):
                if verbose:
                    print("-W- Couldn't read in {} data".format(parent_table_name))
                    print("-I- Make sure you've provided the correct file name")
                return df
        # add grandparent name to df
        add_df = self.tables[parent_table_name].df
        # drop duplicate names
        add_df = add_df.drop_duplicates(subset=parent_name)
        if grandparent_name not in add_df.columns:
            if verbose:
                print('-W- could not finish propagating names: {} table is missing {} column'.format(parent_table_name, grandparent_name))
        elif parent_name not in df.columns:
            if verbose:
                print('-W- could not finish propagating names: {} table is missing {} column'.format(df_name, parent_name))
        else:
            add_df = stringify_col(add_df, grandparent_name)
            df = stringify_col(df, parent_name)
            df = df.merge(add_df[[grandparent_name]],
                          left_on=[parent_name],
                          right_index=True, how="left")
    df = stringify_col(df, grandparent_name)
    # update the Contribution
    self.tables[df_name].df = df
    return df
def list_pages_ajax(request, invalid_move=False):
    """Render pages table for ajax function."""
    # Evaluate language before the page queryset, matching original order.
    context = {
        'invalid_move': invalid_move,
        'language': get_language_from_request(request),
        'pages': Page.objects.root(),
    }
    return render_to_response("admin/basic_cms/page/change_list_table.html",
                              context,
                              context_instance=RequestContext(request))
def analyse(self, name):
    """
    reads the specified file.
    :param name: the name.
    :return: the analysis as frequency/Pxx.
    """
    if name in self._cache:
        target = self._cache[name]
        if target['type'] == 'wav':
            # 'start'/'end' act as sentinel strings meaning "no bound"
            signal = self._uploadController.loadSignal(target['filename'],
                                                       start=target['start'] if target['start'] != 'start' else None,
                                                       end=target['end'] if target['end'] != 'end' else None)
            if signal is not None:
                # TODO allow user defined window
                return getattr(signal, target['analysis'])(ref=1.0)
            else:
                # NOTE(review): returns a (None, 404) pair here but a bare
                # None in the fall-through cases -- presumably a web-handler
                # convention; confirm callers handle both shapes.
                return None, 404
            pass  # NOTE(review): unreachable -- both branches above return
        elif target['type'] == 'hinge':
            hingePoints = np.array(target['hinge']).astype(np.float64)
            x = hingePoints[:, 1]
            y = hingePoints[:, 0]
            # extend as straight line from 0 to 500
            if x[0] != 0:
                # tiny positive epsilon keeps the log-space interpolation valid
                x = np.insert(x, 0, 0.0000001)
                y = np.insert(y, 0, y[0])
            if x[-1] != 500:
                x = np.insert(x, len(x), 500.0)
                y = np.insert(y, len(y), y[-1])
            # convert the y axis dB values into a linear value
            y = 10 ** (y / 10)
            # perform a logspace interpolation
            f = self.log_interp1d(x, y)
            # remap to 0-500
            xnew = np.linspace(x[0], x[-1], num=500, endpoint=False)
            # and convert back to dB
            return xnew, 10 * np.log10(f(xnew))
        else:
            logger.error('Unknown target type with name ' + name)
            return None
def run_service_actions(self):
    """Run any actions on services requested."""
    if not self.service_actions:
        return
    for svc_action in self.service_actions:
        name, actions = svc_action['service'], svc_action['actions']
        log("Running service '%s' actions '%s'" % (name, actions),
            level=DEBUG)
        for action in actions:
            try:
                check_call(['service', name, action])
            except CalledProcessError as exc:
                # Log and continue: one failed action shouldn't stop the rest.
                log("Service name='%s' action='%s' failed - %s" %
                    (name, action, exc), level=WARNING)
def magicrun(
    text,
    shell,
    prompt_template="default",
    aliases=None,
    envvars=None,
    extra_commands=None,
    speed=1,
    test_mode=False,
    commentecho=False,
):
    """Echo out each character in ``text`` as keyboard characters are pressed,
    wait for a RETURN keypress, then run the ``text`` in a shell context.
    """
    # magictype returns a truthy "go back to regular typing" signal when the
    # user aborts; in that case skip running the command entirely.
    abort_signal = magictype(text, prompt_template, speed)
    if abort_signal:
        return abort_signal
    run_command(
        text,
        shell,
        aliases=aliases,
        envvars=envvars,
        extra_commands=extra_commands,
        test_mode=test_mode,
    )
    return abort_signal
def get_dirs_differance(self):
    '''
    Diff the pre- and post-install DirsContent snapshots and distill the
    result into ``self.data``: installed packages, plain modules, scripts
    and whether any ``.pth`` file was dropped into site-packages.
    '''
    try:
        diff = self.dirs_after_install - self.dirs_before_install
    except ValueError:
        raise VirtualenvFailException(
            "Some of the DirsContent attributes is uninicialized")
    self.data['has_pth'] = any(
        entry.endswith('.pth') for entry in diff.lib_sitepackages)
    site_packages = site_packages_filter(diff.lib_sitepackages)
    packages = sorted(
        entry for entry in site_packages if not entry.endswith(MODULE_SUFFIXES))
    self.data['packages'] = packages
    # whatever is left over in site-packages is a top-level module file
    self.data['py_modules'] = sorted(
        {os.path.splitext(module)[0] for module in site_packages - set(packages)})
    self.data['scripts'] = scripts_filter(sorted(diff.bindir))
    logger.debug('Data from files differance in virtualenv:')
    logger.debug(pprint.pformat(self.data))
def translate_formes_visuelles(s):
    """s.u.-'O:M:.-'O:.-',+s.u.-'M:O:.-O:.-'M:.-', => b.-S:.U:.-'O:M:.-'O:.-', + b.-S:.U:.-'M:O:.-O:.-'M:.-',

    Replace the substance of each multiplicative term with b.-S:.U:.-',
    leaving attribute and mode untouched.
    """
    def _with_bSU_substance(term):
        # a multiplicative script unpacks as (substance, attribute, mode)
        _, attribute, mode = term
        return m(script("b.-S:.U:.-'"), attribute, mode)

    if isinstance(s, AdditiveScript):
        return AdditiveScript([_with_bSU_substance(child) for child in s.children])
    return _with_bSU_substance(s)
def exit(self):
    """Overwrite the exit method to close the GPU API."""
    try:
        if self.nvml_ready:
            pynvml.nvmlShutdown()
    except Exception as e:
        # Shutdown failures are non-fatal; just record them.
        logger.debug("pynvml failed to shutdown correctly ({})".format(e))
    # Call the father exit method
    super(Plugin, self).exit()
def _encrypt(key_data, derived_key_information):
  """
  Encrypt 'key_data' using the Advanced Encryption Standard (AES-256) algorithm.
  'derived_key_information' should contain a key strengthened by PBKDF2.  The
  key size is 256 bits and AES's mode of operation is set to CTR (CounTeR Mode).
  The HMAC of the ciphertext is generated to ensure the ciphertext has not been
  modified.

  'key_data' is the JSON string representation of the key.  In the case
  of RSA keys, this format would be 'securesystemslib.formats.RSAKEY_SCHEMA':

  {'keytype': 'rsa',
   'keyval': {'public': '-----BEGIN RSA PUBLIC KEY----- ...',
              'private': '-----BEGIN RSA PRIVATE KEY----- ...'}}

  'derived_key_information' is a dictionary of the form:
    {'salt': '...',
     'derived_key': '...',
     'iterations': '...'}

  Returns a single delimited string:
  salt$iterations$hmac$iv$ciphertext (with '$' standing for the actual
  _ENCRYPTION_DELIMITER), all binary fields hex-encoded.

  'securesystemslib.exceptions.CryptoError' raised if the encryption fails.
  """
  # Generate a random Initialization Vector (IV).  Follow the provably secure
  # encrypt-then-MAC approach, which affords the ability to verify ciphertext
  # without needing to decrypt it and preventing an attacker from feeding the
  # block cipher malicious data.  Modes like GCM provide both encryption and
  # authentication, whereas CTR only provides encryption.

  # Generate a random 128-bit IV.  Random bits of data is needed for salts and
  # initialization vectors suitable for the encryption algorithms used in
  # 'pyca_crypto_keys.py'.
  iv = os.urandom(16)

  # Construct an AES-CTR Cipher object with the given key and a randomly
  # generated IV.
  symmetric_key = derived_key_information['derived_key']
  encryptor = Cipher(algorithms.AES(symmetric_key), modes.CTR(iv),
                     backend=default_backend()).encryptor()

  # Encrypt the plaintext and get the associated ciphertext.
  # Do we need to check for any exceptions?
  ciphertext = encryptor.update(key_data.encode('utf-8')) + encryptor.finalize()

  # Generate the hmac of the ciphertext to ensure it has not been modified.
  # The decryption routine may verify a ciphertext without having to perform
  # a decryption operation.
  # (re-reads the same derived key as above; kept for symmetry)
  symmetric_key = derived_key_information['derived_key']
  salt = derived_key_information['salt']
  hmac_object = \
    cryptography.hazmat.primitives.hmac.HMAC(symmetric_key, hashes.SHA256(),
                                             backend=default_backend())
  hmac_object.update(ciphertext)
  hmac_value = binascii.hexlify(hmac_object.finalize())

  # Store the number of PBKDF2 iterations used to derive the symmetric key so
  # that the decryption routine can regenerate the symmetric key successfully.
  # The PBKDF2 iterations are allowed to vary for the keys loaded and saved.
  iterations = derived_key_information['iterations']

  # Return the salt, iterations, hmac, initialization vector, and ciphertext
  # as a single string.  These five values are delimited by
  # '_ENCRYPTION_DELIMITER' to make extraction easier.  This delimiter is
  # arbitrarily chosen and should not occur in the hexadecimal representations
  # of the fields it is separating.
  return binascii.hexlify(salt).decode() + _ENCRYPTION_DELIMITER + \
         str(iterations) + _ENCRYPTION_DELIMITER + \
         hmac_value.decode() + _ENCRYPTION_DELIMITER + \
         binascii.hexlify(iv).decode() + _ENCRYPTION_DELIMITER + \
         binascii.hexlify(ciphertext).decode()
def loudness(self, gain_db=-10.0, reference_level=65.0):
    '''Loudness control. Similar to the gain effect, but provides
    equalisation for the human auditory system.

    The gain is adjusted by gain_db and the signal is equalised according
    to ISO 226 w.r.t. reference_level.

    Parameters
    ----------
    gain_db : float, default=-10.0
        Loudness adjustment amount (in dB)
    reference_level : float, default=65.0
        Reference level (in dB) according to which the signal is equalized.
        Must be between 50 and 75 (dB)

    See Also
    --------
    gain
    '''
    if not is_number(gain_db):
        raise ValueError('gain_db must be a number.')
    if not is_number(reference_level):
        raise ValueError('reference_level must be a number')
    if not 50 <= reference_level <= 75:
        raise ValueError('reference_level must be between 50 and 75')
    # Append the sox effect and record it in the effect log.
    self.effects.extend([
        'loudness',
        '{:f}'.format(gain_db),
        '{:f}'.format(reference_level),
    ])
    self.effects_log.append('loudness')
    return self
def get_gain(data, attr, class_attr,
    method=DEFAULT_DISCRETE_METRIC,
    only_sub=0, prefer_fewer_values=False, entropy_func=None):
    """
    Calculates the information gain (reduction in entropy) that would
    result by splitting the data on the chosen attribute (attr).

    Parameters:
    data := iterable of dict-like records (re-iterated for the main entropy)
    attr := attribute to evaluate a split on
    class_attr := target attribute whose entropy is measured
    only_sub := if truthy, return just the weighted subset entropy
    prefer_fewer_values := Weights the gain by the count of the attribute's
        unique values. If multiple attributes have the same gain, but one has
        slightly fewer attributes, this will cause the one with fewer
        attributes to be preferred.
    entropy_func := override for the entropy function (defaults to entropy)

    Returns the gain, or (gain, 1/value_count) when prefer_fewer_values.
    """
    entropy_func = entropy_func or entropy
    # Single pass: group records by their value of `attr`.  This yields both
    # the per-value subsets and their frequencies, replacing the original
    # O(values * records) rescans and the loop-invariant sum() that was
    # recomputed on every iteration.
    subsets = defaultdict(list)
    for record in data:
        subsets[record.get(attr)].append(record)
    total = float(sum(len(subset) for subset in subsets.values()))
    # Sum of each subset's entropy weighted by its probability of occurring.
    subset_entropy = 0.0
    for data_subset in subsets.values():
        val_prob = len(data_subset) / total
        subset_entropy += val_prob * entropy_func(
            data_subset, class_attr, method=method)
    if only_sub:
        return subset_entropy
    # Subtract the entropy of the chosen attribute from the entropy of the
    # whole data set with respect to the target attribute (and return it)
    main_entropy = entropy_func(data, class_attr, method=method)
    # Prefer gains on attributes with fewer values.
    if prefer_fewer_values:
        # Secondary key breaks gain ties in favour of fewer unique values.
        return ((main_entropy - subset_entropy), 1./len(subsets))
    else:
        return (main_entropy - subset_entropy)
def plotly_graph(
    kmgraph,
    graph_layout="kk",
    colorscale=None,
    showscale=True,
    factor_size=3,
    edge_linecolor="rgb(180,180,180)",
    edge_linewidth=1.5,
    node_linecolor="rgb(255,255,255)",
    node_linewidth=1.0,
):
    """Generate Plotly data structures that represent the mapper graph

    Parameters
    ----------
    kmgraph: dict representing the mapper graph,
             returned by the function get_mapper_graph()
    graph_layout: igraph layout; recommended 'kk' (kamada-kawai)
                  or 'fr' (fruchterman-reingold)
    colorscale: a Plotly colorscale(colormap) to color graph nodes
    showscale: boolean to display or not the colorbar
    factor_size: a factor for the node size
    edge_linecolor: color of the edge lines
    edge_linewidth: width of the edge lines
    node_linecolor: color of the node outline
    node_linewidth: width of the node outline

    Returns
    -------
    The plotly traces (dicts) representing the graph edges and nodes
    """
    if not colorscale:
        colorscale = default_colorscale
    # define an igraph.Graph instance of n_nodes
    n_nodes = len(kmgraph["nodes"])
    if n_nodes == 0:
        raise ValueError("Your graph has 0 nodes")
    G = ig.Graph(n=n_nodes)
    links = [(e["source"], e["target"]) for e in kmgraph["links"]]
    G.add_edges(links)
    layt = G.layout(graph_layout)
    hover_text = [node["name"] for node in kmgraph["nodes"]]
    color_vals = [node["color"] for node in kmgraph["nodes"]]
    # Bug fix: `np.int` was deprecated in NumPy 1.20 and removed in 1.24;
    # the alias was always just the builtin `int`.
    node_size = np.array(
        [factor_size * node["size"] for node in kmgraph["nodes"]], dtype=int
    )
    Xn, Yn, Xe, Ye = _get_plotly_data(links, layt)
    edge_trace = dict(
        type="scatter",
        x=Xe,
        y=Ye,
        mode="lines",
        line=dict(color=edge_linecolor, width=edge_linewidth),
        hoverinfo="none",
    )
    node_trace = dict(
        type="scatter",
        x=Xn,
        y=Yn,
        mode="markers",
        marker=dict(
            size=node_size.tolist(),
            color=color_vals,
            opacity=1.0,
            colorscale=colorscale,
            showscale=showscale,
            line=dict(color=node_linecolor, width=node_linewidth),
            colorbar=dict(thickness=20, ticklen=4, x=1.01, tickfont=dict(size=10)),
        ),
        text=hover_text,
        hoverinfo="text",
    )
    return [edge_trace, node_trace]
def stats(self):
    """Shortcut to print useful summary counts for interactive use."""
    for label, collection in (("Classes.....", self.all_classes),
                              ("Properties..", self.all_properties)):
        printDebug("%s: %d" % (label, len(collection)))
def pi0est(p_values, lambda_ = np.arange(0.05,1.0,0.05), pi0_method = "smoother", smooth_df = 3, smooth_log_pi0 = False):
    """ Estimate pi0 according to bioconductor/qvalue

    pi0 is the estimated proportion of true null hypotheses among the
    supplied p-values; lambda_ is the tuning grid, and pi0_method selects
    either spline smoothing or the bootstrap estimator.
    Returns a dict with keys 'pi0', 'pi0_lambda', 'lambda_', 'pi0_smooth'.
    """
    # Compare to bioconductor/qvalue reference implementation
    # import rpy2
    # import rpy2.robjects as robjects
    # from rpy2.robjects import pandas2ri
    # pandas2ri.activate()

    # smoothspline=robjects.r('smooth.spline')
    # predict=robjects.r('predict')

    # Drop NaN/inf p-values before estimation.
    p = np.array(p_values)
    rm_na = np.isfinite(p)
    p = p[rm_na]
    m = len(p)
    ll = 1
    if isinstance(lambda_, np.ndarray ):
        ll = len(lambda_)
        lambda_ = np.sort(lambda_)

    if (min(p) < 0 or max(p) > 1):
        raise click.ClickException("p-values not in valid range [0,1].")
    elif (ll > 1 and ll < 4):
        raise click.ClickException("If lambda_ is not predefined (one value), at least four data points are required.")
    elif (np.min(lambda_) < 0 or np.max(lambda_) >= 1):
        raise click.ClickException("Lambda must be within [0,1)")

    if (ll == 1):
        # Single-lambda estimator: fraction of p-values above lambda, rescaled.
        pi0 = np.mean(p >= lambda_)/(1 - lambda_)
        pi0_lambda = pi0
        pi0 = np.minimum(pi0, 1)
        pi0Smooth = False
    else:
        pi0 = []
        for l in lambda_:
            pi0.append(np.mean(p >= l)/(1 - l))
        pi0_lambda = pi0

        if (pi0_method == "smoother"):
            if smooth_log_pi0:
                pi0 = np.log(pi0)
                spi0 = sp.interpolate.UnivariateSpline(lambda_, pi0, k=smooth_df)
                pi0Smooth = np.exp(spi0(lambda_))
                # spi0 = smoothspline(lambda_, pi0, df = smooth_df) # R reference function
                # pi0Smooth = np.exp(predict(spi0, x = lambda_).rx2('y')) # R reference function
            else:
                spi0 = sp.interpolate.UnivariateSpline(lambda_, pi0, k=smooth_df)
                pi0Smooth = spi0(lambda_)
                # spi0 = smoothspline(lambda_, pi0, df = smooth_df) # R reference function
                # pi0Smooth = predict(spi0, x = lambda_).rx2('y') # R reference function
            # Take the smoothed estimate at the largest lambda, capped at 1.
            pi0 = np.minimum(pi0Smooth[ll-1],1)
        elif (pi0_method == "bootstrap"):
            # NOTE(review): np.percentile uses a 0-100 scale, so 0.1 here is
            # the 0.1th percentile; R's quantile(pi0, 0.1) in the qvalue
            # reference is the 10th percentile -- verify whether 10 was meant.
            minpi0 = np.percentile(pi0,0.1)
            W = []
            for l in lambda_:
                W.append(np.sum(p >= l))
            # Mean-squared-error criterion from the qvalue bootstrap method.
            mse = (np.array(W) / (np.power(m,2) * np.power((1 - lambda_),2))) * (1 - np.array(W) / m) + np.power((pi0 - minpi0),2)
            pi0 = np.minimum(pi0[np.argmin(mse)],1)
            pi0Smooth = False
        else:
            raise click.ClickException("pi0_method must be one of 'smoother' or 'bootstrap'.")
    if (pi0<=0):
        raise click.ClickException("The estimated pi0 <= 0. Check that you have valid p-values or use a different range of lambda.")

    return {'pi0': pi0, 'pi0_lambda': pi0_lambda, 'lambda_': lambda_, 'pi0_smooth': pi0Smooth}
def phonetic_fingerprint(
    phrase, phonetic_algorithm=double_metaphone, joiner=' ', *args, **kwargs
):
    """Return the phonetic fingerprint of a phrase.

    Thin functional wrapper around :py:meth:`Phonetic.fingerprint`.

    Parameters
    ----------
    phrase : str
        The string from which to calculate the phonetic fingerprint
    phonetic_algorithm : function
        A phonetic algorithm that takes a string and returns a string
        (presumably a phonetic representation of the original string). By
        default, this function uses :py:func:`.double_metaphone`.
    joiner : str
        The string that will be placed between each word
    *args
        Variable length argument list
    **kwargs
        Arbitrary keyword arguments

    Returns
    -------
    str
        The phonetic fingerprint of the phrase

    Examples
    --------
    >>> phonetic_fingerprint('The quick brown fox jumped over the lazy dog.')
    '0 afr fks jmpt kk ls prn tk'
    >>> from abydos.phonetic import soundex
    >>> phonetic_fingerprint('The quick brown fox jumped over the lazy dog.',
    ... phonetic_algorithm=soundex)
    'b650 d200 f200 j513 l200 o160 q200 t000'
    """
    fingerprinter = Phonetic()
    return fingerprinter.fingerprint(
        phrase, phonetic_algorithm, joiner, *args, **kwargs
    )
def console_user(username=False):
    '''
    Gets the UID or Username of the current console user.

    :param bool username: Whether to return the username of the console
        user instead of the UID. Defaults to False
    :return: The uid or username of the console user.
    :rtype: Integer of the UID, or a string of the username.

    Raises:
        CommandExecutionError: If we fail to get the UID.

    CLI Example:

    .. code-block:: bash

        import salt.utils.mac_service
        salt.utils.mac_service.console_user()
    '''
    try:
        # /dev/console is owned by whoever currently holds the console;
        # stat field 4 is st_uid.
        uid = os.stat('/dev/console')[4]
    except (OSError, IndexError):
        # we should never get here but raise an error if so
        raise CommandExecutionError('Failed to get a UID for the console user.')
    return pwd.getpwuid(uid)[0] if username else uid
def age(*paths):
    '''Return the minimum age of a set of files.

    Returns 0 if no paths are given.
    Returns time.time() if a path does not exist.
    '''
    if not paths:
        return 0
    # Snapshot the clock once so all ages are measured against the same
    # instant (the original called time.time() per path, so ages could be
    # mutually inconsistent and the existence check could race the min()).
    now = time.time()
    ages = []
    for path in paths:
        if not os.path.exists(path):
            return now
        ages.append(now - os.path.getmtime(path))
    return min(ages)
def set_requests_per_second(self, req_per_second):
    '''Adjusts the request/second at run-time

    Also refreshes the cached per-request duration used for pacing.
    '''
    self.req_per_second = req_per_second
    self.req_duration = 1 / req_per_second
def _update_alpha(self, event=None):
"""Update display after a change in the alpha spinbox."""
a = self.alpha.get()
hexa = self.hexa.get()
hexa = hexa[:7] + ("%2.2x" % a).upper()
self.hexa.delete(0, 'end')
self.hexa.insert(0, hexa)
self.alphabar.set(a)
self._update_preview() | 0.0059 |
def install(plugin_name, *args, **kwargs):
    '''
    Install plugin packages based on specified Conda channels.

    .. versionchanged:: 0.19.1
        Do not save rollback info on dry-run.

    .. versionchanged:: 0.24
        Remove channels argument.  Use Conda channels as configured in Conda
        environment.

        Note that channels can still be explicitly set through :data:`*args`.

    Parameters
    ----------
    plugin_name : str or list
        Plugin package(s) to install.

        Version specifiers are also supported, e.g., ``package >=1.0.5``.
    *args
        Extra arguments to pass to Conda ``install`` command.

    Returns
    -------
    dict
        Conda installation log object (from JSON Conda install output).
    '''
    # NOTE(review): types.StringTypes exists only on Python 2; on Python 3
    # this raises AttributeError.  Kept as-is since the module appears to
    # target Python 2 -- confirm before porting.
    if isinstance(plugin_name, types.StringTypes):
        plugin_name = [plugin_name]
    # Perform installation
    conda_args = (['install', '-y', '--json'] + list(args) + plugin_name)
    install_log_js = ch.conda_exec(*conda_args, verbose=False)
    # conda can emit several NUL-separated JSON documents; keep only the last.
    install_log = json.loads(install_log_js.split('\x00')[-1])
    if 'actions' in install_log and not install_log.get('dry_run'):
        # Install command modified Conda environment.
        _save_action({'conda_args': conda_args, 'install_log': install_log})
    # NOTE(review): this raises KeyError when the log has no 'actions' key
    # (e.g. nothing needed installing) -- verify against conda's output.
    logger.debug('Installed plugin(s): ```%s```', install_log['actions'])
    return install_log
def get_value_product_unique(self, pos):
    """
    Return all products unique relationship with POS's Storage (only salable zones)
    """
    # Restrict to storages of this POS that contain at least one salable zone.
    salable_storages = pos.storage_stock.filter(storage_zones__salable=True)
    return ProductUnique.objects.filter(
        box__box_structure__zone__storage__in=salable_storages,
        product_final=self)
def to_world(self, shape, dst_crs=None):
    """Return the shape (provided in pixel coordinates) in world coordinates, as GeoVector."""
    # Default to this raster's own CRS when no target CRS is given.
    target_crs = self.crs if dst_crs is None else dst_crs
    reprojected = transform(shape, self.crs, target_crs, dst_affine=self.affine)
    return GeoVector(reprojected, target_crs)
def align_to_sort_bam(fastq1, fastq2, aligner, data):
    """Align to the named genome build, returning a sorted BAM file.

    :param fastq1: first fastq file, or a BAM file to re-align
    :param fastq2: second fastq for paired-end input (may be None)
    :param aligner: name of the aligner whose index to use
    :param data: per-sample data dictionary; updated with the alignment output
    :returns: the updated data dictionary
    """
    names = data["rgnames"]
    align_dir_parts = [data["dirs"]["work"], "align", names["sample"]]
    if data.get("disambiguate"):
        # disambiguation keeps per-genome alignments in separate subdirectories
        align_dir_parts.append(data["disambiguate"]["genome_build"])
    aligner_index = _get_aligner_index(aligner, data)
    align_dir = utils.safe_makedir(os.path.join(*align_dir_parts))
    ref_file = tz.get_in(("reference", "fasta", "base"), data)
    # BAM input gets re-aligned; fastq input goes through the standard path.
    if fastq1.endswith(".bam"):
        data = _align_from_bam(fastq1, aligner, aligner_index, ref_file,
                               names, align_dir, data)
    else:
        data = _align_from_fastq(fastq1, fastq2, aligner, aligner_index, ref_file,
                                 names, align_dir, data)
    if data["work_bam"] and utils.file_exists(data["work_bam"]):
        if data.get("align_split") and dd.get_mark_duplicates(data):
            # If merging later with with bamsormadup need query sorted inputs
            # but CWL requires a bai file. Create a fake one to make it happy.
            bam.fake_index(data["work_bam"], data)
        else:
            bam.index(data["work_bam"], data["config"])
            # also index split-read/discordant BAMs if the aligner produced them
            for extra in ["-sr", "-disc"]:
                extra_bam = utils.append_stem(data['work_bam'], extra)
                if utils.file_exists(extra_bam):
                    bam.index(extra_bam, data["config"])
    return data
def send(self, jlink):
    """Starts the SWD transaction.

    Steps for a Read Transaction:

      1. First phase in which the request is sent.
      2. Second phase in which an ACK is received.  This phase consists of
         three bits.  An OK response has the value ``1``.
      3. Once the ACK is received, the data phase can begin.  Consists of
         ``32`` data bits followed by ``1`` parity bit calclulated based
         on all ``32`` data bits.
      4. After the data phase, the interface must be clocked for at least
         eight cycles to clock the transaction through the SW-DP; this is
         done by reading an additional eight bits (eight clocks).

    Args:
      self (ReadRequest): the ``ReadRequest`` instance
      jlink (JLink): the ``JLink`` instance to use for write/read

    Returns:
      An ``Response`` instance.
    """
    # Base class sends the request phase and returns the bit offset of the ACK.
    ack = super(ReadRequest, self).send(jlink)
    # Write the read command, then read the data and status.
    jlink.swd_write32(0x0, 0x0)
    jlink.swd_write8(0xFC, 0x0)
    # NOTE(review): offsets into the J-Link SWD bit buffer -- presumably
    # 3 ACK bits at `ack`, 32 data bits at `ack + 3`, and the parity bit at
    # `ack + 35`; confirm against the J-Link SDK documentation.
    status = jlink.swd_read8(ack) & 7
    data = jlink.swd_read32(ack + 3)
    if status == Response.STATUS_ACK:
        # Check the parity
        parity = jlink.swd_read8(ack + 35) & 1
        if util.calculate_parity(data) != parity:
            # Parity mismatch: report failure but keep the raw data.
            return Response(-1, data)
    return Response(status, data)
def invert(self, src=None):
    """Calculate the inverted matrix.  Return 0 if successful and replace
    current one.  Else return 1 and do nothing.
    """
    source = self if src is None else src
    failed, coefficients = TOOLS._invert_matrix(source)
    if failed == 1:
        # Matrix is singular; leave this instance untouched.
        return 1
    self.a, self.b, self.c, self.d, self.e, self.f = coefficients
    return 0
def export(self, name, columns, points):
    """Write the points to the ES server."""
    logger.debug("Export {} stats to ElasticSearch".format(name))

    # Build one bulk action per (column, point) pair.
    # https://elasticsearch-py.readthedocs.io/en/master/helpers.html
    actions = []
    for column, point in zip(columns, points):
        stamp = datetime.utcnow()
        action = {
            "_index": self.index,
            "_id": '{}.{}'.format(name, column),
            "_type": "glances",
            "_source": {
                "plugin": name,
                "metric": column,
                "value": str(point),
                "timestamp": stamp.isoformat('T')
            }
        }
        logger.debug("Exporting the following object to elasticsearch: {}".format(action))
        actions.append(action)

    # Write input to the ES index; failures are logged, not raised.
    try:
        helpers.bulk(self.client, actions)
    except Exception as e:
        logger.error("Cannot export {} stats to ElasticSearch ({})".format(name, e))
def newKey(a, b, k):
    """Generate an RSA key pair from two large pseudo-primes.

    Tries to find two distinct pseudo-primes roughly between ``a`` and
    ``b`` (``k`` is the accuracy parameter forwarded to ``findAPrime``),
    then derives the modulus and the public/private exponents.

    :param a: lower bound of the prime search range
    :param b: upper bound of the prime search range
    :param k: accuracy parameter for the primality test
    :returns: tuple ``(n, e, d)`` of modulus, public and private exponent
    :raises ValueError: if it fails to find a suitable prime
    """
    try:
        p = findAPrime(a, b, k)
        # q must differ from p: n = p*q with p == q would be trivially
        # factorable (a perfect square).
        while True:
            q = findAPrime(a, b, k)
            if q != p:
                break
    except Exception:
        # Was a bare ``except:`` which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to real errors only.
        raise ValueError('Failed to find a prime between %s and %s' % (a, b))
    n = p * q
    m = (p - 1) * (q - 1)
    # Pick a public exponent e coprime with phi(n) = m.
    while True:
        e = random.randint(1, m)
        if coPrime([e, m]):
            break
    # Private exponent is the modular inverse of e modulo m.
    d = modInv(e, m)
    return (n, e, d)
def fast_gradient_method(model_fn, x, eps, ord, clip_min=None, clip_max=None, y=None,
                         targeted=False, sanity_checks=False):
    """
    Tensorflow 2.0 implementation of the Fast Gradient Method.
    :param model_fn: a callable that takes an input tensor and returns the model logits.
    :param x: input tensor.
    :param eps: epsilon (input variation parameter); see https://arxiv.org/abs/1412.6572.
    :param ord: Order of the norm (mimics NumPy). Possible values: np.inf, 1 or 2.
    :param clip_min: (optional) float. Minimum float value for adversarial example components.
    :param clip_max: (optional) float. Maximum float value for adversarial example components.
    :param y: (optional) Tensor with true labels. If targeted is true, then provide the
              target label. Otherwise, only provide this parameter if you'd like to use true
              labels when crafting adversarial samples. Otherwise, model predictions are used
              as labels to avoid the "label leaking" effect (explained in this paper:
              https://arxiv.org/abs/1611.01236). Default is None.
    :param targeted: (optional) bool. Is the attack targeted or untargeted?
              Untargeted, the default, will try to make the label incorrect.
              Targeted will instead try to move in the direction of being more like y.
    :param sanity_checks: bool, if True, include asserts (Turn them off to use less runtime /
              memory or for unit tests that intentionally pass strange input)
    :return: a tensor for the adversarial example
    :raises ValueError: if ``ord`` is not one of np.inf, 1 or 2.
    """
    if ord not in [np.inf, 1, 2]:
        raise ValueError("Norm order must be either np.inf, 1, or 2.")

    asserts = []

    # If a data range was specified, check that the input was in that range
    if clip_min is not None:
        asserts.append(tf.math.greater_equal(x, clip_min))

    if clip_max is not None:
        asserts.append(tf.math.less_equal(x, clip_max))

    if y is None:
        # Using model predictions as ground truth to avoid label leaking
        y = tf.argmax(model_fn(x), 1)

    # Loss gradient w.r.t. the input; direction handling for targeted
    # attacks is delegated to compute_gradient.
    grad = compute_gradient(model_fn, x, y, targeted)

    # Scale the gradient into the eps-ball of the requested norm.
    optimal_perturbation = optimize_linear(grad, eps, ord)
    # Add perturbation to original example to obtain adversarial example
    adv_x = x + optimal_perturbation

    # If clipping is needed, reset all values outside of [clip_min, clip_max]
    if (clip_min is not None) or (clip_max is not None):
        # We don't currently support one-sided clipping
        assert clip_min is not None and clip_max is not None
        adv_x = tf.clip_by_value(adv_x, clip_min, clip_max)

    if sanity_checks:
        assert np.all(asserts)
    return adv_x
def human_duration(duration_seconds: float) -> str:
    """Render a duration in seconds as a short human-friendly string.

    Durations under one millisecond collapse to ``'0 ms'``; durations
    under one second are shown in whole milliseconds; anything longer
    is shown in whole (truncated) seconds.
    """
    if duration_seconds < 0.001:
        return '0 ms'
    if duration_seconds < 1:
        millis = int(duration_seconds * 1000)
        return '{} ms'.format(millis)
    whole_seconds = int(duration_seconds)
    return '{} s'.format(whole_seconds)
def filter_data(data, kernel, mode='constant', fill_value=0.0,
                check_normalization=False):
    """
    Convolve a 2D image with a 2D kernel.

    The kernel may either be a 2D `~numpy.ndarray` or a
    `~astropy.convolution.Kernel2D` object.

    Parameters
    ----------
    data : array_like
        The 2D array of the image.
    kernel : array-like (2D) or `~astropy.convolution.Kernel2D`
        The 2D kernel used to filter the input ``data``. Filtering the
        ``data`` will smooth the noise and maximize detectability of
        objects with a shape similar to the kernel.
    mode : {'constant', 'reflect', 'nearest', 'mirror', 'wrap'}, optional
        Determines how the array borders are handled. For the
        ``'constant'`` mode (the default), values outside the array
        borders are set to ``fill_value``.
    fill_value : scalar, optional
        Value used beyond the array borders when ``mode`` is
        ``'constant'``. The default is ``0.0``.
    check_normalization : bool, optional
        If `True`, a warning is issued when the kernel is not
        normalized to 1.
    """
    from scipy import ndimage

    if kernel is None:
        # No kernel: hand the data back unfiltered.
        return data

    kernel_array = kernel.array if isinstance(kernel, Kernel2D) else kernel

    if check_normalization and not np.allclose(np.sum(kernel_array), 1.0):
        warnings.warn('The kernel is not normalized.',
                      AstropyUserWarning)

    # NOTE: astropy.convolution.convolve fails with zero-sum
    # kernels (used in findstars) (cf. astropy #1647)
    # Cast to float first so ndimage.convolve always returns a float
    # image, even when the input data is an integer array.
    return ndimage.convolve(data.astype(float), kernel_array, mode=mode,
                            cval=fill_value)
def req(self, method, params=()):
    """send request to ppcoind"""
    payload = json.dumps({"method": method, "params": params, "jsonrpc": "1.1"})
    reply = self.session.post(self.url, data=payload).json()
    # JSON-RPC: exactly one of "error" / "result" is meaningful.
    if reply["error"] is None:
        return reply["result"]
    return reply["error"]
def loudest_triggers_from_cli(opts, coinc_parameters=None,
                              sngl_parameters=None, bank_parameters=None):
    """ Parses the CLI options related to find the loudest coincident or
    single detector triggers.

    Parameters
    ----------
    opts : object
        Result of parsing the CLI with OptionParser.
    coinc_parameters : list
        List of datasets in statmap file to retrieve.
    sngl_parameters : list
        List of datasets in single-detector trigger files to retrieve.
    bank_parameters : list
        List of datasets in template bank file to retrieve.

    Returns
    -------
    bin_names : iterable
        The names of the template-bank bins.
    bin_results : list of dict
        One dict of trigger data per bin.

    Raises
    ------
    ValueError
        If the required combination of input files was not supplied, or
        more than one IFO is given in single-detector mode.
    """
    # list to hold trigger data
    bin_results = []

    # list of IFOs
    ifos = opts.sngl_trigger_files.keys()

    # get indices of bins in template bank
    bins_idx, bank_data = bank_bins_from_cli(opts)
    bin_names = bins_idx.keys()

    # if taking triggers from statmap file
    if opts.statmap_file and opts.bank_file and opts.sngl_trigger_files:

        # loop over each bin
        for bin_name in bin_names:
            data = {}

            # get template hash and detection statistic for coincident events
            statmap = hdf.ForegroundTriggers(
                opts.statmap_file, opts.bank_file,
                sngl_files=opts.sngl_trigger_files.values(),
                n_loudest=opts.search_n_loudest,
                group=opts.statmap_group)
            template_hash = statmap.get_bankfile_array("template_hash")
            stat = statmap.get_coincfile_array("stat")

            # get indices of triggers in bin
            bin_idx = numpy.in1d(template_hash,
                                 bank_data["template_hash"][bins_idx[bin_name]])

            # get indices for sorted detection statistic in bin
            sorting = stat[bin_idx].argsort()[::-1]

            # get variables for n-th loudest triggers
            for p in coinc_parameters:
                arr = statmap.get_coincfile_array(p)
                data[p] = arr[bin_idx][sorting][:opts.n_loudest]
            for p in sngl_parameters:
                for ifo in ifos:
                    key = "/".join([ifo, p])
                    arr = statmap.get_snglfile_array_dict(p)[ifo]
                    data[key] = arr[bin_idx][sorting][:opts.n_loudest]
            for p in bank_parameters:
                arr = statmap.get_bankfile_array(p)
                data[p] = arr[bin_idx][sorting][:opts.n_loudest]

            # append results
            bin_results.append(data)

    # if taking triggers from single detector file
    elif opts.bank_file and opts.sngl_trigger_files:

        # loop over each bin
        for bin_name in bin_names:
            data = {}

            # only use one IFO
            if len(opts.sngl_trigger_files.keys()) == 1:
                # BUG FIX: dict views are not indexable on Python 3;
                # materialize the keys before taking the only entry.
                ifo = list(opts.sngl_trigger_files.keys())[0]
            else:
                raise ValueError("Too many IFOs")

            # get newSNR as statistic from single detector files
            sngls = hdf.SingleDetTriggers(opts.sngl_trigger_files[ifo],
                                          opts.bank_file, opts.veto_file,
                                          opts.veto_segment_name, None, ifo)

            # cluster
            n_loudest = opts.search_n_loudest \
                if opts.search_n_loudest else len(sngls.template_id)
            sngls.mask_to_n_loudest_clustered_events(n_loudest=n_loudest)
            template_hash = \
                sngls.bank["template_hash"][:][sngls.template_id]

            # get indices of triggers in bin
            bin_idx = numpy.in1d(template_hash,
                                 bank_data["template_hash"][bins_idx[bin_name]])

            # sort by detection statistic
            stats = sngls.stat
            sorting = stats[bin_idx].argsort()[::-1]

            # get indices for sorted detection statistic in bin
            for p in sngl_parameters:
                key = "/".join([ifo, p])
                arr = sngls.get_column(p)
                data[key] = arr[bin_idx][sorting][:opts.n_loudest]
            for p in bank_parameters:
                arr = sngls.bank[p][:]
                data[p] = \
                    arr[sngls.template_id][bin_idx][sorting][:opts.n_loudest]

            # append results
            bin_results.append(data)

    # else did not supply enough command line options
    else:
        raise ValueError("Must have --bank-file and --sngl-trigger-files")

    return bin_names, bin_results
async def submit_action(pool_handle: int,
                        request_json: str,
                        nodes: Optional[str],
                        timeout: Optional[int]) -> str:
    """
    Send action to particular nodes of validator pool.
    The list of requests can be send:
        POOL_RESTART
        GET_VALIDATOR_INFO
    The request is sent to the nodes as is. It's assumed that it's already prepared.
    :param pool_handle: pool handle (created by open_pool_ledger).
    :param request_json: Request data json.
    :param nodes: (Optional) List of node names to send the request.
           ["Node1", "Node2",...."NodeN"]
    :param timeout: (Optional) Time to wait respond from nodes (override the default timeout) (in sec).
    :return: Request result as json.
    """
    logger = logging.getLogger(__name__)
    logger.debug("submit_action: >>> pool_handle: %r, request_json: %r, nodes: %r, timeout: %r",
                 pool_handle,
                 request_json,
                 nodes,
                 timeout)

    if not hasattr(submit_action, "cb"):
        logger.debug("submit_action: Creating callback")
        # Create the ctypes callback once and cache it on the function
        # object so a single instance stays alive for the native layer.
        submit_action.cb = create_cb(CFUNCTYPE(None, c_int32, c_int32, c_char_p))

    # Convert the Python arguments to ctypes values; optional arguments
    # are passed through as None when absent.
    c_pool_handle = c_int32(pool_handle)
    c_request_json = c_char_p(request_json.encode('utf-8'))
    c_nodes = c_char_p(nodes.encode('utf-8')) if nodes is not None else None
    c_timeout = c_int32(timeout) if timeout is not None else None

    request_result = await do_call('indy_submit_action',
                                   c_pool_handle,
                                   c_request_json,
                                   c_nodes,
                                   c_timeout,
                                   submit_action.cb)

    # Native layer returns UTF-8 bytes; decode to str for the caller.
    res = request_result.decode()
    logger.debug("submit_action: <<< res: %r", res)
    return res
def get_resource(resource_name):
    """
    Return the normalized path of a resource, looked up next to the
    frozen executable or inside the ``gns3server`` package, or ``None``
    when the resource cannot be found.
    """
    if hasattr(sys, "frozen"):
        # Frozen build: resources are shipped next to the executable.
        base_dir = os.path.dirname(sys.executable)
        return os.path.normpath(os.path.join(base_dir, resource_name))
    if pkg_resources.resource_exists("gns3server", resource_name):
        found = pkg_resources.resource_filename("gns3server", resource_name)
        return os.path.normpath(found)
    return None
def load(cls, data, promote=False):
    """Create a new ent from an existing value. The value must either
    be an instance of Ent, or must be an instance of SAFE_TYPES. If
    the value is a base type (bool, int, string, etc), it will just be
    returned. Iterable types will be loaded recursively, transforming
    dictionaries into Ent instances, but otherwise maintaining the
    hierarchy of the input data.

    :param data: the value to load.
    :param promote: when True, instances of parent Ent classes found
        anywhere in the hierarchy are promoted to ``cls``.
    """
    t = type(data)
    if t == cls:
        # same class, create new copy
        return cls({k: cls.load(v, promote)
                    for k, v in data.__dict__.items()})
    elif isinstance(data, cls):
        # child class, always use directly
        return data.copy()
    elif isinstance(data, Ent):
        # parent class, promote or preserve
        if promote:
            return cls({k: cls.load(v, promote)
                        for k, v in data.__dict__.items()})
        else:
            return data.copy()
    elif t not in SAFE_TYPES:
        return None
    elif t in (tuple, list, set):
        # BUG FIX: previously recursed with cls.load(i), silently
        # dropping ``promote`` for values nested inside containers.
        return t(cls.load(i, promote) for i in data)
    elif t == dict:
        # Same fix here: keep propagating ``promote`` through dicts.
        return cls({k: cls.load(v, promote) for k, v in data.items()})
    else:
        return data
def _get_cf_grid_mapping_var(self):
"""Figure out which grid mapping should be used"""
gmaps = ['fixedgrid_projection', 'goes_imager_projection',
'lambert_projection', 'polar_projection',
'mercator_projection']
if 'grid_mapping' in self.filename_info:
gmaps = [self.filename_info.get('grid_mapping')] + gmaps
for grid_mapping in gmaps:
if grid_mapping in self.nc:
return self.nc[grid_mapping]
raise KeyError("Can't find grid mapping variable in SCMI file") | 0.003509 |
def knapsack(p, v, cmax):
    """Knapsack problem: select the maximum-value set of items whose
    total size does not exceed the capacity.

    :param p: table with size of items
    :param v: table with value of items
    :param cmax: capacity of bag
    :requires: number of items non-zero
    :returns: value of optimal solution, list of item indexes in solution
    :complexity: O(n * cmax), for n = number of items
    """
    count = len(p)
    best = [[0] * (cmax + 1) for _ in range(count + 1)]
    take = [[False] * (cmax + 1) for _ in range(count + 1)]

    # Base case: only item 0 is available.
    for capacity in range(p[0], cmax + 1):
        best[0][capacity] = v[0]
        take[0][capacity] = True

    # Induction: either keep the previous best, or add item ``item``.
    for item in range(1, count):
        for capacity in range(cmax + 1):
            without_item = best[item - 1][capacity]
            if capacity >= p[item] and \
                    best[item - 1][capacity - p[item]] + v[item] > without_item:
                best[item][capacity] = best[item - 1][capacity - p[item]] + v[item]
                take[item][capacity] = True
            else:
                best[item][capacity] = without_item
                take[item][capacity] = False

    # Walk the selection table backwards to recover the chosen items.
    remaining = cmax
    chosen = []
    for item in range(count - 1, -1, -1):
        if take[item][remaining]:
            chosen.append(item)
            remaining -= p[item]
    return (best[count - 1][cmax], chosen)
def shape(self):
    """Tuple of array dimensions.

    Queries the native backend for the shape of this NDArray; returns
    ``None`` when the backend reports the shape as not yet known.

    Examples
    --------
    >>> x = mx.nd.array([1, 2, 3, 4])
    >>> x.shape
    (4L,)
    >>> y = mx.nd.zeros((2, 3, 4))
    >>> y.shape
    (2L, 3L, 4L)
    """
    ndim = mx_int()
    pdata = ctypes.POINTER(mx_int)()
    # The C API fills ``ndim`` and points ``pdata`` at the dimension array.
    check_call(_LIB.MXNDArrayGetShapeEx(
        self.handle, ctypes.byref(ndim), ctypes.byref(pdata)))
    if ndim.value == -1:
        # -1 appears to be the backend's "shape unknown" sentinel.
        return None
    else:
        return tuple(pdata[:ndim.value])
def wrap_get_user(cls, response):
    """Wrap the response from getting a user into an instance
    and return it

    :param response: The response from getting a user
    :type response: :class:`requests.Response`
    :returns: the new user instance
    :rtype: :class:`list` of :class:`User`
    :raises: None
    """
    payload = response.json()
    return cls.wrap_json(payload)
def get_header(headers, name, default=None):
    """Return the value of header *name*.

    The *headers* argument must be a list of ``(name, value)`` tuples.
    The first header whose name matches case-insensitively wins; when
    no header matches, *default* is returned.
    """
    wanted = name.lower()
    matches = (value for key, value in headers if key.lower() == wanted)
    return next(matches, default)
def make_article_info_copyright(self, article_info_div):
    """
    Makes the copyright section for the ArticleInfo. For PLoS, this means
    handling the information contained in the metadata <permissions>
    element.

    :param article_info_div: the parent element that receives the new
        ``<div id="copyright">`` section; nothing is added when the
        article has no <permissions> metadata.
    """
    perm = self.article.root.xpath('./front/article-meta/permissions')
    if not perm:
        # No permissions metadata: emit no copyright section at all.
        return
    copyright_div = etree.SubElement(article_info_div, 'div', {'id': 'copyright'})
    cp_bold = etree.SubElement(copyright_div, 'b')
    cp_bold.text = 'Copyright: '
    # \u00A9 is the copyright sign.
    copyright_string = '\u00A9 '
    copyright_holder = perm[0].find('copyright-holder')
    if copyright_holder is not None:
        copyright_string += all_text(copyright_holder) + '. '
    lic = perm[0].find('license')
    if lic is not None:
        # NOTE(review): assumes <license> always contains a <license-p>
        # child; all_text(None) would fail otherwise -- TODO confirm.
        copyright_string += all_text(lic.find('license-p'))
    append_new_text(copyright_div, copyright_string)
def remove_duplicates(apps, schema_editor):
    """
    Remove any duplicates from the entity relationship table.

    For every (sub_entity, super_entity) pair that occurs more than
    once, all rows except the one with the highest id are deleted.

    :param apps: historical app registry supplied by the migration runner
    :param schema_editor: unused, required by the data-migration signature
    :return: None
    """
    # Get the model
    EntityRelationship = apps.get_model('entity', 'EntityRelationship')

    # Find the duplicates: group by the pair and keep only groups with
    # more than one row, remembering the highest id in each group.
    duplicates = EntityRelationship.objects.all().order_by(
        'sub_entity_id',
        'super_entity_id'
    ).values(
        'sub_entity_id',
        'super_entity_id'
    ).annotate(
        Count('sub_entity_id'),
        Count('super_entity_id'),
        max_id=Max('id')
    ).filter(
        super_entity_id__count__gt=1
    )

    # Loop over the duplicates and delete every row of the pair except
    # the surviving max-id row.
    for duplicate in duplicates:
        EntityRelationship.objects.filter(
            sub_entity_id=duplicate['sub_entity_id'],
            super_entity_id=duplicate['super_entity_id']
        ).exclude(
            id=duplicate['max_id']
        ).delete()
def data_filler_simple_registration(self, number_of_rows, conn):
    '''creates and fills the table with simple regis. information

    :param number_of_rows: how many fake registration rows to insert
    :param conn: an open sqlite3 connection; the ``simple_registration``
        table is created on it and must not already exist
    '''
    cursor = conn.cursor()
    cursor.execute('''
            CREATE TABLE simple_registration(id TEXT PRIMARY KEY,
            email TEXT , password TEXT)
        ''')
    conn.commit()
    multi_lines = []
    try:
        # Build all fake rows first, then insert them in one batch.
        for i in range(0, number_of_rows):
            multi_lines.append((rnd_id_generator(self), self.faker.safe_email(), self.faker.md5(raw_output=False)))
        cursor.executemany('insert into simple_registration values(?,?,?)',multi_lines)
        conn.commit()
        logger.warning('simple_registration Commits are successful after write job!', extra=d)
    except Exception as e:
        # NOTE(review): failures are only logged, not re-raised, so a
        # partial/failed fill is silent to the caller -- TODO confirm
        # this best-effort behaviour is intended.
        logger.error(e, extra=d)
def parse_tables(self):
    """
    Parse and return all tables from the DOM.

    Returns
    -------
    generator
        Lazily yields parsed (header, body, footer) tuples, one per
        table matched in the document.
    """
    tables = self._parse_tables(self._build_doc(), self.match, self.attrs)
    return (self._parse_thead_tbody_tfoot(table) for table in tables)
def _update_Prxy_diag(self):
    """Update `D`, `A`, `Ainv` from `Prxy`, `prx`.

    For each site ``r``, ``Prxy[r]`` is symmetrized by the similarity
    transform diag(prx**0.5) * Prxy * diag(prx**-0.5) and then
    eigendecomposed; ``D[r]`` holds the eigenvalues and ``A[r]`` /
    ``Ainv[r]`` the (inverse) transformation matrices.
    """
    for r in range(self.nsites):
        pr_half = self.prx[r]**0.5
        pr_neghalf = self.prx[r]**-0.5
        #symm_pr = scipy.dot(scipy.diag(pr_half), scipy.dot(self.Prxy[r], scipy.diag(pr_neghalf)))
        # Element-wise equivalent of the diagonal-matrix product above.
        symm_pr = (pr_half * (self.Prxy[r] * pr_neghalf).transpose()).transpose()
        # assert scipy.allclose(symm_pr, symm_pr.transpose())
        # eigh is valid because symm_pr is symmetric by construction.
        (evals, evecs) = scipy.linalg.eigh(symm_pr)
        # assert scipy.allclose(scipy.linalg.inv(evecs), evecs.transpose())
        # assert scipy.allclose(symm_pr, scipy.dot(evecs, scipy.dot(scipy.diag(evals), evecs.transpose())))
        self.D[r] = evals
        # Fold the diagonal scaling back into the eigenvector matrices
        # so that A / Ainv diagonalize the original (unsymmetrized) Prxy.
        self.Ainv[r] = evecs.transpose() * pr_half
        self.A[r] = (pr_neghalf * evecs.transpose()).transpose()
def _build_dist(self, spec, label, dist, **kwargs):
    ''' Build and return a PyMC3 Distribution.

    :param spec: model specification; ``spec.noncentered`` toggles the
        non-centered parameterization of hyperpriors.
    :param label: name under which the variable is registered in PyMC3.
    :param dist: a Distribution class, or the name of one looked up
        first in the ``pm`` namespace and then in ``self.dists``.
    :param kwargs: distribution arguments; any ``Prior`` value is
        recursively expanded into its own distribution.
    :raises ValueError: if ``dist`` is a name matching no known class.
    '''
    if isinstance(dist, string_types):
        if hasattr(pm, dist):
            dist = getattr(pm, dist)
        elif dist in self.dists:
            dist = self.dists[dist]
        else:
            raise ValueError("The Distribution class '%s' was not "
                             "found in PyMC3 or the PyMC3BackEnd." % dist)
    # Inspect all args in case we have hyperparameters
    def _expand_args(k, v, label):
        if isinstance(v, Prior):
            # Derived variable names are '<parent label>_<arg name>'.
            label = '%s_%s' % (label, k)
            return self._build_dist(spec, label, v.name, **v.args)
        return v
    kwargs = {k: _expand_args(k, v, label) for (k, v) in kwargs.items()}
    # Non-centered parameterization for hyperpriors:
    # draw a unit-normal offset and rescale it by the sd hyperprior.
    if spec.noncentered and 'sd' in kwargs and 'observed' not in kwargs \
            and isinstance(kwargs['sd'], pm.model.TransformedRV):
        old_sd = kwargs['sd']
        _offset = pm.Normal(label + '_offset', mu=0, sd=1,
                            shape=kwargs['shape'])
        return pm.Deterministic(label, _offset * old_sd)
    return dist(label, **kwargs)
def first_field(self):
    """ Returns the first :class:`Field` in the `Sequence` or ``None``
    for an empty `Sequence`.
    """
    for index, member in enumerate(self):
        if is_container(member):
            # Recurse into nested containers; an empty one yields None
            # and the search continues with the next member.
            nested = member.first_field()
            if nested is not None:
                return nested
        elif is_field(member):
            return member
        else:
            # Sequences may only hold containers and fields.
            raise MemberTypeError(self, member, index)
    return None
def get_repo_url(pypirc, repository):
    """Fetch the RepositoryURL for a given repository, reading info from pypirc.

    Will try to find the repository in the .pypirc, including
    username/password; falls back to treating *repository* as a raw URL.

    Args:
        pypirc (str): path to the .pypirc config file
        repository (str): URL or alias for the repository

    Returns:
        base.RepositoryURL for the repository
    """
    config_path = os.path.abspath(os.path.expanduser(pypirc))
    repo_config = base.PyPIConfig(config_path).get_repo_config(repository)
    if repo_config:
        return repo_config.get_clean_url()
    return base.RepositoryURL(repository)
def prepare(self, pseudocount=0.0, lenfile=None, read_length=100):
    """
    Initializes the probability of read origin according to the alignment profile

    :param pseudocount: Uniform prior for allele specificity estimation
    :param lenfile: optional tab-separated file of target lengths; when
        given, allelic expression is converted to depth level
    :param read_length: read length used to convert target lengths into
        effective (read-count) lengths
    :return: Nothing (as it performs an in-place operations)
    :raises RuntimeError: on a malformed alignment file or targets with
        missing/non-positive length information
    """
    # NOTE(review): uses ``xrange`` -- this module targets Python 2.
    if self.probability.num_groups > 0:
        # Sparse loci-to-group indicator matrix (1.0 where locus is in group).
        self.grp_conv_mat = lil_matrix((self.probability.num_loci, self.probability.num_groups))
        for i in xrange(self.probability.num_groups):
            self.grp_conv_mat[self.probability.groups[i], i] = 1.0
        self.grp_conv_mat = self.grp_conv_mat.tocsc()
        # Symmetric transcript-to-transcript matrix marking loci that
        # share a group (identity plus within-group pairs).
        self.t2t_mat = eye(self.probability.num_loci, self.probability.num_loci)
        self.t2t_mat = self.t2t_mat.tolil()
        for tid_list in self.probability.groups:
            for ii in xrange(len(tid_list)):
                for jj in xrange(ii):
                    i = tid_list[ii]
                    j = tid_list[jj]
                    self.t2t_mat[i, j] = 1
                    self.t2t_mat[j, i] = 1
        self.t2t_mat = self.t2t_mat.tocsc()
    if lenfile is not None:
        hid = dict(zip(self.probability.hname, np.arange(len(self.probability.hname))))
        self.target_lengths = np.zeros((self.probability.num_loci, self.probability.num_haplotypes))
        if self.probability.num_haplotypes > 1:
            # Multi-haplotype: rows are '<locus>_<haplotype>\t<length>'.
            with open(lenfile) as fh:
                for curline in fh:
                    item = curline.rstrip().split("\t")
                    locus, hap = item[0].split("_")
                    # Effective length in read starts; clamp at 1.
                    self.target_lengths[self.probability.lid[locus], hid[hap]] = max(float(item[1]) - read_length + 1.0, 1.0)
        elif self.probability.num_haplotypes > 0:
            # Single haplotype: rows are '<locus>\t<length>'.
            with open(lenfile) as fh:
                for curline in fh:
                    item = curline.rstrip().split("\t")
                    self.target_lengths[self.probability.lid[item[0]], 0] = max(float(item[1]) - read_length + 1.0, 1.0)
        else:
            raise RuntimeError('There is something wrong with your emase-format alignment file.')
        self.target_lengths = self.target_lengths.transpose()
        #self.target_lengths = self.target_lengths.transpose() / read_length # lengths in terms of read counts
        if not np.all(self.target_lengths > 0.0):
            raise RuntimeError('There exist transcripts missing length information.')
    self.probability.normalize_reads(axis=APM.Axis.READ)  # Initialize alignment probability matrix
    self.allelic_expression = self.probability.sum(axis=APM.Axis.READ)
    if self.target_lengths is not None:  # allelic_expression will be at depth-level
        self.allelic_expression = np.divide(self.allelic_expression, self.target_lengths)
    if pseudocount > 0.0:  # pseudocount is at depth-level
        # Add the pseudocount only to non-zero loci, then rescale so the
        # total expression mass is preserved.
        orig_allelic_expression_sum = self.allelic_expression.sum()
        nzloci = np.nonzero(self.allelic_expression)[1]
        self.allelic_expression[:, nzloci] += pseudocount
        self.allelic_expression *= (orig_allelic_expression_sum / self.allelic_expression.sum())
def pass_q_v1(self):
    """Update the outlet link sequence."""
    fluxes = self.sequences.fluxes.fastaccess
    outlets = self.sequences.outlets.fastaccess
    # Add the computed runoff onto the outlet node's discharge.
    outlets.q[0] += fluxes.qa
def equality(self, other):
    """Compare two objects for equality.

    @param self: first object to compare
    @param other: second object to compare
    @return: boolean result of comparison
    """
    cname = self.__class__.__name__
    for aname in self.attributes:
        # A missing attribute on either side means the objects differ.
        try:
            mine = getattr(self, aname)
            theirs = getattr(other, aname)
        except AttributeError as error:
            logging.debug("%s.%s: %s", cname, aname, error)
            return False
        self.log(mine, theirs, '==', cname=cname, aname=aname)
        matched = (mine == theirs)
        self.log(mine, theirs, '==', cname=cname, aname=aname, result=matched)
        if not matched:
            return False
    return True
def load_or_create_vocabs(source_paths: List[str],
                          target_path: str,
                          source_vocab_paths: List[Optional[str]],
                          target_vocab_path: Optional[str],
                          shared_vocab: bool,
                          num_words_source: Optional[int], word_min_count_source: int,
                          num_words_target: Optional[int], word_min_count_target: int,
                          pad_to_multiple_of: Optional[int] = None) -> Tuple[List[Vocab], Vocab]:
    """
    Returns vocabularies for source files (including factors) and target.
    If the respective vocabulary paths are not None, the vocabulary is read from the path and returned.
    Otherwise, it is built from the support and saved to the path.
    :param source_paths: The path to the source text (and optional token-parallel factor files).
    :param target_path: The target text.
    :param source_vocab_paths: The source vocabulary path (and optional factor vocabulary paths).
    :param target_vocab_path: The target vocabulary path.
    :param shared_vocab: Whether the source and target vocabularies are shared.
    :param num_words_source: Number of words in the source vocabulary.
    :param word_min_count_source: Minimum frequency of words in the source vocabulary.
    :param num_words_target: Number of words in the target vocabulary.
    :param word_min_count_target: Minimum frequency of words in the target vocabulary.
    :param pad_to_multiple_of: If not None, pads the vocabularies to a size that is the next multiple of this int.
    :return: List of source vocabularies (for source and factors), and target vocabulary.
    :raises: via ``utils.check_condition`` when shared-vocab constraints are violated.
    """
    # First source path is the surface text; the rest are factor files.
    source_path, *source_factor_paths = source_paths
    source_vocab_path, *source_factor_vocab_paths = source_vocab_paths
    logger.info("=============================")
    logger.info("Loading/creating vocabularies")
    logger.info("=============================")
    logger.info("(1) Surface form vocabularies (source & target)")
    if shared_vocab:
        # Sharing requires one vocabulary object for both sides; the
        # three cases below cover both/neither/one path being given.
        if source_vocab_path and target_vocab_path:
            vocab_source = vocab_from_json(source_vocab_path)
            vocab_target = vocab_from_json(target_vocab_path)
            utils.check_condition(are_identical(vocab_source, vocab_target),
                                  "Shared vocabulary requires identical source and target vocabularies. "
                                  "The vocabularies in %s and %s are not identical." % (source_vocab_path,
                                                                                       target_vocab_path))
        elif source_vocab_path is None and target_vocab_path is None:
            utils.check_condition(num_words_source == num_words_target,
                                  "A shared vocabulary requires the number of source and target words to be the same.")
            utils.check_condition(word_min_count_source == word_min_count_target,
                                  "A shared vocabulary requires the minimum word count for source and target "
                                  "to be the same.")
            vocab_source = vocab_target = build_from_paths(paths=[source_path, target_path],
                                                           num_words=num_words_source,
                                                           min_count=word_min_count_source,
                                                           pad_to_multiple_of=pad_to_multiple_of)
        else:
            vocab_path = source_vocab_path if source_vocab_path is not None else target_vocab_path
            logger.info("Using %s as a shared source/target vocabulary." % vocab_path)
            vocab_source = vocab_target = vocab_from_json(vocab_path)
    else:
        vocab_source = load_or_create_vocab(source_path, source_vocab_path, num_words_source, word_min_count_source,
                                            pad_to_multiple_of=pad_to_multiple_of)
        vocab_target = load_or_create_vocab(target_path, target_vocab_path, num_words_target, word_min_count_target,
                                            pad_to_multiple_of=pad_to_multiple_of)
    vocab_source_factors = []  # type: List[Vocab]
    if source_factor_paths:
        logger.info("(2) Additional source factor vocabularies")
        # source factor vocabs are always created
        for factor_path, factor_vocab_path in zip(source_factor_paths, source_factor_vocab_paths):
            vocab_source_factors.append(load_or_create_vocab(factor_path, factor_vocab_path,
                                                             num_words_source, word_min_count_source))
    return [vocab_source] + vocab_source_factors, vocab_target
def create_index(config):
    """Create the root index."""
    index_path = pathlib.Path(config.cache_path) / "index.json"
    payload = {"version": __version__}
    with open(index_path, "w") as handle:
        handle.write(json.dumps(payload, indent=2))
def discover_package_doc_dir(initial_dir):
    """Discover the ``doc/`` dir of a package given an initial directory.

    Parameters
    ----------
    initial_dir : `str`
        The initial directory to search from. In practice, this is often
        the directory that the user is running the package-docs CLI
        from. This directory needs to be somewhere inside the package's
        repository.

    Returns
    -------
    root_dir : `str`
        The root documentation directory (``doc/``), containing
        ``conf.py``.

    Raises
    ------
    FileNotFoundError
        Raised if a ``conf.py`` file is not found in the initial
        directory, or any parents, or in a ``doc/`` subdirectory.
    """
    start = pathlib.Path(initial_dir).resolve()

    # Maybe we are already inside the doc/ directory.
    if _has_conf_py(start):
        return str(start)

    # Covers running the CLI from the root of a repository: look for a
    # doc/ subdirectory containing conf.py.
    doc_dir = start / 'doc'
    if doc_dir.is_dir() and _has_conf_py(doc_dir):
        return str(doc_dir)

    # Otherwise search upwards; _search_parents raises FileNotFoundError
    # when no conf.py exists anywhere above.
    return str(_search_parents(start))
def replace(self, old_patch, new_patch):
    """ Replace old_patch with new_patch.

    The method only replaces the patch and doesn't change any comments:
    the new patch line inherits the old line's comment.
    """
    self._check_patch(old_patch)
    current_line = self.patch2line[old_patch]
    position = self.patchlines.index(current_line)
    self.patchlines.pop(position)
    replacement = PatchLine(new_patch)
    replacement.set_comment(current_line.get_comment())
    self.patchlines.insert(position, replacement)
    # Keep the patch -> line index in sync.
    del self.patch2line[old_patch]
    self.patch2line[new_patch] = replacement
def region_screenshot(self, filename=None):
    """Deprecated: use ``screenshot().crop(bounds)`` instead.

    Take the region of the screenshot defined by ``self.bounds`` (or
    the full screenshot when no bounds are set), optionally saving it
    to *filename*, and return the image.
    """
    if self.__keep_screen:
        screen = self.__last_screen
    else:
        screen = self.screenshot()
    if self.bounds:
        screen = screen.crop(self.bounds)
    if filename:
        screen.save(filename)
    return screen
def set_server(self, pos, key, value):
    """Set the key to the value for the pos (position in the list)."""
    entry = self._web_list[pos]
    entry[key] = value
def pause(jid, state_id=None, duration=None):
    '''
    Set up a state id pause, this instructs a running state to pause at a given
    state id. This needs to pass in the jid of the running state and can
    optionally pass in a duration in seconds.
    '''
    master_minion = salt.minion.MasterMinion(__opts__)
    master_minion.functions['state.pause'](jid, state_id, duration)
def equals(self, other):
    """
    Ensures :attr:`subject` is equal to *other*.
    """
    subject = self._subject
    self._run(unittest_case.assertEqual, (subject, other))
    return ChainInspector(subject)
def push_dir(path, glob=None, upload_path=None):
    '''
    Push a directory from the minion up to the master, the files will be saved
    to the salt master in the master's minion files cachedir (defaults to
    ``/var/cache/salt/master/minions/minion-id/files``). It also has a glob
    for matching specific files using globbing.

    .. versionadded:: 2014.7.0

    Since this feature allows a minion to push files up to the master server it
    is disabled by default for security purposes. To enable, set ``file_recv``
    to ``True`` in the master configuration file, and restart the master.

    upload_path
        Provide a different path and directory name inside the master's minion
        files cachedir

    CLI Example:

    .. code-block:: bash

        salt '*' cp.push /usr/lib/mysql
        salt '*' cp.push /usr/lib/mysql upload_path='/newmysql/path'
        salt '*' cp.push_dir /etc/modprobe.d/ glob='*.conf'
    '''
    # Reject relative paths and traversal attempts outright.
    if '../' in path or not os.path.isabs(path):
        return False
    tmpupload_path = upload_path
    path = os.path.realpath(path)
    if os.path.isfile(path):
        # A plain file degenerates to a single push.
        return push(path, upload_path=upload_path)
    else:
        # Collect every file beneath the directory, then filter by glob.
        filelist = []
        for root, _, files in salt.utils.path.os_walk(path):
            filelist += [os.path.join(root, tmpfile) for tmpfile in files]
        if glob is not None:
            filelist = [fi for fi in filelist if fnmatch.fnmatch(os.path.basename(fi), glob)]
        if not filelist:
            return False
        for tmpfile in filelist:
            if upload_path and tmpfile.startswith(path):
                # Rebase the file's path under the requested upload_path.
                tmpupload_path = os.path.join(os.path.sep,
                                              upload_path.strip(os.path.sep),
                                              tmpfile.replace(path, '')
                                              .strip(os.path.sep))
            ret = push(tmpfile, upload_path=tmpupload_path)
            if not ret:
                # Abort on the first failed push and report it.
                return ret
    return True
def cacheback(lifetime=None, fetch_on_miss=None, cache_alias=None,
              job_class=None, task_options=None, **job_class_kwargs):
    """
    Decorate function to cache its return value.

    :lifetime: How long to cache items for
    :fetch_on_miss: Whether to perform a synchronous fetch when no cached
                    result is found
    :cache_alias: The Django cache alias to store the result into.
    :job_class: The class to use for running the cache refresh job. Defaults
                using the FunctionJob.
    :job_class_kwargs: Any extra kwargs to pass to job_class constructor.
                       Useful with custom job_class implementations.

    The returned wrapper exposes ``.fn`` (the undecorated function) and
    ``.job`` (the Job instance) as attributes.
    """
    if job_class is None:
        job_class = FunctionJob
    # One Job instance is shared by every call to the decorated function.
    job = job_class(lifetime=lifetime, fetch_on_miss=fetch_on_miss,
                    cache_alias=cache_alias, task_options=task_options,
                    **job_class_kwargs)

    def _wrapper(fn):
        # using available_attrs to work around http://bugs.python.org/issue3445
        @wraps(fn, assigned=available_attrs(fn))
        def __wrapper(*args, **kwargs):
            return job.get(fn, *args, **kwargs)
        # Assign reference to unwrapped function so that we can access it
        # later without descending into infinite regress.
        __wrapper.fn = fn
        # Assign reference to job so we can use the full Job API
        __wrapper.job = job
        return __wrapper

    return _wrapper
def print_chain_summary(self, stream=sys.stdout, indent=""):
    """Print a summary of the files in this file dict.

    This version uses chain_input_files and chain_output_files to
    count the input and output files.
    """
    # (label, collection) pairs in display order; labels carry the original
    # column padding so output is byte-identical to the hand-written version.
    rows = (
        ("Total files ", self.file_dict),
        (" Input files ", self.chain_input_files),
        (" Output files ", self.chain_output_files),
        (" Internal files ", self.internal_files),
        (" Temp files ", self.temp_files),
    )
    for label, collection in rows:
        stream.write("%s%s: %i\n" % (indent, label, len(collection)))
def dict_to_pendulum(d: Dict[str, Any],
                     pendulum_class: ClassType) -> DateTime:
    """
    Converts a ``dict`` object back to a ``Pendulum``.

    NOTE(review): ``pendulum_class`` is accepted for interface symmetry with
    sibling converters but is not consulted here — parsing relies solely on
    the stored ISO-8601 string.
    """
    iso_repr = d['iso']
    return pendulum.parse(iso_repr)
def locate(command, on):
    """Locate the command's man page and print its location."""
    click.echo(find_page_location(command, on))
def workbench_scenarios(cls):
    """
    Gather scenarios to be displayed in the workbench.
    """
    # Resolve the top-level package so the bundled 'scenarios' resource
    # directory can be located.
    top_package = cls.__module__.split('.')[0]
    scenario_dir = pkg_resources.resource_filename(top_package, 'scenarios')
    return _read_files(_find_files(scenario_dir))
def async_do(self, size=10):
    """Execute all queued asynchronous jobs and wait for them to finish.

    Jobs are read from ``self._session._async_jobs``; nothing happens if the
    session has no such queue.

    :param size: number of threads to run on (default 10).
    """
    if hasattr(self._session, '_async_jobs'):
        # Pass %-style arguments lazily so the message is only formatted
        # when INFO logging is actually enabled.
        logging.info(
            "Executing asynchronous %s jobs found in queue by using %s threads...",
            len(self._session._async_jobs), size)
        threaded_requests.map(self._session._async_jobs, size=size)
def set_cookie(self, name: str, value: str, *,
               expires: Optional[str]=None,
               domain: Optional[str]=None,
               max_age: Optional[Union[int, str]]=None,
               path: str='/',
               secure: Optional[str]=None,
               httponly: Optional[str]=None,
               version: Optional[str]=None) -> None:
    """Set or update response cookie.

    Sets a new cookie or updates an existing one with a new value; only the
    attributes that are not None are touched.
    """
    previous = self._cookies.get(name)
    if previous is not None and previous.coded_value == '':
        # An empty coded value marks a cookie scheduled for deletion;
        # drop it so the new value starts from a fresh morsel.
        self._cookies.pop(name, None)
    self._cookies[name] = value
    morsel = self._cookies[name]
    if expires is not None:
        morsel['expires'] = expires
    elif morsel.get('expires') == 'Thu, 01 Jan 1970 00:00:00 GMT':
        # Clear the epoch sentinel left behind by a prior delete_cookie.
        del morsel['expires']
    if domain is not None:
        morsel['domain'] = domain
    if max_age is not None:
        morsel['max-age'] = str(max_age)
    elif 'max-age' in morsel:
        del morsel['max-age']
    morsel['path'] = path
    # Remaining attributes: copy through only when explicitly supplied.
    for attr_name, supplied in (('secure', secure),
                                ('httponly', httponly),
                                ('version', version)):
        if supplied is not None:
            morsel[attr_name] = supplied
def text2labels(text, sents):
    '''
    Build a per-character label list for `text`: characters that belong to
    one of the sentences in `sents` are labelled '0', every other character
    (inter-sentence separators) is labelled '1'.

    `sents` must contain the sentences in the order they occur in `text`.
    Used in the training process.

    >>> ''.join(text2labels('привет. меня зовут аня.',
    ...                     ['привет.', 'меня зовут аня.']))
    '00000001000000000000000'
    '''
    # Start with everything marked '1' and zero out the sentence spans.
    # This avoids the bug where a literal '0' character outside any sentence
    # kept its '0' label.
    labels = ['1'] * len(text)
    pos = 0  # search cursor: prevents re-matching an earlier duplicate sentence
    for sent in sents:
        start = text.index(sent, pos)
        finish = start + len(sent)
        labels[start:finish] = '0' * len(sent)
        pos = finish
    return labels
def Parse(conditions):
  """Parses the file finder condition types into the condition objects.

  Args:
    conditions: An iterator over `FileFinderCondition` objects.

  Yields:
    `MetadataCondition` objects that correspond to the file-finder conditions.
    Conditions with an unsupported type are silently skipped.
  """
  kind = rdf_file_finder.FileFinderCondition.Type
  classes = {
      kind.MODIFICATION_TIME: ModificationTimeCondition,
      kind.ACCESS_TIME: AccessTimeCondition,
      kind.INODE_CHANGE_TIME: InodeChangeTimeCondition,
      kind.SIZE: SizeCondition,
      kind.EXT_FLAGS: ExtFlagsCondition,
  }
  for condition in conditions:
    # Look the class up outside the constructor call: the old try/except
    # KeyError around `classes[...](condition)` also swallowed KeyErrors
    # raised inside a condition class's __init__, hiding real bugs.
    condition_cls = classes.get(condition.condition_type)
    if condition_cls is not None:
      yield condition_cls(condition)
def run_base_recalibration(job, bam, bai, ref, ref_dict, fai, dbsnp, mills, unsafe=False):
    """
    Creates recalibration table for Base Quality Score Recalibration

    :param JobFunctionWrappingJob job: passed automatically by Toil
    :param str bam: FileStoreID for BAM file
    :param str bai: FileStoreID for BAM index file
    :param str ref: FileStoreID for reference genome fasta file
    :param str ref_dict: FileStoreID for reference genome sequence dictionary file
    :param str fai: FileStoreID for reference genome fasta index file
    :param str dbsnp: FileStoreID for dbSNP VCF file
    :param str mills: FileStoreID for Mills VCF file
    :param bool unsafe: If True, runs GATK in UNSAFE mode: "-U ALLOW_SEQ_DICT_INCOMPATIBILITY"
    :return: FileStoreID for the recalibration table file
    :rtype: str
    """
    # Container-local filename -> FileStoreID of each input the tool needs.
    inputs = {'ref.fasta': ref,
              'ref.fasta.fai': fai,
              'ref.dict': ref_dict,
              'input.bam': bam,
              'input.bai': bai,
              'dbsnp.vcf': dbsnp,
              'mills.vcf': mills}
    work_dir = job.fileStore.getLocalTempDir()
    # .items() instead of the Python-2-only .iteritems() so this code runs
    # unchanged on both Python 2 and Python 3.
    for name, file_store_id in inputs.items():
        job.fileStore.readGlobalFile(file_store_id, os.path.join(work_dir, name))
    # Call: GATK -- BaseRecalibrator
    parameters = ['-T', 'BaseRecalibrator',
                  '-nct', str(int(job.cores)),
                  '-R', '/data/ref.fasta',
                  '-I', '/data/input.bam',
                  # Recommended known sites:
                  # https://software.broadinstitute.org/gatk/guide/article?id=1247
                  '-knownSites', '/data/dbsnp.vcf',
                  '-knownSites', '/data/mills.vcf',
                  '-o', '/data/recal_data.table']
    if unsafe:
        parameters.extend(['-U', 'ALLOW_SEQ_DICT_INCOMPATIBILITY'])
    # Set TMPDIR to /data to prevent writing temporary files to /tmp
    docker_parameters = ['--rm',
                         '--log-driver', 'none',
                         '-e', 'JAVA_OPTS=-Djava.io.tmpdir=/data/ -Xmx{}'.format(job.memory),
                         '-v', '{}:/data'.format(work_dir)]
    start_time = time.time()
    dockerCall(job=job, tool='quay.io/ucsc_cgl/gatk:3.5--dba6dae49156168a909c43330350c6161dc7ecc2',
               workDir=work_dir,
               parameters=parameters,
               dockerParameters=docker_parameters)
    end_time = time.time()
    _log_runtime(job, start_time, end_time, "GATK3 BaseRecalibrator")
    return job.fileStore.writeGlobalFile(os.path.join(work_dir, 'recal_data.table'))
def performFirmwareUpdate(self, unDeviceIndex):
    """
    Performs the actual firmware update if applicable.

    The following events will be sent, if VRFirmwareError_None was returned: VREvent_FirmwareUpdateStarted, VREvent_FirmwareUpdateFinished
    Use the properties Prop_Firmware_UpdateAvailable_Bool, Prop_Firmware_ManualUpdate_Bool, and Prop_Firmware_ManualUpdateURL_String
    to figure out whether a firmware update is available, and whether it is a manual update
    Prop_Firmware_ManualUpdateURL_String should point to an URL describing the manual update process
    """
    # Thin delegation to the native function table entry.
    return self.function_table.performFirmwareUpdate(unDeviceIndex)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.