code
stringlengths 75
104k
| docstring
stringlengths 1
46.9k
|
|---|---|
def edit_inputs(client, workflow):
    """Interactively prompt for and update each workflow input's default."""
    # Map workflow input types to converters for the prompted string value.
    converters = {
        'int': int,
        'string': str,
        'File': lambda value: File(path=Path(value).resolve()),
    }
    for workflow_input in workflow.inputs:
        to_native = converters.get(workflow_input.type, str)
        answer = click.prompt(
            '{0.id} ({0.type})'.format(workflow_input),
            default=_format_default(client, workflow_input.default),
        )
        workflow_input.default = to_native(answer)
    return workflow
|
Edit workflow inputs.
|
def emit(self, action, payload=None, retry=0):
    """Send *action* with *payload* through the transport.

    :param action: an action slug
    :param payload: data, default {}
    :param retry: integer, default 0.
    :return: information in form of dict.
    """
    data = payload if payload else {}
    send = self.transport.emit
    if retry:
        # Wrap the plain emit with the transport's retry decorator.
        send = self.transport.retry(retry)(send)
    return send(action, data)
|
Emit action with payload.
:param action: an action slug
:param payload: data, default {}
:param retry: integer, default 0.
:return: information in form of dict.
|
def _load_image_labels(self):
    """Parse every ground-truth XML annotation for this image set.

    Returns:
    ----------
    list of per-image numpy arrays; each row is
    [cls_id, xmin, ymin, xmax, ymax, difficult] with box coordinates
    normalized by image width/height
    """
    all_labels = []
    for index in self.image_set_index:
        annotation = ET.parse(self._label_path_from_index(index)).getroot()
        size_node = annotation.find('size')
        img_width = float(size_node.find('width').text)
        img_height = float(size_node.find('height').text)
        boxes = []
        for obj_node in annotation.iter('object'):
            # Difficult objects are kept; filtering on the flag is disabled.
            difficult = int(obj_node.find('difficult').text)
            class_name = obj_node.find('name').text
            if class_name not in self.classes:
                continue
            class_id = self.classes.index(class_name)
            bndbox = obj_node.find('bndbox')
            # Normalize pixel coordinates to [0, 1] fractions of the image.
            xmin = float(bndbox.find('xmin').text) / img_width
            ymin = float(bndbox.find('ymin').text) / img_height
            xmax = float(bndbox.find('xmax').text) / img_width
            ymax = float(bndbox.find('ymax').text) / img_height
            boxes.append([class_id, xmin, ymin, xmax, ymax, difficult])
        all_labels.append(np.array(boxes))
    return all_labels
|
preprocess all ground-truths
Returns:
----------
labels packed in [num_images x max_num_objects x 5] tensor
|
def hierarchy_spectrum(mg, filter=True, plot=False):
    """Examine a multilevel hierarchy's spectrum.

    Parameters
    ----------
    mg { pyamg multilevel hierarchy }
         e.g. generated with smoothed_aggregation_solver(...) or
         ruge_stuben_solver(...)

    Returns
    -------
    (1) table to standard out detailing the spectrum of each level in mg
    (2) if plot==True, a sequence of plots in the complex plane of the
        spectrum at each level

    Notes
    -----
    This can be useful for troubleshooting and when examining how your
    problem's nature changes from level to level

    Examples
    --------
    >>> from pyamg import smoothed_aggregation_solver
    >>> from pyamg.gallery import poisson
    >>> from pyamg.util.utils import hierarchy_spectrum
    >>> A = poisson( (1,), format='csr' )
    >>> ml = smoothed_aggregation_solver(A)
    >>> hierarchy_spectrum(ml)
    <BLANKLINE>
     Level | min(re(eig)) | max(re(eig)) | num re(eig) < 0 | num re(eig) > 0 | cond_2(A)
    ---------------------------------------------------------------------------
       0   |    2.000     |    2.000     |        0        |        1        |  1.00e+00
    <BLANKLINE>
    <BLANKLINE>
     Level | min(im(eig)) | max(im(eig)) | num im(eig) < 0 | num im(eig) > 0 | cond_2(A)
    ---------------------------------------------------------------------------
       0   |    0.000     |    0.000     |        0        |        0        |  1.00e+00
    <BLANKLINE>
    """
    # NOTE: the `filter` parameter shadows the builtin of the same name;
    # kept as-is for API compatibility.
    real_table = [['Level', 'min(re(eig))', 'max(re(eig))', 'num re(eig) < 0',
                   'num re(eig) > 0', 'cond_2(A)']]
    imag_table = [['Level', 'min(im(eig))', 'max(im(eig))', 'num im(eig) < 0',
                   'num im(eig) > 0', 'cond_2(A)']]
    for i in range(len(mg.levels)):
        A = mg.levels[i].A.tocsr()
        if filter is True:
            # Filter out any zero rows and columns of A
            A.eliminate_zeros()
            # Differences of CSR/CSC indptr give (negated) nnz per row/col;
            # only the zero/non-zero distinction is used below.
            nnz_per_row = A.indptr[0:-1] - A.indptr[1:]
            nonzero_rows = (nnz_per_row != 0).nonzero()[0]
            A = A.tocsc()
            nnz_per_col = A.indptr[0:-1] - A.indptr[1:]
            nonzero_cols = (nnz_per_col != 0).nonzero()[0]
            nonzero_rowcols = sp.union1d(nonzero_rows, nonzero_cols)
            A = np.mat(A.todense())
            A = A[nonzero_rowcols, :][:, nonzero_rowcols]
        else:
            A = np.mat(A.todense())
        # Dense eigenvalues and 2-norm condition number of this level's
        # operator; this is expensive for large levels.
        e = eigvals(A)
        c = cond(A)
        lambda_min = min(sp.real(e))
        lambda_max = max(sp.real(e))
        # Count of eigenvalues with negative/positive real part.
        num_neg = max(e[sp.real(e) < 0.0].shape)
        num_pos = max(e[sp.real(e) > 0.0].shape)
        real_table.append([str(i), ('%1.3f' % lambda_min),
                           ('%1.3f' % lambda_max),
                           str(num_neg), str(num_pos), ('%1.2e' % c)])
        # Same statistics for the imaginary parts.
        lambda_min = min(sp.imag(e))
        lambda_max = max(sp.imag(e))
        num_neg = max(e[sp.imag(e) < 0.0].shape)
        num_pos = max(e[sp.imag(e) > 0.0].shape)
        imag_table.append([str(i), ('%1.3f' % lambda_min),
                           ('%1.3f' % lambda_max),
                           str(num_neg), str(num_pos), ('%1.2e' % c)])
        if plot:
            # Imported lazily so plotting support is optional.
            import pylab
            pylab.figure(i+1)
            pylab.plot(sp.real(e), sp.imag(e), 'kx')
            handle = pylab.title('Level %d Spectrum' % i)
            handle.set_fontsize(19)
            handle = pylab.xlabel('real(eig)')
            handle.set_fontsize(17)
            handle = pylab.ylabel('imag(eig)')
            handle.set_fontsize(17)
    print(print_table(real_table))
    print(print_table(imag_table))
    if plot:
        pylab.show()
|
Examine a multilevel hierarchy's spectrum.
Parameters
----------
mg { pyamg multilevel hierarchy }
e.g. generated with smoothed_aggregation_solver(...) or
ruge_stuben_solver(...)
Returns
-------
(1) table to standard out detailing the spectrum of each level in mg
(2) if plot==True, a sequence of plots in the complex plane of the
spectrum at each level
Notes
-----
This can be useful for troubleshooting and when examining how your
problem's nature changes from level to level
Examples
--------
>>> from pyamg import smoothed_aggregation_solver
>>> from pyamg.gallery import poisson
>>> from pyamg.util.utils import hierarchy_spectrum
>>> A = poisson( (1,), format='csr' )
>>> ml = smoothed_aggregation_solver(A)
>>> hierarchy_spectrum(ml)
<BLANKLINE>
Level min(re(eig)) max(re(eig)) num re(eig) < 0 num re(eig) > 0 cond_2(A)
---------------------------------------------------------------------------
0 2.000 2.000 0 1 1.00e+00
<BLANKLINE>
<BLANKLINE>
Level min(im(eig)) max(im(eig)) num im(eig) < 0 num im(eig) > 0 cond_2(A)
---------------------------------------------------------------------------
0 0.000 0.000 0 0 1.00e+00
<BLANKLINE>
|
def assert_image_exists(self, pattern, timeout=20.0, **kwargs):
    """
    Assert that an image can be found on the current screen.

    Args:
    - pattern: image filename # not support pattern for now
    - timeout (float): seconds to keep polling for a match
    - safe (bool): do not raise an assert error even when the check failed

    The outcome (position, confidence, screenshots) is recorded via
    self._add_assert for the test report.
    """
    pattern = self.d.pattern_open(pattern)
    # `safe` is handled by _add_assert, not by the wait call: the wait is
    # forced non-raising so the outcome can be reported uniformly.
    match_kwargs = kwargs.copy()
    match_kwargs.pop('safe', None)
    match_kwargs.update({
        'timeout': timeout,
        'safe': True,
    })
    res = self.d.wait(pattern, **match_kwargs)
    is_success = res is not None
    message = 'assert image exists'
    if res:
        x, y = res.pos
        kwargs['position'] = {'x': x, 'y': y}
        message = 'image exists\npos %s\nconfidence=%.2f\nmethod=%s' % (res.pos, res.confidence, res.method)
    else:
        # One extra non-waiting match to build a diagnostic message
        # explaining why the wait failed.
        res = self.d.match(pattern)
        if res is None:
            message = 'Image not found'
        else:
            # Found but (presumably) below threshold: report how close
            # the best match was.
            th = kwargs.get('threshold') or pattern.threshold or self.image_match_threshold
            message = 'Matched: %s\nPosition: %s\nConfidence: %.2f\nThreshold: %.2f' % (
                res.matched, res.pos, res.confidence, th)
    kwargs['target'] = self._save_screenshot(pattern, name_prefix='target')
    kwargs['screenshot'] = self.last_screenshot
    kwargs.update({
        'action': 'assert_image_exists',
        'message': message,
        'success': is_success,
    })
    self._add_assert(**kwargs)
|
Assert if image exists
Args:
- pattern: image filename # not support pattern for now
- timeout (float): seconds
- safe (bool): do not raise an assert error even when the check failed.
|
def validate_unit_process_ids(self, expected, actual):
    """Validate process id quantities for services on units.

    :param expected: dict of sentry -> {process name: expected pid spec};
        the spec may be an int (exact count), a list of ints (any one
        matching count), or a bool (True: one or more pids; False: none).
    :param actual: dict of sentry -> {process name: list of pids}
    :returns: None when everything matches, otherwise an error-message
        string describing the first mismatch.
    """
    self.log.debug('Checking units for running processes...')
    self.log.debug('Expected PIDs: {}'.format(expected))
    self.log.debug('Actual PIDs: {}'.format(actual))
    if len(actual) != len(expected):
        return ('Unit count mismatch. expected, actual: {}, '
                '{} '.format(len(expected), len(actual)))
    for (e_sentry, e_proc_names) in six.iteritems(expected):
        e_sentry_name = e_sentry.info['unit_name']
        if e_sentry in actual.keys():
            a_proc_names = actual[e_sentry]
        else:
            return ('Expected sentry ({}) not found in actual dict data.'
                    '{}'.format(e_sentry_name, e_sentry))
        if len(e_proc_names.keys()) != len(a_proc_names.keys()):
            return ('Process name count mismatch. expected, actual: {}, '
                    '{}'.format(len(expected), len(actual)))
        # NOTE(review): zipping .items() of both dicts assumes they share
        # the same iteration order — confirm callers build these dicts
        # consistently (e.g. both ordered).
        for (e_proc_name, e_pids), (a_proc_name, a_pids) in \
                zip(e_proc_names.items(), a_proc_names.items()):
            if e_proc_name != a_proc_name:
                return ('Process name mismatch. expected, actual: {}, '
                        '{}'.format(e_proc_name, a_proc_name))
            a_pids_length = len(a_pids)
            fail_msg = ('PID count mismatch. {} ({}) expected, actual: '
                        '{}, {} ({})'.format(e_sentry_name, e_proc_name,
                                             e_pids, a_pids_length,
                                             a_pids))
            # If expected is a list, ensure at least one PID quantity match
            if isinstance(e_pids, list) and \
                    a_pids_length not in e_pids:
                return fail_msg
            # If expected is not bool and not list,
            # ensure PID quantities match
            elif not isinstance(e_pids, bool) and \
                    not isinstance(e_pids, list) and \
                    a_pids_length != e_pids:
                return fail_msg
            # If expected is bool True, ensure 1 or more PIDs exist
            elif isinstance(e_pids, bool) and \
                    e_pids is True and a_pids_length < 1:
                return fail_msg
            # If expected is bool False, ensure 0 PIDs exist
            elif isinstance(e_pids, bool) and \
                    e_pids is False and a_pids_length != 0:
                return fail_msg
            else:
                self.log.debug('PID check OK: {} {} {}: '
                               '{}'.format(e_sentry_name, e_proc_name,
                                           e_pids, a_pids))
    return None
|
Validate process id quantities for services on units.
|
def snmp_server_community_ipv4_acl(self, **kwargs):
    """Build the snmp-server community ipv4-acl NETCONF config element.

    Required kwargs: ``community`` and ``ipv4_acl`` (raises KeyError when
    missing). An optional ``callback`` overrides ``self._callback`` and
    receives the assembled element tree; its result is returned.
    """
    root = ET.Element("config")
    snmp = ET.SubElement(root, "snmp-server", xmlns="urn:brocade.com:mgmt:brocade-snmp")
    comm = ET.SubElement(snmp, "community")
    ET.SubElement(comm, "community").text = kwargs.pop('community')
    ET.SubElement(comm, "ipv4-acl").text = kwargs.pop('ipv4_acl')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
|
Auto Generated Code
|
def convert_cifar10(directory, output_directory,
                    output_filename='cifar10.hdf5'):
    """Converts the CIFAR-10 dataset to HDF5.

    Converts the CIFAR-10 dataset to an HDF5 dataset compatible with
    :class:`fuel.datasets.CIFAR10`. The converted dataset is saved as
    'cifar10.hdf5'.

    It assumes the existence of the following file:

    * `cifar-10-python.tar.gz`

    Parameters
    ----------
    directory : str
        Directory in which input files reside.
    output_directory : str
        Directory in which to save the converted dataset.
    output_filename : str, optional
        Name of the saved dataset. Defaults to 'cifar10.hdf5'.

    Returns
    -------
    output_paths : tuple of str
        Single-element tuple containing the path to the converted dataset.
    """
    output_path = os.path.join(output_directory, output_filename)
    h5file = h5py.File(output_path, mode='w')
    input_file = os.path.join(directory, DISTRIBUTION_FILE)
    tar_file = tarfile.open(input_file, 'r:gz')
    # Load the five training batches straight out of the tarball.
    train_batches = []
    for batch in range(1, 6):
        file = tar_file.extractfile(
            'cifar-10-batches-py/data_batch_%d' % batch)
        try:
            # Python 3 needs latin1 to unpickle Python 2 numpy arrays.
            if six.PY3:
                array = cPickle.load(file, encoding='latin1')
            else:
                array = cPickle.load(file)
            train_batches.append(array)
        finally:
            file.close()
    # Rows are flat 3072-vectors; reshape to (N, channel, height, width).
    train_features = numpy.concatenate(
        [batch['data'].reshape(batch['data'].shape[0], 3, 32, 32)
         for batch in train_batches])
    train_labels = numpy.concatenate(
        [numpy.array(batch['labels'], dtype=numpy.uint8)
         for batch in train_batches])
    # Targets carry an explicit trailing axis: shape (N, 1).
    train_labels = numpy.expand_dims(train_labels, 1)
    file = tar_file.extractfile('cifar-10-batches-py/test_batch')
    try:
        if six.PY3:
            test = cPickle.load(file, encoding='latin1')
        else:
            test = cPickle.load(file)
    finally:
        file.close()
    test_features = test['data'].reshape(test['data'].shape[0],
                                         3, 32, 32)
    test_labels = numpy.array(test['labels'], dtype=numpy.uint8)
    test_labels = numpy.expand_dims(test_labels, 1)
    data = (('train', 'features', train_features),
            ('train', 'targets', train_labels),
            ('test', 'features', test_features),
            ('test', 'targets', test_labels))
    fill_hdf5_file(h5file, data)
    # Label the axes so downstream fuel streams know their semantics.
    h5file['features'].dims[0].label = 'batch'
    h5file['features'].dims[1].label = 'channel'
    h5file['features'].dims[2].label = 'height'
    h5file['features'].dims[3].label = 'width'
    h5file['targets'].dims[0].label = 'batch'
    h5file['targets'].dims[1].label = 'index'
    h5file.flush()
    h5file.close()
    return (output_path,)
|
Converts the CIFAR-10 dataset to HDF5.
Converts the CIFAR-10 dataset to an HDF5 dataset compatible with
:class:`fuel.datasets.CIFAR10`. The converted dataset is saved as
'cifar10.hdf5'.
It assumes the existence of the following file:
* `cifar-10-python.tar.gz`
Parameters
----------
directory : str
Directory in which input files reside.
output_directory : str
Directory in which to save the converted dataset.
output_filename : str, optional
Name of the saved dataset. Defaults to 'cifar10.hdf5'.
Returns
-------
output_paths : tuple of str
Single-element tuple containing the path to the converted dataset.
|
def functions(self):
    """Map template-visible function names to their Python callables.

    Each stored attribute name has the registration prefix stripped so
    the resulting keys match what templates see; values are the bound
    attributes themselves.
    """
    prefix_len = len(self._prefix)
    return {name[prefix_len:]: getattr(self, name)
            for name in self._func_names}
|
Returns a dictionary containing the functions defined in this
object. The keys are function names (as exposed in templates)
and the values are Python functions.
|
def delete(self, request, key):
    """Remove an email address, validated or not."""
    # Django does not parse DELETE bodies; build the QueryDict manually.
    request.DELETE = http.QueryDict(request.body)
    email_addr = request.DELETE.get('email')
    user_id = request.DELETE.get('user')
    if not email_addr:
        return http.HttpResponseBadRequest()
    # A pending (unvalidated) address is deleted outright.
    try:
        pending = EmailAddressValidation.objects.get(address=email_addr,
                                                     user_id=user_id)
    except EmailAddressValidation.DoesNotExist:
        pass
    else:
        pending.delete()
        return http.HttpResponse(status=204)
    # A validated address is kept but detached from the user.
    try:
        validated = EmailAddress.objects.get(address=email_addr,
                                             user_id=user_id)
    except EmailAddress.DoesNotExist:
        raise http.Http404
    validated.user = None
    validated.save()
    return http.HttpResponse(status=204)
|
Remove an email address, validated or not.
|
def make_owner(user):
    '''
    Grant the given user owner and tutor group membership
    (staff flag set, superuser flag cleared).
    '''
    tutor_group, owner_group = _get_user_groups()
    user.is_staff = True
    user.is_superuser = False
    user.save()
    # Owners are also tutors, so the user joins both groups.
    for group in (owner_group, tutor_group):
        group.user_set.add(user)
        group.save()
|
Makes the given user a owner and tutor.
|
def keystoneclient(request, admin=False):
    """Returns a client connected to the Keystone backend.

    Several forms of authentication are supported:

    * Username + password -> Unscoped authentication
    * Username + password + tenant id -> Scoped authentication
    * Unscoped token -> Unscoped authentication
    * Unscoped token + tenant id -> Scoped authentication
    * Scoped token -> Scoped authentication

    Available services and data from the backend will vary depending on
    whether the authentication was scoped or unscoped.

    Lazy authentication if an ``endpoint`` parameter is provided.

    Calls requiring the admin endpoint should have ``admin=True`` passed in
    as a keyword argument.

    The client is cached so that subsequent API calls during the same
    request/response cycle don't have to be re-authenticated.
    """
    client_version = VERSIONS.get_active_version()
    user = request.user
    token_id = user.token.id
    if is_multi_domain_enabled():
        # Cloud Admin, Domain Admin or Mixed Domain Admin
        if is_domain_admin(request):
            # Prefer the domain-scoped token stored in the session, if any.
            domain_token = request.session.get('domain_token')
            if domain_token:
                token_id = getattr(domain_token, 'auth_token', None)
    if admin:
        if not policy.check((("identity", "admin_required"),), request):
            raise exceptions.NotAuthorized
        endpoint_type = 'adminURL'
    else:
        endpoint_type = getattr(settings,
                                'OPENSTACK_ENDPOINT_TYPE',
                                'publicURL')
    # Take care of client connection caching/fetching a new client.
    # Admin vs. non-admin clients are cached separately for token matching.
    cache_attr = "_keystoneclient_admin" if admin \
        else backend.KEYSTONE_CLIENT_ATTR
    # Reuse the cached client only if its auth token still matches the
    # current user's token.
    if (hasattr(request, cache_attr) and
            (not user.token.id or
             getattr(request, cache_attr).auth_token == user.token.id)):
        conn = getattr(request, cache_attr)
    else:
        endpoint = _get_endpoint_url(request, endpoint_type)
        verify = not getattr(settings, 'OPENSTACK_SSL_NO_VERIFY', False)
        cacert = getattr(settings, 'OPENSTACK_SSL_CACERT', None)
        # NOTE(review): when verification is enabled but no CA cert is
        # configured, this leaves `verify` falsy (None) — confirm that
        # effectively disabling verification in that case is intended.
        verify = verify and cacert
        LOG.debug("Creating a new keystoneclient connection to %s.", endpoint)
        remote_addr = request.environ.get('REMOTE_ADDR', '')
        token_auth = token_endpoint.Token(endpoint=endpoint,
                                          token=token_id)
        keystone_session = session.Session(auth=token_auth,
                                           original_ip=remote_addr,
                                           verify=verify)
        conn = client_version['client'].Client(session=keystone_session,
                                               debug=settings.DEBUG)
        setattr(request, cache_attr, conn)
    return conn
|
Returns a client connected to the Keystone backend.
Several forms of authentication are supported:
* Username + password -> Unscoped authentication
* Username + password + tenant id -> Scoped authentication
* Unscoped token -> Unscoped authentication
* Unscoped token + tenant id -> Scoped authentication
* Scoped token -> Scoped authentication
Available services and data from the backend will vary depending on
whether the authentication was scoped or unscoped.
Lazy authentication if an ``endpoint`` parameter is provided.
Calls requiring the admin endpoint should have ``admin=True`` passed in
as a keyword argument.
The client is cached so that subsequent API calls during the same
request/response cycle don't have to be re-authenticated.
|
def transitive_subgraph_of_addresses_bfs(self,
                                         addresses,
                                         predicate=None,
                                         dep_predicate=None):
    """Returns the transitive dependency closure of `addresses` using BFS.

    :API: public

    :param list<Address> addresses: The closure of `addresses` will be walked.
    :param function predicate: If this parameter is not given, no Targets will be filtered
      out of the closure. If it is given, any Target which fails the predicate will not be
      walked, nor will its dependencies. Thus predicate effectively trims out any subgraph
      that would only be reachable through Targets that fail the predicate.
    :param function dep_predicate: Takes two parameters, the current target and the dependency of
      the current target. If this parameter is not given, no dependencies will be filtered
      when traversing the closure. If it is given, when the predicate fails, the edge to the
      dependency will not be expanded.
    """
    # The walk strategy encapsulates dep_predicate handling and the
    # expanded/worked bookkeeping.
    walk = self._walk_factory(dep_predicate)
    ordered_closure = OrderedSet()
    # Queue entries are (bfs-depth, address) pairs, seeds at depth 0.
    to_walk = deque((0, addr) for addr in addresses)
    while len(to_walk) > 0:
        level, address = to_walk.popleft()
        # Let the walk strategy decide whether this node is expanded;
        # it is skipped if already handled.
        if not walk.expand_once(address, level):
            continue
        target = self._target_by_address[address]
        # A failing predicate prunes the target and its whole subgraph.
        if predicate and not predicate(target):
            continue
        if walk.do_work_once(address):
            ordered_closure.add(target)
        for dep_address in self._target_dependencies_by_address[address]:
            if walk.expanded_or_worked(dep_address):
                continue
            # The dep_predicate controls whether this edge is expanded.
            if walk.dep_predicate(target, self._target_by_address[dep_address], level):
                to_walk.append((level + 1, dep_address))
    return ordered_closure
|
Returns the transitive dependency closure of `addresses` using BFS.
:API: public
:param list<Address> addresses: The closure of `addresses` will be walked.
:param function predicate: If this parameter is not given, no Targets will be filtered
out of the closure. If it is given, any Target which fails the predicate will not be
walked, nor will its dependencies. Thus predicate effectively trims out any subgraph
that would only be reachable through Targets that fail the predicate.
:param function dep_predicate: Takes two parameters, the current target and the dependency of
the current target. If this parameter is not given, no dependencies will be filtered
when traversing the closure. If it is given, when the predicate fails, the edge to the dependency
will not be expanded.
|
def lookup(self):
    """
    The meat of this middleware.

    Returns None and sets settings.SITE_ID if able to find a Site
    object by domain and its subdomain is valid.

    Otherwise, returns False.

    NOTE(review): the original docstring also promised an
    HttpResponsePermanentRedirect for unsupported subdomains or
    HOSTNAME_REDIRECTS entries, but no code path here returns a
    redirect — presumably that case is handled elsewhere; confirm.
    """
    # check to see if this hostname is actually a env hostname
    if self.domain:
        if self.subdomain:
            self.domain_unsplit = '%s.%s' % (self.subdomain, self.domain)
        else:
            self.domain_unsplit = self.domain
    self.domain_requested = self.domain_unsplit
    # check cache
    cache_key = 'site_id:%s' % self.domain_unsplit
    site_id = cache.get(cache_key)
    if site_id:
        SITE_ID.value = site_id
        try:
            self.site = Site.objects.get(id=site_id)
        except Site.DoesNotExist:
            # This might happen if the Site object was deleted from the
            # database after it was cached. Remove from cache and act
            # as if the cache lookup failed.
            cache.delete(cache_key)
        else:
            return None
    # check database
    try:
        self.site = Site.objects.get(domain=self.domain)
    except Site.DoesNotExist:
        return False
    if not self.site:
        return False
    SITE_ID.value = self.site.pk
    # Cache the mapping for five minutes.
    cache.set(cache_key, SITE_ID.value, 5*60)
    return None
|
The meat of this middleware.
Returns None and sets settings.SITE_ID if able to find a Site
object by domain and its subdomain is valid.
Returns an HttpResponsePermanentRedirect to the Site's default
subdomain if a site is found but the requested subdomain
is not supported, or if domain_unsplit is defined in
settings.HOSTNAME_REDIRECTS
Otherwise, returns False.
|
def kvlclient(self):
    '''Return a thread local ``kvlayer`` client, creating it on first use.'''
    client = self._kvlclient
    if client is None:
        client = kvlayer.client()
        self._kvlclient = client
    return client
|
Return a thread local ``kvlayer`` client.
|
def connection_lost(self, exc=None):
    """Fire the ``connection_lost`` event, logging when debug is on."""
    debug_enabled = self._loop.get_debug()
    if debug_enabled:
        self.producer.logger.debug('connection lost %s', self)
    self.event('connection_lost').fire(exc=exc)
|
Fires the ``connection_lost`` event.
|
def _iteratively_analyze_function_features(self, all_funcs_completed=False):
    """
    Iteratively analyze function features until a fixed point is reached.

    :return: the "changes" dict
    :rtype: dict
    """
    no_return = set()
    does_return = set()
    while True:
        delta = self._analyze_function_features(all_funcs_completed=all_funcs_completed)
        no_return |= set(delta['functions_do_not_return'])
        does_return |= set(delta['functions_return'])
        # An empty delta in both directions means a fixed point.
        if not delta['functions_do_not_return'] and not delta['functions_return']:
            break
    return {
        'functions_do_not_return': no_return,
        'functions_return': does_return,
    }
|
Iteratively analyze function features until a fixed point is reached.
:return: the "changes" dict
:rtype: dict
|
def read(self, size=None):
    """Reads a byte string from the file-like object at the current offset.

    The function will read a byte string of the specified size or
    all of the remaining data if no size was specified.

    Args:
      size (Optional[int]): number of bytes to read, where None is all
          remaining data.

    Returns:
      bytes: data read.

    Raises:
      IOError: if the read failed.
      OSError: if the read failed.
    """
    if not self._is_open:
        raise IOError('Not opened.')
    if self._current_offset < 0:
        raise IOError(
            'Invalid current offset: {0:d} value less than zero.'.format(
                self._current_offset))
    # Lazily determine the total decrypted size on the first read.
    if self._decrypted_stream_size is None:
        self._decrypted_stream_size = self._GetDecryptedStreamSize()
    if self._decrypted_stream_size < 0:
        raise IOError('Invalid decrypted stream size.')
    # At or beyond end-of-stream: nothing to read.
    if self._current_offset >= self._decrypted_stream_size:
        return b''
    # A previous seek may have left the decryption buffer out of sync
    # with the requested offset; realign before reading.
    if self._realign_offset:
        self._AlignDecryptedDataOffset(self._current_offset)
        self._realign_offset = False
    if size is None:
        size = self._decrypted_stream_size
    # Clamp the read size to the remaining stream.
    if self._current_offset + size > self._decrypted_stream_size:
        size = self._decrypted_stream_size - self._current_offset
    decrypted_data = b''
    if size == 0:
        return decrypted_data
    # Consume whole buffered chunks while more data is needed than the
    # decrypted buffer currently holds; each refill updates
    # _decrypted_data / _decrypted_data_size.
    while size > self._decrypted_data_size:
        decrypted_data = b''.join([
            decrypted_data,
            self._decrypted_data[self._decrypted_data_offset:]])
        remaining_decrypted_data_size = (
            self._decrypted_data_size - self._decrypted_data_offset)
        self._current_offset += remaining_decrypted_data_size
        size -= remaining_decrypted_data_size
        if self._current_offset >= self._decrypted_stream_size:
            break
        # Refill the decryption buffer from the encrypted source.
        read_count = self._ReadEncryptedData(self._ENCRYPTED_DATA_BUFFER_SIZE)
        self._decrypted_data_offset = 0
        if read_count == 0:
            break
    # Take the remainder from the (partially consumed) buffer.
    if size > 0:
        slice_start_offset = self._decrypted_data_offset
        slice_end_offset = slice_start_offset + size
        decrypted_data = b''.join([
            decrypted_data,
            self._decrypted_data[slice_start_offset:slice_end_offset]])
        self._decrypted_data_offset += size
        self._current_offset += size
    return decrypted_data
|
Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size (Optional[int]): number of bytes to read, where None is all
remaining data.
Returns:
bytes: data read.
Raises:
IOError: if the read failed.
OSError: if the read failed.
|
def ping():
    '''
    Is the marathon api responding?
    '''
    alive = False
    try:
        response = salt.utils.http.query(
            "{0}/ping".format(CONFIG[CONFIG_BASE_URL]),
            decode_type='plain',
            decode=True,
        )
        log.debug(
            'marathon.info returned successfully: %s',
            response,
        )
        # The marathon /ping endpoint answers with a literal 'pong'.
        if 'text' in response and response['text'].strip() == 'pong':
            alive = True
    except Exception as ex:
        # Best-effort health check: any failure means "not responding".
        log.error(
            'error calling marathon.info with base_url %s: %s',
            CONFIG[CONFIG_BASE_URL],
            ex,
        )
    return alive
|
Is the marathon api responding?
|
def corrcoef(time, crossf, integration_window=0.):
    """
    Calculate the correlation coefficient for given auto- and crosscorrelation
    functions. Standard settings yield the zero lag correlation coefficient.
    Setting integration_window > 0 yields the correlation coefficient of
    integrated auto- and crosscorrelation functions. The correlation
    coefficient between a zero signal with any other signal is defined as 0.

    Parameters
    ----------
    time : numpy.ndarray
        1 dim array of times corresponding to signal.
    crossf : numpy.ndarray
        Crosscorrelation functions, 1st axis first unit, 2nd axis second
        unit, 3rd axis times.
    integration_window : float
        Size of the integration window.

    Returns
    -------
    cc : numpy.ndarray
        2 dim array of correlation coefficient between two units.
    """
    N = len(crossf)
    cc = np.zeros(np.shape(crossf)[:-1])
    tbin = abs(time[1] - time[0])
    # Number of bins on each side of the center bin to integrate over.
    lim = int(integration_window / tbin)
    # Index of the zero-lag (center) bin. BUG FIX: floor division is
    # required here — len(time)/2 (and np.floor(...)) yields a float in
    # Python 3, which raises TypeError when used as an array index.
    if len(time) % 2 == 0:
        mid = len(time) // 2 - 1
    else:
        mid = len(time) // 2
    for i in range(N):
        # Integrated autocorrelation of unit i and its baseline offset
        # (mean of the left tail of the function).
        ai = np.sum(crossf[i, i][mid - lim:mid + lim + 1])
        offset_autoi = np.mean(crossf[i, i][:mid - 1])
        for j in range(N):
            cij = np.sum(crossf[i, j][mid - lim:mid + lim + 1])
            offset_cross = np.mean(crossf[i, j][:mid - 1])
            aj = np.sum(crossf[j, j][mid - lim:mid + lim + 1])
            offset_autoj = np.mean(crossf[j, j][:mid - 1])
            if ai > 0. and aj > 0.:
                cc[i, j] = (cij - offset_cross) / np.sqrt(
                    (ai - offset_autoi) * (aj - offset_autoj))
            else:
                # Zero (or non-positive) autocorrelation: correlation with
                # a zero signal is defined as 0.
                cc[i, j] = 0.
    return cc
|
Calculate the correlation coefficient for given auto- and crosscorrelation
functions. Standard settings yield the zero lag correlation coefficient.
Setting integration_window > 0 yields the correlation coefficient of
integrated auto- and crosscorrelation functions. The correlation coefficient
between a zero signal with any other signal is defined as 0.
Parameters
----------
time : numpy.ndarray
1 dim array of times corresponding to signal.
crossf : numpy.ndarray
Crosscorrelation functions, 1st axis first unit, 2nd axis second unit,
3rd axis times.
integration_window: float
Size of the integration window.
Returns
-------
cc : numpy.ndarray
2 dim array of correlation coefficient between two units.
|
def delete(handler, item_id, id_name):
    """Ask the handler to perform a delete operation on the given item."""
    handler.invoke({
        'operation': 'delete',
        'id': item_id,
        'id_name': id_name,
    })
|
Delete an item
|
def _createStructure(self, linkResult, replaceParamFile):
    """
    Create GSSHAPY Structure Objects Method

    Builds a StreamLink for a structure-type link and populates it with
    Weir / Culvert objects parsed from ``linkResult``. Numeric values pass
    through ``vrp`` so they may be substituted from ``replaceParamFile``.

    NOTE(review): curve structures (RATING_CURVE, SCHEDULED_RELEASE,
    RULE_CURVE) are recognized but skipped here — confirm they are
    handled elsewhere or intentionally unsupported.
    """
    # Constants
    WEIRS = ('WEIR', 'SAG_WEIR')
    CULVERTS = ('ROUND_CULVERT', 'RECT_CULVERT')
    CURVES = ('RATING_CURVE', 'SCHEDULED_RELEASE', 'RULE_CURVE')
    header = linkResult['header']
    # Initialize GSSHAPY StreamLink object
    link = StreamLink(linkNumber=header['link'],
                      type=linkResult['type'],
                      numElements=header['numstructs'])
    # Associate StreamLink with ChannelInputFile
    link.channelInputFile = self
    # Create Structure objects
    for s in linkResult['structures']:
        structType = s['structtype']
        # Cases
        if structType in WEIRS:
            # Weir type handler
            # Initialize GSSHAPY Weir object
            weir = Weir(type=structType,
                        crestLength=vrp(s['crest_length'], replaceParamFile),
                        crestLowElevation=vrp(s['crest_low_elev'], replaceParamFile),
                        dischargeCoeffForward=vrp(s['discharge_coeff_forward'], replaceParamFile),
                        dischargeCoeffReverse=vrp(s['discharge_coeff_reverse'], replaceParamFile),
                        crestLowLocation=vrp(s['crest_low_loc'], replaceParamFile),
                        steepSlope=vrp(s['steep_slope'], replaceParamFile),
                        shallowSlope=vrp(s['shallow_slope'], replaceParamFile))
            # Associate Weir with StreamLink
            weir.streamLink = link
        elif structType in CULVERTS:
            # Culvert type handler
            # Initialize GSSHAPY Culvert object
            culvert = Culvert(type=structType,
                              upstreamInvert=vrp(s['upinvert'], replaceParamFile),
                              downstreamInvert=vrp(s['downinvert'], replaceParamFile),
                              inletDischargeCoeff=vrp(s['inlet_disch_coeff'], replaceParamFile),
                              reverseFlowDischargeCoeff=vrp(s['rev_flow_disch_coeff'], replaceParamFile),
                              slope=vrp(s['slope'], replaceParamFile),
                              length=vrp(s['length'], replaceParamFile),
                              roughness=vrp(s['rough_coeff'], replaceParamFile),
                              diameter=vrp(s['diameter'], replaceParamFile),
                              width=vrp(s['width'], replaceParamFile),
                              height=vrp(s['height'], replaceParamFile))
            # Associate Culvert with StreamLink
            culvert.streamLink = link
        elif structType in CURVES:
            # Curve type handler
            pass
    return link
|
Create GSSHAPY Structure Objects Method
|
def check_and_order_id_inputs(rid, ridx, cid, cidx, row_meta_df, col_meta_df):
    """
    Makes sure that (if entered) id inputs entered are of one type (string id
    or index) and converts them into ordered indexes.

    Input:
        - rid (list or None): if not None, a list of rids
        - ridx (list or None): if not None, a list of indexes
        - cid (list or None): if not None, a list of cids
        - cidx (list or None): if not None, a list of indexes
        - row_meta_df / col_meta_df: metadata used for validation/ordering

    Output:
        - a tuple of the ordered ridx and cidx
    """
    # Reject mixed id/index input for rows and columns up front.
    row_kind, row_values = check_id_idx_exclusivity(rid, ridx)
    col_kind, col_values = check_id_idx_exclusivity(cid, cidx)
    # Validate/convert, then order against the metadata.
    row_values = check_and_convert_ids(row_kind, row_values, row_meta_df)
    ordered_ridx = get_ordered_idx(row_kind, row_values, row_meta_df)
    col_values = check_and_convert_ids(col_kind, col_values, col_meta_df)
    ordered_cidx = get_ordered_idx(col_kind, col_values, col_meta_df)
    return (ordered_ridx, ordered_cidx)
|
Makes sure that (if entered) id inputs entered are of one type (string id or index)
Input:
- rid (list or None): if not None, a list of rids
- ridx (list or None): if not None, a list of indexes
- cid (list or None): if not None, a list of cids
- cidx (list or None): if not None, a list of indexes
Output:
- a tuple of the ordered ridx and cidx
|
def median_date(dt_list):
    """Calculate the median datetime from a list of datetimes.

    For an even-length list the result is the mean of the two middle
    datetimes (delegated to ``mean_date``).

    NOTE(review): the list is assumed to already be sorted — confirm with
    callers, or sort before indexing if that assumption does not hold.
    """
    # BUG FIX: use floor division — len(dt_list) / 2 is a float in
    # Python 3 and raises TypeError when used as a list index.
    idx = len(dt_list) // 2
    if len(dt_list) % 2 == 0:
        md = mean_date([dt_list[idx - 1], dt_list[idx]])
    else:
        md = dt_list[idx]
    return md
|
Calculate the median datetime from a datetime list
|
def set_datastore_policy(self, func):
    """Set the context datastore policy function.

    Args:
      func: A function that accepts a Key instance as argument and returns
        a bool indicating if it should use the datastore. May be None, in
        which case the default policy is installed; a plain bool is
        wrapped in a constant-valued policy.
    """
    if func is None:
        policy = self.default_datastore_policy
    elif isinstance(func, bool):
        # Capture the constant now via a default argument.
        def policy(unused_key, flag=func):
            return flag
    else:
        policy = func
    self._datastore_policy = policy
|
Set the context datastore policy function.
Args:
func: A function that accepts a Key instance as argument and returns
a bool indicating if it should use the datastore. May be None.
|
def _decode_png(self, encoded_observation):
    """Decodes a single observation from PNG.

    Runs the pre-built decode tensor, feeding the encoded bytes into the
    graph placeholder. The ``.obj`` wrappers presumably hold lazily
    created session/tensor handles — confirm against their definitions.
    """
    return self._session.obj.run(
        self._decoded_image_t.obj,
        feed_dict={self._encoded_image_p.obj: encoded_observation}
    )
|
Decodes a single observation from PNG.
|
def postprocess(options):
    """Perform a parametric fit of the permutation test statistics and
    compute permutation and test p-values.

    Reads per-permutation ``*.res`` files from ``options.resdir``, fits a
    chi2-mixture to the null LLR distribution, and writes the permutation
    (``.perm``) and test (``.test``) p-values next to ``options.outfile``.
    Optionally renders a Manhattan plot when ``options.manhattan`` is set.
    """
    resdir = options.resdir
    out_file = options.outfile
    tol = options.tol

    print('.. load permutation results')
    file_name = os.path.join(resdir, 'perm*', '*.res')
    files = glob.glob(file_name)
    LLR0 = []
    for _file in files:
        print(_file)
        LLR0.append(NP.loadtxt(_file, usecols=[6]))
    LLR0 = NP.concatenate(LLR0)

    print('.. fit test statistics')
    t0 = time.time()
    # Honor the tolerance requested by the caller; the original ignored
    # options.tol and always used a hard-coded 4e-3.
    c2m = C2M.Chi2mixture(tol=tol)
    c2m.estimate_chi2mixture(LLR0)
    pv0 = c2m.sf(LLR0)
    t1 = time.time()
    print(('finished in %s seconds' % (t1 - t0)))

    print('.. export permutation results')
    perm_file = out_file + '.perm'
    RV = NP.array([LLR0, pv0]).T
    NP.savetxt(perm_file, RV, delimiter='\t', fmt='%.6f %.6e')

    print('.. load test results')
    file_name = os.path.join(resdir, 'test', '*.res')
    files = glob.glob(file_name)
    RV_test = []
    for _file in files:
        print(_file)
        RV_test.append(NP.loadtxt(_file))
    RV_test = NP.concatenate(RV_test)

    print('.. calc pvalues')
    # The LLR is the last column of each test-result row.
    pv = c2m.sf(RV_test[:, -1])[:, NP.newaxis]

    print('.. export test results')
    # Distinct name: this is the .test output, not the .perm one.
    test_file = out_file + '.test'
    RV_test = NP.hstack([RV_test, pv])
    NP.savetxt(test_file, RV_test, delimiter='\t', fmt='%d %d %d %d %d %d %.6e %.6e')

    if options.manhattan:
        manhattan_file = out_file + '.manhattan.jpg'
        plot_manhattan(pv, manhattan_file)
|
perform parametric fit of the test statistics and provide permutation and test pvalues
|
def fig_height(self):
    """Compute the (hand-tuned) figure height for this plot."""
    n_iterations = sum(
        1 for plotter in self.plotters.values() for _ in plotter.iterator()
    )
    base = 4 + len(self.data) * len(self.var_names) - 1
    return base + 0.1 * n_iterations
|
Figure out the height of this plot.
|
def abort(self):
    """
    Cancel the in-flight HTTP call, if one is running.
    """
    reply = self.reply
    if reply and reply.isRunning():
        self.on_abort = True
        reply.abort()
|
Handle request to cancel HTTP call
|
async def read(cls, id: int):
    """Fetch the `BootResource` with the given `id` via the handler."""
    return cls(await cls._handler.read(id=id))
|
Get `BootResource` by `id`.
|
def _gen_glob_data(dir, pattern, child_table):
    """Generates node data by globbing a directory for a pattern"""
    base = pathlib.Path(dir)
    found_any = False
    used_names = set()  # to_nodename() consults this to avoid duplicate names
    # Sort so any renaming is deterministic across runs.
    for path in sorted(base.glob(pattern)):
        if path.is_dir():
            continue
        found_any = True
        # Build this node's table, inheriting from the child table if given.
        table = {} if child_table is None else child_table.copy()
        rel = path.relative_to(base)
        table[RESERVED['file']] = str(rel)
        name = to_nodename(rel.stem, invalid=used_names)
        used_names.add(name)
        print("Matched with {!r}: {!r} from {!r}".format(pattern, name, str(rel)))
        yield name, table
    if not found_any:
        print("Warning: {!r} matched no files.".format(pattern))
|
Generates node data by globbing a directory for a pattern
|
def getCell(self, row, width=None):
    'Return DisplayWrapper for displayable cell value.'
    # Two-stage evaluation, both wrapped so exceptions are captured rather
    # than raised: fetch the raw cell value, then coerce it to the column type.
    cellval = wrapply(self.getValue, row)
    typedval = wrapply(self.type, cellval)
    # A TypedWrapper result means at least one of the stages did not produce
    # a plain value; dispatch on which stage failed.
    if isinstance(typedval, TypedWrapper):
        if isinstance(cellval, TypedExceptionWrapper):  # calc failed
            exc = cellval.exception
            if cellval.forwarded:
                dispval = str(cellval)  # traceback.format_exception_only(type(exc), exc)[-1].strip()
            else:
                dispval = options.disp_error_val
            return DisplayWrapper(cellval.val, error=exc.stacktrace,
                                    display=dispval,
                                    note=options.note_getter_exc,
                                    notecolor='color_error')
        elif typedval.val is None:  # early out for strict None
            return DisplayWrapper(None, display='',  # force empty display for None
                                        note=options.disp_note_none,
                                        notecolor='color_note_type')
        elif isinstance(typedval, TypedExceptionWrapper):  # calc succeeded, type failed
            return DisplayWrapper(typedval.val, display=str(cellval),
                                       error=typedval.exception.stacktrace,
                                       note=options.note_type_exc,
                                       notecolor='color_warning')
        else:
            # Some other TypedWrapper: show the coerced value but flag it.
            return DisplayWrapper(typedval.val, display=str(typedval.val),
                                       note=options.note_type_exc,
                                       notecolor='color_warning')
    elif isinstance(typedval, threading.Thread):
        # Async cell still computing in a background thread.
        return DisplayWrapper(None,
                                display=options.disp_pending,
                                note=options.note_pending,
                                notecolor='color_note_pending')
    # Normal path: format the typed value for display.
    dw = DisplayWrapper(cellval)
    try:
        dw.display = self.format(typedval) or ''
        # Right-justify numeric columns within the available width.
        if width and isNumeric(self):
            dw.display = dw.display.rjust(width-1)
        # annotate cells with raw value type in anytype columns, except for strings
        if self.type is anytype and type(cellval) is not str:
            typedesc = typemap.get(type(cellval), None)
            dw.note = typedesc.icon if typedesc else options.note_unknown_type
            dw.notecolor = 'color_note_type'
    except Exception as e:  # formatting failure
        e.stacktrace = stacktrace()
        dw.error = e
        # Fall back to the raw value's str(); even that may raise.
        try:
            dw.display = str(cellval)
        except Exception as e:
            dw.display = str(e)
        dw.note = options.note_format_exc
        dw.notecolor = 'color_warning'
    return dw
|
Return DisplayWrapper for displayable cell value.
|
def write_document(document, out, validate=True):
    """Serialize an SPDX document as RDF to a file-like object.

    - document - spdx.document instance.
    - out - file like object that will be written to.
    If `validate` is true, run document.validate first and raise
    InvalidDocumentError when it reports any messages.
    """
    if validate:
        messages = document.validate([])
        if messages:
            raise InvalidDocumentError(messages)
    Writer(document, out).write()
|
Write an SPDX RDF document.
- document - spdx.document instance.
- out - file like object that will be written to.
Optionally `validate` the document before writing and raise
InvalidDocumentError if document.validate returns False.
|
def get_file_size(filename):
    """
    Return the human-readable size of a file, or None when it is not a file.
    :param filename: string: pathname of a file
    :return: human readable filesize
    """
    if not os.path.isfile(filename):
        return None
    return convert_size(os.path.getsize(filename))
|
Get the file size of a given file
:param filename: string: pathname of a file
:return: human readable filesize
|
def ladder_length(begin_word, end_word, word_list):
    """
    Bidirectional BFS!!!
    :type begin_word: str
    :type end_word: str
    :type word_list: Set[str]
    :rtype: int
    """
    if len(begin_word) != len(end_word):
        return -1  # not possible
    if begin_word == end_word:
        return 0
    # Words differing in exactly one position are one step apart.
    if sum(1 for a, b in zip(begin_word, end_word) if a != b) == 1:
        return 1

    frontier = {begin_word}
    other_side = {end_word}
    steps = 2
    while frontier and other_side:
        # Always expand the smaller frontier.
        if len(frontier) > len(other_side):
            frontier, other_side = other_side, frontier
        next_frontier = set()
        for word in frontier:
            for candidate in word_range(word):
                if candidate in other_side:
                    return steps
                if candidate in word_list:
                    next_frontier.add(candidate)
                    word_list.remove(candidate)
        frontier = next_frontier
        steps += 1
    return -1
|
Bidirectional BFS!!!
:type begin_word: str
:type end_word: str
:type word_list: Set[str]
:rtype: int
|
def get_queryset(self, request):
    """
    Restrict entries to the requesting user's own unless they may view all.
    """
    if request.user.has_perm('zinnia.can_view_all'):
        queryset = super(EntryAdmin, self).get_queryset(request)
    else:
        queryset = self.model.objects.filter(authors__pk=request.user.pk)
    return queryset.prefetch_related('categories', 'authors', 'sites')
|
Make special filtering by user's permissions.
|
def odata_converter(data, str_type):
    ''' Convert a raw OData value to a Python value based on its EDM type.
    http://www.odata.org/documentation/odata-version-2-0/overview#AbstractTypeSystem
    To be completed
    '''
    if str_type in ("Edm.Single", "Edm.Double"):
        return float(data)
    if str_type and "Edm.Int" in str_type:
        return int(data)
    # Unknown or missing type: fall back to a string.
    return _str(data)
|
Convert odata type
http://www.odata.org/documentation/odata-version-2-0/overview#AbstractTypeSystem
To be completed
|
def action_log_create(sender, instance, created, **kwargs):
    """
    Signal receiver that creates a log entry when a model instance is first saved to the database.
    Direct use is discouraged, connect your model through :py:func:`actionslog.registry.register` instead.
    """
    if created:
        changes = model_instance_diff(None, instance)
        # The created LogAction is persisted by the manager; the returned
        # object was never used, so don't bind it.
        LogAction.objects.create_log_action(
            instance=instance,
            action=LogAction.CREATE,
            changes=json.dumps(changes),
        )
|
Signal receiver that creates a log entry when a model instance is first saved to the database.
Direct use is discouraged, connect your model through :py:func:`actionslog.registry.register` instead.
|
def press_button(self, value):
    """
    Click the button with the given label.
    """
    button = find_button(world.browser, value)
    if button:
        button.click()
    else:
        raise AssertionError(
            "Cannot find a button named '{}'.".format(value))
|
Click the button with the given label.
|
def validateAuthCode(code, redirect_uri, client_id, state=None, validationEndpoint='https://indieauth.com/auth', headers={}):
    """Call the authorization endpoint to validate the given auth code.

    :param code: the auth code to validate
    :param redirect_uri: redirect_uri for the given auth code
    :param client_id: where to find the auth endpoint for the given auth code
    :param state: state for the given auth code
    :param validationEndpoint: fallback URL to make the validation request at
    :param headers: optional headers to send with any request
    :rtype: dict with 'status', 'headers' and 'content' keys ('response'
        holds the parsed body on HTTP 200), or None when no authorization
        endpoint could be discovered for `client_id`
    """
    payload = {'code': code,
               'redirect_uri': redirect_uri,
               'client_id': client_id,
               }
    if state is not None:
        payload['state'] = state

    # Use the first discovered authorization endpoint, if any.
    authEndpoints = discoverAuthEndpoints(client_id, headers=headers)
    authURL = next(iter(authEndpoints['authorization_endpoint']), None)

    if authURL is not None:
        validationEndpoint = ParseResult(authURL.scheme, authURL.netloc, authURL.path, '', '', '').geturl()
        r = requests.post(validationEndpoint, verify=True, data=payload, headers=headers)
        result = {'status': r.status_code,
                  'headers': r.headers
                  }
        # Text bodies are decoded only when the server declared a charset.
        if 'charset' in r.headers.get('content-type', ''):
            result['content'] = r.text
        else:
            result['content'] = r.content
        if r.status_code == requests.codes.ok:
            result['response'] = parse_qs(result['content'])
        return result
|
Call authorization endpoint to validate given auth code.
:param code: the auth code to validate
:param redirect_uri: redirect_uri for the given auth code
:param client_id: where to find the auth endpoint for the given auth code
:param state: state for the given auth code
:param validationEndpoint: URL to make the validation request at
:param headers: optional headers to send with any request
:rtype: True if auth code is valid
|
def terminate(self):
    """Signal the standalone listener loop to stop."""
    channel = state.MANAGER_EXECUTOR_CHANNELS.queue
    logger.info(__("Terminating Resolwe listener on channel '{}'.", channel))
    self._should_stop = True
|
Stop the standalone manager.
|
def get(self, name_or_klass):
    """
    Look up an installed mode by its name or by its class.
    :param name_or_klass: The name or the class of the mode to get
    :type name_or_klass: str or type
    :rtype: pyqode.core.api.Mode
    """
    key = name_or_klass if isinstance(name_or_klass, str) else name_or_klass.__name__
    return self._modes[key]
|
Gets a mode by name (or class)
:param name_or_klass: The name or the class of the mode to get
:type name_or_klass: str or type
:rtype: pyqode.core.api.Mode
|
def reactToAMQPMessage(message, send_back):
    """
    React to given (AMQP) message. `message` is expected to be
    :py:func:`collections.namedtuple` structure from :mod:`.structures` filled
    with all necessary data.
    Args:
        message (object): One of the request objects defined in
            :mod:`.structures`.
        send_back (fn reference): Reference to function for responding. This is
            useful for progress monitoring for example. Function takes
            one parameter, which may be response structure/namedtuple, or
            string or whatever would be normally returned.
    Returns:
        object: Response class from :mod:`structures`.
    Raises:
        ValueError: if bad type of `message` structure is given.
    """
    _hnas_protection()
    # Dispatch on the request type, then on the payload type inside it.
    if _instanceof(message, SaveRequest):
        # Tree
        if _instanceof(message.record, Tree):
            tree_handler().add_tree(message.record)
            return TreeInfo(
                path=message.record.path,
                url_by_path=_compose_tree_url(message.record),
                url_by_issn=_compose_tree_url(message.record, issn_url=True),
            )
        # Publication
        save_fn = save_publication
        class_ref = DBPublication
        # Archive
        if _instanceof(message.record, Archive):
            save_fn = save_archive
            class_ref = DBArchive
        # Convert the communication record to its DB counterpart and persist it.
        return save_fn(
            class_ref.from_comm(message.record)
        )
    elif _instanceof(message, SearchRequest):
        # Publication
        search_fn = search_publications
        class_ref = DBPublication
        # Archive
        if _instanceof(message.query, Archive):
            search_fn = search_archives
            class_ref = DBArchive
        results = search_fn(
            class_ref.from_comm(message.query)
        )
        # Convert DB records back to communication objects for the response.
        return SearchResult(
            records=[
                record.to_comm(light_request=message.light_request)
                for record in results
            ]
        )
    raise ValueError("'%s' is unknown type of request!" % str(type(message)))
|
React to given (AMQP) message. `message` is expected to be
:py:func:`collections.namedtuple` structure from :mod:`.structures` filled
with all necessary data.
Args:
message (object): One of the request objects defined in
:mod:`.structures`.
send_back (fn reference): Reference to function for responding. This is
useful for progress monitoring for example. Function takes
one parameter, which may be response structure/namedtuple, or
string or whatever would be normally returned.
Returns:
object: Response class from :mod:`structures`.
Raises:
ValueError: if bad type of `message` structure is given.
|
def stats_enabled(self, value):
    """Enable or disable statistics collection; see the getter for details."""
    action = self.statistics.enable if value else self.statistics.disable
    action()
|
Setter method; for a description see the getter method.
|
def get_endpoint_server_root(self):
    """Return the scheme://host[:port] root of this RemoteLRS endpoint.
    :return: Root of the RemoteLRS object endpoint
    :rtype: unicode
    """
    parsed = urlparse(self._endpoint)
    root = "{0}://{1}".format(parsed.scheme, parsed.hostname)
    if parsed.port is not None:
        root += ":" + unicode(parsed.port)
    return root
|
Parses RemoteLRS object's endpoint and returns its root
:return: Root of the RemoteLRS object endpoint
:rtype: unicode
|
def construct_txt_file(self):
    """Build the header lines of the txt report"""
    title = 'Prediction of noncovalent interactions for PDB structure %s' % self.mol.pymol_name.upper()
    textlines = [title, "=" * len(title)]
    textlines.append('Created on %s using PLIP v%s\n' % (time.strftime("%Y/%m/%d"), __version__))
    textlines.append('If you are using PLIP in your work, please cite:')
    textlines.append('Salentin,S. et al. PLIP: fully automated protein-ligand interaction profiler.')
    textlines.append('Nucl. Acids Res. (1 July 2015) 43 (W1): W443-W447. doi: 10.1093/nar/gkv315\n')
    if self.excluded:
        textlines.append('Excluded molecules as ligands: %s\n' % ','.join([lig for lig in self.excluded]))
    if config.DNARECEPTOR:
        textlines.append('DNA/RNA in structure was chosen as the receptor part.\n')
    return textlines
|
Construct the header of the txt file
|
def matrixplot(adata, var_names, groupby=None, use_raw=None, log=False, num_categories=7,
               figsize=None, dendrogram=False, gene_symbols=None, var_group_positions=None, var_group_labels=None,
               var_group_rotation=None, layer=None, standard_scale=None, swap_axes=False, show=None,
               save=None, **kwds):
    """\
    Creates a heatmap of the mean expression values per cluster of each var_names
    If groupby is not given, the matrixplot assumes that all data belongs to a single
    category.
    Parameters
    ----------
    {common_plot_args}
    standard_scale : {{'var', 'group'}}, optional (default: None)
        Whether or not to standardize that dimension between 0 and 1, meaning for each variable or group,
        subtract the minimum and divide each by its maximum.
    {show_save_ax}
    **kwds : keyword arguments
        Are passed to `matplotlib.pyplot.pcolor`.
    Returns
    -------
    List of :class:`~matplotlib.axes.Axes`
    Examples
    --------
    >>> adata = sc.datasets.pbmc68k_reduced()
    >>> sc.pl.matrixplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'],
    ...                  groupby='bulk_labels', dendrogram=True)
    """
    # Default to the .raw attribute when present and the caller did not choose.
    if use_raw is None and adata.raw is not None: use_raw = True
    if isinstance(var_names, str):
        var_names = [var_names]
    categories, obs_tidy = _prepare_dataframe(adata, var_names, groupby, use_raw, log, num_categories,
                                              gene_symbols=gene_symbols, layer=layer)
    if groupby is None or len(categories) <= 1:
        # dendrogram can only be computed between groupby categories
        dendrogram = False
    # Mean expression per category: rows are categories, columns are var_names.
    mean_obs = obs_tidy.groupby(level=0).mean()
    if standard_scale == 'group':
        # Scale each row (category) to the [0, 1] range.
        mean_obs = mean_obs.sub(mean_obs.min(1), axis=0)
        mean_obs = mean_obs.div(mean_obs.max(1), axis=0).fillna(0)
    elif standard_scale == 'var':
        # Scale each column (variable) to the [0, 1] range.
        mean_obs -= mean_obs.min(0)
        mean_obs = (mean_obs / mean_obs.max(0)).fillna(0)
    elif standard_scale is None:
        pass
    else:
        logg.warn('Unknown type for standard_scale, ignored')
    if dendrogram:
        dendro_data = _reorder_categories_after_dendrogram(adata, groupby, dendrogram,
                                                           var_names=var_names,
                                                           var_group_labels=var_group_labels,
                                                           var_group_positions=var_group_positions)
        var_group_labels = dendro_data['var_group_labels']
        var_group_positions = dendro_data['var_group_positions']
        # reorder matrix
        if dendro_data['var_names_idx_ordered'] is not None:
            # reorder columns (usually genes) if needed. This only happens when
            # var_group_positions and var_group_labels is set
            mean_obs = mean_obs.iloc[:,dendro_data['var_names_idx_ordered']]
        # reorder rows (categories) to match the dendrogram order
        mean_obs = mean_obs.iloc[dendro_data['categories_idx_ordered'], :]
    colorbar_width = 0.2
    if not swap_axes:
        # Categories on the y axis, var_names on the x axis.
        dendro_width = 0.8 if dendrogram else 0
        if figsize is None:
            height = len(categories) * 0.2 + 1  # +1 for labels
            heatmap_width = len(var_names) * 0.32
            width = heatmap_width + dendro_width + colorbar_width  # +1.6 to account for the colorbar and + 1 to account for labels
        else:
            width, height = figsize
            heatmap_width = width - (dendro_width + colorbar_width)
        if var_group_positions is not None and len(var_group_positions) > 0:
            # add some space in case 'brackets' want to be plotted on top of the image
            height_ratios = [0.5, 10]
            height += 0.5
        else:
            height_ratios = [0, 10.5]
        # define a layout of 2 rows x 3 columns
        # first row is for 'brackets' (if no brackets needed, the height of this row is zero)
        # second row is for main content. This second row
        # is divided into three axes:
        #   first ax is for the main matrix figure
        #   second ax is for the dendrogram
        #   third ax is for the color bar legend
        fig = pl.figure(figsize=(width, height))
        axs = gridspec.GridSpec(nrows=2, ncols=3, wspace=0.02, hspace=0.04,
                                width_ratios=[heatmap_width, dendro_width, colorbar_width],
                                height_ratios=height_ratios)
        matrix_ax = fig.add_subplot(axs[1, 0])
        y_ticks = np.arange(mean_obs.shape[0]) + 0.5
        matrix_ax.set_yticks(y_ticks)
        matrix_ax.set_yticklabels([mean_obs.index[idx] for idx in range(mean_obs.shape[0])])
        if dendrogram:
            dendro_ax = fig.add_subplot(axs[1, 1], sharey=matrix_ax)
            _plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, ticks=y_ticks)
        pc = matrix_ax.pcolor(mean_obs, edgecolor='gray', **kwds)
        # invert y axis to show categories ordered from top to bottom
        matrix_ax.set_ylim(mean_obs.shape[0], 0)
        x_ticks = np.arange(mean_obs.shape[1]) + 0.5
        matrix_ax.set_xticks(x_ticks)
        matrix_ax.set_xticklabels([mean_obs.columns[idx] for idx in range(mean_obs.shape[1])], rotation=90)
        matrix_ax.tick_params(axis='both', labelsize='small')
        matrix_ax.grid(False)
        # NOTE(review): this xlim is immediately overridden by the
        # set_xlim(0, mean_obs.shape[1]) call two lines below — likely redundant.
        matrix_ax.set_xlim(-0.5, len(var_names) + 0.5)
        matrix_ax.set_ylabel(groupby)
        matrix_ax.set_xlim(0, mean_obs.shape[1])
        # plot group legends on top of matrix_ax (if given)
        if var_group_positions is not None and len(var_group_positions) > 0:
            gene_groups_ax = fig.add_subplot(axs[0, 0], sharex=matrix_ax)
            _plot_gene_groups_brackets(gene_groups_ax, group_positions=var_group_positions,
                                       group_labels=var_group_labels, rotation=var_group_rotation,
                                       left_adjustment=0.2, right_adjustment=0.8)
        # plot colorbar
        _plot_colorbar(pc, fig, axs[1, 2])
    else:
        # Swapped axes: var_names on the y axis, categories on the x axis.
        dendro_height = 0.5 if dendrogram else 0
        if var_group_positions is not None and len(var_group_positions) > 0:
            # add some space in case 'color blocks' want to be plotted on the right of the image
            vargroups_width = 0.4
        else:
            vargroups_width = 0
        if figsize is None:
            heatmap_height = len(var_names) * 0.2
            height = dendro_height + heatmap_height + 1  # +1 for labels
            heatmap_width = len(categories) * 0.3
            width = heatmap_width + vargroups_width + colorbar_width
        else:
            width, height = figsize
            heatmap_width = width - (vargroups_width + colorbar_width)
            heatmap_height = height - dendro_height
        # define a layout of 2 rows x 3 columns
        # first row is for 'dendrogram' (if no dendrogram is plotted, the height of this row is zero)
        # second row is for main content. This row
        # is divided into three axes:
        #   first ax is for the main matrix figure
        #   second ax is for the groupby categories (eg. brackets)
        #   third ax is for the color bar legend
        fig = pl.figure(figsize=(width, height))
        axs = gridspec.GridSpec(nrows=2, ncols=3, wspace=0.05, hspace=0.005,
                                width_ratios=[heatmap_width, vargroups_width, colorbar_width],
                                height_ratios=[dendro_height, heatmap_height])
        # Transpose so rows are var_names and columns are categories.
        mean_obs = mean_obs.T
        matrix_ax = fig.add_subplot(axs[1, 0])
        pc = matrix_ax.pcolor(mean_obs, edgecolor='gray', **kwds)
        y_ticks = np.arange(mean_obs.shape[0]) + 0.5
        matrix_ax.set_yticks(y_ticks)
        matrix_ax.set_yticklabels([mean_obs.index[idx] for idx in range(mean_obs.shape[0])])
        x_ticks = np.arange(mean_obs.shape[1]) + 0.5
        matrix_ax.set_xticks(x_ticks)
        matrix_ax.set_xticklabels([mean_obs.columns[idx] for idx in range(mean_obs.shape[1])], rotation=90)
        matrix_ax.tick_params(axis='both', labelsize='small')
        matrix_ax.grid(False)
        matrix_ax.set_xlim(0, len(categories))
        matrix_ax.set_xlabel(groupby)
        # invert y axis to show var_names ordered from top to bottom
        matrix_ax.set_ylim(mean_obs.shape[0], 0)
        if dendrogram:
            dendro_ax = fig.add_subplot(axs[0, 0], sharex=matrix_ax)
            _plot_dendrogram(dendro_ax, adata, groupby, dendrogram_key=dendrogram, ticks=x_ticks, orientation='top')
        # plot group legends on top of matrix_ax (if given)
        if var_group_positions is not None and len(var_group_positions) > 0:
            gene_groups_ax = fig.add_subplot(axs[1, 1], sharey=matrix_ax)
            _plot_gene_groups_brackets(gene_groups_ax, group_positions=var_group_positions,
                                       group_labels=var_group_labels, rotation=var_group_rotation,
                                       left_adjustment=0.2, right_adjustment=0.8, orientation='right')
        # plot colorbar
        _plot_colorbar(pc, fig, axs[1, 2])
    utils.savefig_or_show('matrixplot', show=show, save=save)
    return axs
|
\
Creates a heatmap of the mean expression values per cluster of each var_names
If groupby is not given, the matrixplot assumes that all data belongs to a single
category.
Parameters
----------
{common_plot_args}
standard_scale : {{'var', 'group'}}, optional (default: None)
Whether or not to standardize that dimension between 0 and 1, meaning for each variable or group,
subtract the minimum and divide each by its maximum.
{show_save_ax}
**kwds : keyword arguments
Are passed to `matplotlib.pyplot.pcolor`.
Returns
-------
List of :class:`~matplotlib.axes.Axes`
Examples
--------
>>> adata = sc.datasets.pbmc68k_reduced()
>>> sc.pl.matrixplot(adata, ['C1QA', 'PSAP', 'CD79A', 'CD79B', 'CST3', 'LYZ'],
... groupby='bulk_labels', dendrogram=True)
|
def get_forms(self):
    """
    Build one form per entry of `form_classes`, wiring each with its
    instance from `get_objects()`, initial data from `get_initial()` and
    kwargs from `get_form_kwargs()`.
    """
    objects = self.get_objects()
    initial = self.get_initial()
    form_kwargs = self.get_form_kwargs()
    return {
        key: form_class(instance=objects[key], initial=initial[key], **form_kwargs[key])
        for key, form_class in six.iteritems(self.form_classes)
    }
|
Initializes the forms defined in `form_classes` with initial data from `get_initial()`,
kwargs from get_form_kwargs() and form instance object from `get_objects()`.
|
def draw_bars(out_value, features, feature_type, width_separators, width_bar):
    """Draw the bars and separators for a force plot.

    Parameters
    ----------
    out_value : float
        Model output value the first bar starts from.
    features : sequence
        Per-feature entries whose first element is the bar boundary value.
    feature_type : str
        'positive' or 'negative'; controls bar direction and colors.
    width_separators : float
        Horizontal indent of the arrow-shaped separators.
    width_bar : float
        Height of the bars.

    Returns
    -------
    (list, list)
        Matplotlib Polygon patches for the bars and for the separators.
    """
    rectangle_list = []
    separator_list = []
    pre_val = out_value
    # The original iterated `for index, features in zip(range(len(features)), features)`,
    # shadowing the `features` argument; enumerate with a distinct loop name is
    # equivalent (zip evaluated its arguments before the rebinding) and clearer.
    for index, feature in enumerate(features):
        if feature_type == 'positive':
            left_bound = float(feature[0])
            right_bound = pre_val
            pre_val = left_bound

            separator_indent = np.abs(width_separators)
            separator_pos = left_bound
            colors = ['#FF0D57', '#FFC3D5']
        else:
            left_bound = pre_val
            right_bound = float(feature[0])
            pre_val = right_bound

            separator_indent = - np.abs(width_separators)
            separator_pos = right_bound
            colors = ['#1E88E5', '#D1E6FA']

        # Create rectangle: the first bar has a flat outer edge and a single
        # notch; subsequent bars are notched on both sides so they interlock.
        if index == 0:
            if feature_type == 'positive':
                points_rectangle = [[left_bound, 0],
                                    [right_bound, 0],
                                    [right_bound, width_bar],
                                    [left_bound, width_bar],
                                    [left_bound + separator_indent, (width_bar / 2)]
                                    ]
            else:
                points_rectangle = [[right_bound, 0],
                                    [left_bound, 0],
                                    [left_bound, width_bar],
                                    [right_bound, width_bar],
                                    [right_bound + separator_indent, (width_bar / 2)]
                                    ]
        else:
            points_rectangle = [[left_bound, 0],
                                [right_bound, 0],
                                [right_bound + separator_indent * 0.90, (width_bar / 2)],
                                [right_bound, width_bar],
                                [left_bound, width_bar],
                                [left_bound + separator_indent * 0.90, (width_bar / 2)]]

        line = plt.Polygon(points_rectangle, closed=True, fill=True,
                           facecolor=colors[0], linewidth=0)
        rectangle_list += [line]

        # Create separator (the arrow-shaped outline between adjacent bars).
        points_separator = [[separator_pos, 0],
                            [separator_pos + separator_indent, (width_bar / 2)],
                            [separator_pos, width_bar]]
        line = plt.Polygon(points_separator, closed=None, fill=None,
                           edgecolor=colors[1], lw=3)
        separator_list += [line]
    return rectangle_list, separator_list
|
Draw the bars and separators.
|
def reduce_object_file_names(self, dirn):
    """Recursively rename every "XXX.cpython-...-linux-gnu.so" under `dirn`
    to plain "XXX.so", stripping the erroneous host-architecture tag
    from the file name.
    """
    find_result = shprint(sh.find, dirn, '-iname', '*.so')
    for so_file in find_result.stdout.decode('utf-8').split('\n')[:-1]:
        dirname, basename = split(so_file)
        name_parts = basename.split('.')
        # Only rename files that actually carry an architecture tag
        # (i.e. more than "name" + "so" in the dotted name).
        if len(name_parts) > 2:
            shprint(sh.mv, so_file, join(dirname, name_parts[0] + '.so'))
|
Recursively renames all files named XXX.cpython-...-linux-gnu.so"
to "XXX.so", i.e. removing the erroneous architecture name
coming from the local system.
|
def restore(ctx, filename):
    """Restore the database from a zipped mongodump archive.

    Default is to restore from the db dump in loqusdb/resources/.
    Aborts the CLI context when the file does not exist or mongorestore fails.
    """
    filename = filename or background_path
    if not os.path.isfile(filename):
        LOG.warning("File {} does not exist. Please point to a valid file".format(filename))
        ctx.abort()

    call = ['mongorestore', '--gzip', '--db', 'loqusdb', '--archive={}'.format(filename)]
    LOG.info('Restoring database from %s', filename)
    start_time = datetime.now()
    try:
        # check=True raises CalledProcessError on a non-zero exit status;
        # the CompletedProcess object itself is not needed.
        subprocess.run(call, check=True)
    except subprocess.CalledProcessError as err:
        LOG.warning(err)
        ctx.abort()

    LOG.info('Database restored successfully')
    LOG.info('Time to restore database: {0}'.format(datetime.now() - start_time))
|
Restore the database from a zipped file.
Default is to restore from db dump in loqusdb/resources/
|
def get_bearing(origin_point, destination_point):
    """
    Compute the compass bearing from one lat-long point to another. Each
    tuple represents (lat, lng) in decimal degrees.
    Parameters
    ----------
    origin_point : tuple
    destination_point : tuple
    Returns
    -------
    bearing : float
        the compass bearing in decimal degrees from the origin point
        to the destination point
    """
    if not (isinstance(origin_point, tuple) and isinstance(destination_point, tuple)):
        raise TypeError('origin_point and destination_point must be (lat, lng) tuples')

    # Work in radians on the latitudes and the longitude difference.
    phi1 = math.radians(origin_point[0])
    phi2 = math.radians(destination_point[0])
    delta_lng = math.radians(destination_point[1] - origin_point[1])

    # Forward azimuth, in the -180..+180 degree range.
    x = math.sin(delta_lng) * math.cos(phi2)
    y = math.cos(phi1) * math.sin(phi2) - math.sin(phi1) * math.cos(phi2) * math.cos(delta_lng)
    azimuth = math.degrees(math.atan2(x, y))

    # Wrap into the 0..360 compass range.
    return (azimuth + 360) % 360
|
Calculate the bearing between two lat-long points. Each tuple should
represent (lat, lng) as decimal degrees.
Parameters
----------
origin_point : tuple
destination_point : tuple
Returns
-------
bearing : float
the compass bearing in decimal degrees from the origin point
to the destination point
|
def split_by_rand_pct(self, valid_pct:float=0.2, seed:int=None)->'ItemLists':
    "Randomly assign `valid_pct` of the items to the validation set; pass `seed` for reproducibility."
    if valid_pct == 0.:
        return self.split_none()
    if seed is not None:
        np.random.seed(seed)
    shuffled = np.random.permutation(range_of(self))
    n_valid = int(valid_pct * len(self))
    return self.split_by_idx(shuffled[:n_valid])
|
Split the items randomly by putting `valid_pct` in the validation set, optional `seed` can be passed.
|
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncListContext for this SyncListInstance
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListContext
"""
if self._context is None:
self._context = SyncListContext(
self._version,
service_sid=self._solution['service_sid'],
sid=self._solution['sid'],
)
return self._context
|
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: SyncListContext for this SyncListInstance
:rtype: twilio.rest.sync.v1.service.sync_list.SyncListContext
|
def scan_keys(self, match=None, count=None):
    """Iterate over all redis keys matching `match` using the SCAN command.
    Parameters
    ----------
    match: str
        The pattern of keys to look for
    count: int, default to None (redis uses 10)
        Hint for redis about the number of expected result
    Yields
    -------
    str
        Keys found by the scan, one at a time; a key may be yielded more
        than once because of how the redis SCAN command works.
    """
    cursor = 0
    while True:
        cursor, batch = self.connection.scan(cursor, match=match, count=count)
        yield from batch
        # redis.py < 2.10 returns the terminating cursor as the string '0'.
        if not cursor or cursor == '0':
            break
|
Take a pattern expected by the redis `scan` command and iter on all matching keys
Parameters
----------
match: str
The pattern of keys to look for
count: int, default to None (redis uses 10)
Hint for redis about the number of expected result
Yields
-------
str
All keys found by the scan, one by one. A key can be returned multiple times, it's
related to the way the SCAN command works in redis.
|
def _quote(data):
"""Prepare a string for quoting for DIGEST-MD5 challenge or response.
Don't add the quotes, only escape '"' and "\\" with backslashes.
:Parameters:
- `data`: a raw string.
:Types:
- `data`: `bytes`
:return: `data` with '"' and "\\" escaped using "\\".
:returntype: `bytes`
"""
data = data.replace(b'\\', b'\\\\')
data = data.replace(b'"', b'\\"')
return data
|
Prepare a string for quoting for DIGEST-MD5 challenge or response.
Don't add the quotes, only escape '"' and "\\" with backslashes.
:Parameters:
- `data`: a raw string.
:Types:
- `data`: `bytes`
:return: `data` with '"' and "\\" escaped using "\\".
:returntype: `bytes`
|
def ipv6_range_to_list(start_packed, end_packed):
    """ Return a list of IPv6 address strings from start_packed to end_packed, inclusive. """
    first = int(binascii.hexlify(start_packed), 16)
    last = int(binascii.hexlify(end_packed), 16)
    addresses = []
    for value in range(first, last + 1):
        # Split the 128-bit value into two 64-bit words for struct packing.
        packed = struct.pack('!2Q', value >> 64, value & ((1 << 64) - 1))
        addresses.append(inet_ntop(socket.AF_INET6, packed))
    return addresses
|
Return a list of IPv6 entries from start_packed to end_packed.
|
def ReleaseSW(self):
    ' Go away from Limit Switch '
    # Keep issuing the release command until status bit 2 (limit-switch flag) clears.
    while self.ReadStatusBit(2) == 1: # is Limit Switch ON ?
        # NOTE(review): `[0x92, 0x92] | (~self.Dir & 1)` applies `|` to a list
        # and an int, which raises TypeError at runtime. The intent was
        # presumably to OR the direction bit into the command byte(s),
        # e.g. [0x92 | (~self.Dir & 1)] — confirm against the SPI protocol.
        spi.SPI_write(self.CS, [0x92, 0x92] | (~self.Dir & 1)) # release SW
        # Busy-wait until the motor controller finishes the move.
        while self.IsBusy():
            pass
    # Back off a little from the switch once it has been released.
    self.MoveWait(10)
|
Go away from Limit Switch
|
def fix_e112(self, result):
    """Fix under-indented comments (pycodestyle E112) by indenting the
    offending comment line one level."""
    index = result['line'] - 1
    line = self.source[index]
    if not line.lstrip().startswith('#'):
        # Only comment lines are re-indented; anything else means the file
        # has genuinely invalid syntax, so report "nothing changed".
        return []
    self.source[index] = self.indent_word + line
|
Fix under-indented comments.
|
def status(url="http://127.0.0.1/status"):
    """
    Return the data from an Nginx status page as a dictionary.
    http://wiki.nginx.org/HttpStubStatusModule
    url
        The URL of the status page. Defaults to 'http://127.0.0.1/status'
    CLI Example:
    .. code-block:: bash
        salt '*' nginx.status
    """
    resp = _urlopen(url)
    payload = resp.read()
    resp.close()
    lines = payload.splitlines()
    # A stub_status page is exactly four lines; anything else is not a
    # status page, so give up silently.
    if len(lines) != 4:
        return
    # Line 0: "Active connections: 1 "
    active_connections = lines[0].split()[2]
    # Line 1: "server accepts handled requests" (header only)
    # Line 2: " 12 12 9 "
    accepted, handled, requests = lines[2].split()
    # Line 3: "Reading: 0 Writing: 1 Waiting: 0 "
    _, reading, _, writing, _, waiting = lines[3].split()
    return {
        'active connections': int(active_connections),
        'accepted': int(accepted),
        'handled': int(handled),
        'requests': int(requests),
        'reading': int(reading),
        'writing': int(writing),
        'waiting': int(waiting),
    }
|
Return the data from an Nginx status page as a dictionary.
http://wiki.nginx.org/HttpStubStatusModule
url
The URL of the status page. Defaults to 'http://127.0.0.1/status'
CLI Example:
.. code-block:: bash
salt '*' nginx.status
|
def fit(self, inputs=None, wait=True, logs=True, job_name=None):
    """Train a model using the input training dataset.

    Calls the Amazon SageMaker CreateTrainingJob API with the configuration
    held by this estimator and the supplied training data. When ``wait`` is
    True the call blocks until the job finishes; afterwards ``deploy()`` can
    be used to host the trained model.

    Args:
        inputs (str or dict or sagemaker.session.s3_input): Information about
            the training data. One of:
            * (str) an S3 location where training data is saved.
            * (dict[str, str] or dict[str, sagemaker.session.s3_input]) a
              mapping of channel names to strings or
              :func:`~sagemaker.session.s3_input` objects, when training uses
              multiple channels.
            * (sagemaker.session.s3_input) channel configuration for S3 data
              sources, carrying the dataset path plus additional settings.
              See :func:`sagemaker.session.s3_input` for full details.
        wait (bool): Whether the call should wait until the job completes
            (default: True).
        logs (bool): Whether to show the logs produced by the job. Only
            meaningful when wait is True (default: True).
        job_name (str): Training job name. If not specified, a default name
            is generated from the training image name and current timestamp.
    """
    self._prepare_for_training(job_name=job_name)
    self.latest_training_job = _TrainingJob.start_new(self, inputs)
    if not wait:
        return
    self.latest_training_job.wait(logs=logs)
|
Train a model using the input training dataset.
The API calls the Amazon SageMaker CreateTrainingJob API to start model training.
The API uses configuration you provided to create the estimator and the
specified input training data to send the CreatingTrainingJob request to Amazon SageMaker.
This is a synchronous operation. After the model training successfully completes,
you can call the ``deploy()`` method to host the model using the Amazon SageMaker hosting services.
Args:
inputs (str or dict or sagemaker.session.s3_input): Information about the training data.
This can be one of three types:
* (str) the S3 location where training data is saved.
* (dict[str, str] or dict[str, sagemaker.session.s3_input]) If using multiple channels for
training data, you can specify a dict mapping channel names
to strings or :func:`~sagemaker.session.s3_input` objects.
* (sagemaker.session.s3_input) - channel configuration for S3 data sources that can provide
additional information as well as the path to the training dataset.
See :func:`sagemaker.session.s3_input` for full details.
wait (bool): Whether the call should wait until the job completes (default: True).
logs (bool): Whether to show the logs produced by the job.
Only meaningful when wait is True (default: True).
job_name (str): Training job name. If not specified, the estimator generates a default job name,
based on the training image name and current timestamp.
|
def dcshift(self, shift=0.0):
    '''Apply a DC shift to the audio.

    Appends a SoX ``dcshift`` effect to this transformer and returns it for
    chaining.

    Parameters
    ----------
    shift : float
        Amount to shift audio between -2 and 2. (Audio is between -1 and 1)

    See Also
    --------
    highpass
    '''
    # Reject non-numbers and out-of-range values before touching the chain.
    if not is_number(shift) or shift < -2 or shift > 2:
        raise ValueError('shift must be a number between -2 and 2.')
    self.effects.extend(['dcshift', '{:f}'.format(shift)])
    self.effects_log.append('dcshift')
    return self
|
Apply a DC shift to the audio.
Parameters
----------
shift : float
Amount to shift audio between -2 and 2. (Audio is between -1 and 1)
See Also
--------
highpass
|
def options(self, parser, env):
    """Register commandline options.

    Adds the ``--collect-only`` flag, defaulting to the NOSE_COLLECT_ONLY
    environment variable when the flag is absent.
    """
    help_text = "Enable collect-only: %s [COLLECT_ONLY]" % (self.help())
    parser.add_option(
        '--collect-only',
        action='store_true',
        default=env.get('NOSE_COLLECT_ONLY'),
        dest=self.enableOpt,
        help=help_text)
|
Register commandline options.
|
def gridmake(*arrays):
    """
    Expands one or more vectors (or matrices) into a matrix where rows span the
    cartesian product of combinations of the input arrays. Each column of the
    input arrays will correspond to one column of the output matrix.

    Parameters
    ----------
    *arrays : tuple/list of np.ndarray
        Tuple/list of vectors to be expanded.

    Returns
    -------
    out : np.ndarray
        The cartesian product of combinations of the input arrays. A single
        input vector is returned unchanged (its cartesian product with
        nothing is itself).

    Notes
    -----
    Based on the original function ``gridmake`` in the CompEcon toolbox by
    Miranda and Fackler. Only 1-d inputs are currently supported.

    References
    ----------
    Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
    and Finance, MIT Press, 2002.
    """
    if not all(a.ndim == 1 for a in arrays):
        raise NotImplementedError("Come back here")
    d = len(arrays)
    # Fix: a single vector used to fall into the chaining branch below and
    # crash on arrays[1]; its cartesian product is simply itself.
    if d == 1:
        return arrays[0]
    if d == 2:
        return _gridmake2(*arrays)
    # Three or more arrays: fold pairwise through _gridmake2.
    out = _gridmake2(arrays[0], arrays[1])
    for arr in arrays[2:]:
        out = _gridmake2(out, arr)
    return out
|
Expands one or more vectors (or matrices) into a matrix where rows span the
cartesian product of combinations of the input arrays. Each column of the
input arrays will correspond to one column of the output matrix.
Parameters
----------
*arrays : tuple/list of np.ndarray
Tuple/list of vectors to be expanded.
Returns
-------
out : np.ndarray
The cartesian product of combinations of the input arrays.
Notes
-----
Based on the original function ``gridmake`` in the CompEcon toolbox by
Miranda and Fackler
References
----------
Miranda, Mario J, and Paul L Fackler. Applied Computational Economics
and Finance, MIT Press, 2002.
|
def _process_dependencies(self, anexec, contents, mode="insert"):
    """Extracts a list of subroutines and functions that are called from
    within this executable.

    :arg anexec: the executable whose body is being scanned.
    :arg contents: the source text of the executable's body.
    :arg mode: specifies whether the matches should be added, removed
      or merged into the specified executable.
    """
    #At this point we don't necessarily know which module the executables are
    #in, so we just extract the names. Once all the modules in the library
    #have been parsed, we can do the associations at that level for linking.
    for dmatch in self.RE_DEPEND.finditer(contents):
        # A non-None "sub" group marks a subroutine call (vs. function ref).
        isSubroutine = dmatch.group("sub") is not None
        # NOTE(review): the branches look swapped -- the split("!") in the
        # else-branch is a no-op when "!" is absent, while lines containing
        # "!" go through _depend_exec_clean instead; confirm intent.
        if "!" in dmatch.group("exec"):
            execline = self._depend_exec_clean(dmatch.group("exec"))
        else:
            execline = "(" + dmatch.group("exec").split("!")[0].replace(",", ", ") + ")"
        # "::" indicates a declaration rather than a call; skip those.
        if not "::" in execline:
            try:
                dependent = self.nester.parseString(execline).asList()[0]
            except:
                # NOTE(review): on a parse failure `dependent` is left unbound
                # (or stale from the previous iteration) yet is still passed
                # to _process_dependlist below -- likely a latent bug.
                msg.err("parsing executable dependency call {}".format(anexec.name))
                msg.gen("\t" + execline)
            #Sometimes the parameter passed to a subroutine or function is
            #itself a function call. These are always the first elements in
            #their nested lists.
            self._process_dependlist(dependent, anexec, isSubroutine, mode)
|
Extracts a list of subroutines and functions that are called from
within this executable.
:arg mode: specifies whether the matches should be added, removed
or merged into the specified executable.
|
def stop(self):
    """Cancel and discard the background synchronization thread, if any."""
    with self.synclock:
        thread = self.syncthread
        if thread is None:
            return
        thread.cancel()
        self.syncthread = None
|
Stops the background synchronization thread
|
def addLogicalInterfaceToDeviceType(self, typeId, logicalInterfaceId):
    """
    Adds a logical interface to a device type.
    Parameters:
        - typeId (string) - the device type
        - logicalInterfaceId (string) - the id returned by the platform on creation of the logical interface
    Throws APIException on failure.
    """
    url = ApiClient.allDeviceTypeLogicalInterfacesUrl % (self.host, "/draft", typeId)
    payload = json.dumps({"id": logicalInterfaceId})
    resp = requests.post(url, auth=self.credentials,
                         headers={"Content-Type": "application/json"},
                         data=payload, verify=self.verify)
    # 201 Created is the only success status for this endpoint.
    if resp.status_code != 201:
        raise ibmiotf.APIException(resp.status_code, "HTTP error adding logical interface to a device type", resp)
    self.logger.debug("Logical interface added to a device type")
    return resp.json()
|
Adds a logical interface to a device type.
Parameters:
- typeId (string) - the device type
- logicalInterfaceId (string) - the id returned by the platform on creation of the logical interface
- description (string) - optional (not used)
Throws APIException on failure.
|
def create_tag(self, tags):
    """Add tags to this Point. Tags can only contain alphanumeric (unicode)
    characters and the underscore, and will be stored lower-cased.

    Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
    containing the error if the infrastructure detects a problem

    Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
    if there is a communications problem between you and the infrastructure

    tags (mandatory) (list) - the list of tags to add to your Point, e.g.
    ["garden", "soil"]
    """
    # A single tag may be passed as a plain string; normalise to a list.
    if isinstance(tags, str):
        tags = [tags]
    request = self._client._request_point_tag_update(
        self._type, self.__lid, self.__pid, tags, delete=False)
    self._client._wait_and_except_if_failed(request)
|
Create tags for a Point in the language you specify. Tags can only contain alphanumeric (unicode) characters
and the underscore. Tags will be stored lower-cased.
Raises [IOTException](./Exceptions.m.html#IoticAgent.IOT.Exceptions.IOTException)
containing the error if the infrastructure detects a problem
Raises [LinkException](../Core/AmqpLink.m.html#IoticAgent.Core.AmqpLink.LinkException)
if there is a communications problem between you and the infrastructure
tags (mandatory) (list) - the list of tags you want to add to your Point, e.g.
["garden", "soil"]
|
def joinCommissioned(self, strPSKd='threadjpaketest', waitTime=20):
    """start joiner
    Args:
        strPSKd: Joiner's PSKd
        waitTime: unused here; the polling loop below uses a fixed
            150-second budget instead -- TODO confirm intent
    Returns:
        True: successful to start joiner
        False: fail to start joiner
    """
    print '%s call joinCommissioned' % self.port
    self.__sendCommand('ifconfig up')
    cmd = 'joiner start %s %s' %(strPSKd, self.provisioningUrl)
    print cmd
    if self.__sendCommand(cmd)[0] == "Done":
        maxDuration = 150 # seconds
        self.joinCommissionedStatus = self.joinStatus['ongoing']
        # Start the commissioning-log reader thread once, if not running.
        if self.logThreadStatus == self.logStatus['stop']:
            self.logThread = ThreadRunner.run(target=self.__readCommissioningLogs, args=(maxDuration,))
        t_end = time.time() + maxDuration
        # Poll the status flag updated by the log-reader thread until it
        # reports success/failure or the time budget is exhausted.
        while time.time() < t_end:
            if self.joinCommissionedStatus == self.joinStatus['succeed']:
                break
            elif self.joinCommissionedStatus == self.joinStatus['failed']:
                return False
            time.sleep(1)
        # NOTE(review): a timeout (loop exhausted without 'succeed') still
        # falls through to 'thread start' and returns True -- confirm this
        # is intended.
        self.__sendCommand('thread start')
        time.sleep(30)
        return True
    else:
        return False
|
start joiner
Args:
strPSKd: Joiner's PSKd
Returns:
True: successful to start joiner
False: fail to start joiner
|
def python_2_unicode_compatible(klass):
    """
    Class decorator that defines __unicode__ and __str__ methods under
    Python 2; under Python 3 it returns the class unchanged.

    From django.utils.encoding.py in 1.4.2+, minus the dependency on Six.
    To support Python 2 and 3 with a single code base, define a __str__
    method returning text and apply this decorator to the class.
    """
    if sys.version_info[0] != 2:
        return klass
    if '__str__' not in klass.__dict__:
        raise ValueError("@python_2_unicode_compatible cannot be applied "
                         "to %s because it doesn't define __str__()." %
                         klass.__name__)
    # Move the text-returning __str__ to __unicode__ and make __str__
    # return UTF-8 encoded bytes, as Python 2 expects.
    klass.__unicode__ = klass.__str__
    klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
    return klass
|
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
From django.utils.encoding.py in 1.4.2+, minus the dependency on Six.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
|
def __check(self, decorated_function, *args, **kwargs):
    """ Verify that the decorated function is being invoked as a bound
    method; raise RuntimeError otherwise.

    :param decorated_function: called function (original)
    :param args: args with which function is called
    :param kwargs: kwargs with which function is called
    :return: None
    """
    # TODO replace this function with decorator which can be turned off like verify_* does
    if len(args) >= 1:
        candidate = args[0]
        method_name = decorated_function.__name__
        # The first positional argument must expose a callable attribute of
        # the same name that is bound to that very object.
        if hasattr(candidate, method_name) is True:
            bound = getattr(candidate, method_name)
            if callable(bound) and bound.__self__ == candidate:
                return
    raise RuntimeError('Only bounded methods are allowed')
|
Check whether function is a bounded method or not. If check fails then exception is raised
:param decorated_function: called function (original)
:param args: args with which function is called
:param kwargs: kwargs with which function is called
:return: None
|
def tokenizer(text):
    """A lexical analyzer for the `mwtab` formatted files.

    Walks the input line by line and yields ``KeyValue`` /
    ``KeyValueExtra`` / ``SubjectSampleFactors`` tuples for headers,
    section boundaries, key-value items and tabular data blocks.

    :param str text: `mwtab` formatted text.
    :return: Tuples of data.
    :rtype: py:class:`~collections.namedtuple`
    """
    stream = deque(text.split("\n"))
    while len(stream) > 0:
        line = stream.popleft()
        # File header: also emit each "KEY:VALUE" token found on the line.
        if line.startswith("#METABOLOMICS WORKBENCH"):
            yield KeyValue("#METABOLOMICS WORKBENCH", "\n")
            yield KeyValue("HEADER", line)
            for identifier in line.split(" "):
                if ":" in identifier:
                    key, value = identifier.split(":")
                    yield KeyValue(key, value)
        elif line.startswith("#ANALYSIS TYPE"):
            yield KeyValue("HEADER", line)
        elif line.startswith("#SUBJECT_SAMPLE_FACTORS:"):
            # Close the previous section before opening this one.
            yield KeyValue("#ENDSECTION", "\n")
            yield KeyValue("#SUBJECT_SAMPLE_FACTORS", "\n")
        elif line.startswith("#"):
            # Any other "#" line both ends the current section and names
            # the next one.
            yield KeyValue("#ENDSECTION", "\n")
            yield KeyValue(line.strip(), "\n")
        elif line.startswith("SUBJECT_SAMPLE_FACTORS"):
            key, subject_type, local_sample_id, factors, additional_sample_data = line.split("\t")
            # factors = [dict([[i.strip() for i in f.split(":")]]) for f in factors.split("|")]
            yield SubjectSampleFactors(key.strip(), subject_type, local_sample_id, factors, additional_sample_data)
        elif line.endswith("_START"):
            # Tabular data block: consume lines until the matching *_END.
            # NOTE(review): a missing *_END marker would exhaust the deque
            # and raise IndexError from popleft().
            yield KeyValue(line, "\n")
            while not line.endswith("_END"):
                line = stream.popleft()
                if line.endswith("_END"):
                    yield KeyValue(line.strip(), "\n")
                else:
                    data = line.split("\t")
                    yield KeyValue(data[0], tuple(data))
        else:
            if line:
                # Results-file lines may carry a trailing "KEY:VALUE" extra
                # column; fall back to a plain pair when it is absent.
                if line.startswith("MS:MS_RESULTS_FILE") or line.startswith("NM:NMR_RESULTS_FILE"):
                    try:
                        key, value, extra = line.split("\t")
                        extra_key, extra_value = extra.strip().split(":")
                        yield KeyValueExtra(key.strip()[3:], value, extra_key, extra_value)
                    except ValueError:
                        key, value = line.split("\t")
                        yield KeyValue(key.strip()[3:], value)
                else:
                    try:
                        key, value = line.split("\t")
                        if ":" in key:
                            if key.startswith("MS_METABOLITE_DATA:UNITS"):
                                yield KeyValue(key.strip(), value)
                            else:
                                # Drop the two-letter section prefix (e.g. "PR:").
                                yield KeyValue(key.strip()[3:], value)
                        else:
                            yield KeyValue(key.strip(), value)
                    except ValueError:
                        print("LINE WITH ERROR:\n\t", repr(line))
                        raise
    # End of input: close the final section and mark end-of-file.
    yield KeyValue("#ENDSECTION", "\n")
    yield KeyValue("!#ENDFILE", "\n")
|
A lexical analyzer for the `mwtab` formatted files.
:param str text: `mwtab` formatted text.
:return: Tuples of data.
:rtype: py:class:`~collections.namedtuple`
|
def ReadHashes(self):
    """
    Read hash values from the stream.

    Returns:
        list: hex-string representations of the hashes, each byte-reversed
        relative to its on-wire (little-endian) order.
    """
    count = self.ReadVarInt()
    hashes = []
    for _ in range(count):
        raw = bytearray(self.ReadBytes(32))
        # Hashes are serialized little-endian; reverse for display order.
        raw.reverse()
        hashes.append(raw.hex())
    return hashes
|
Read Hash values from the stream.
Returns:
list: a list of hash values. Each value is of the bytearray type.
|
def verify_leaf_hash_inclusion(self, leaf_hash: bytes, leaf_index: int,
                               proof: List[bytes], sth: STH):
    """Verify a Merkle Audit Path.
    See section 2.1.1 of RFC6962 for the exact path description.
    Args:
        leaf_hash: The hash of the leaf for which the proof was provided.
        leaf_index: Index of the leaf in the tree.
        proof: A list of SHA-256 hashes representing the Merkle audit
            path.
        sth: STH with the same tree size as the one used to fetch the
            proof.
            The sha256_root_hash from this STH will be compared against the
            root hash produced from the proof.
    Returns:
        True. The return value is enforced by a decorator and need not be
        checked by the caller.
    Raises:
        ValueError: the tree size / leaf index pair is inconsistent.
        ProofError: the proof is invalid.
    """
    # Coerce to int up front so comparisons below behave numerically even
    # if string-typed values were passed in.
    leaf_index = int(leaf_index)
    tree_size = int(sth.tree_size)
    # TODO(eranm): Verify signature over STH
    if tree_size <= leaf_index:
        raise ValueError("Provided STH is for a tree that is smaller "
                         "than the leaf index. Tree size: %d Leaf "
                         "index: %d" % (tree_size, leaf_index))
    if tree_size < 0 or leaf_index < 0:
        raise ValueError("Negative tree size or leaf index: "
                         "Tree size: %d Leaf index: %d" %
                         (tree_size, leaf_index))
    # proof[:] hands the helper a copy so the caller's list is untouched.
    calculated_root_hash = self._calculate_root_hash_from_audit_path(
        leaf_hash, leaf_index, proof[:], tree_size)
    if calculated_root_hash == sth.sha256_root_hash:
        return True
    raise error.ProofError("Constructed root hash differs from provided "
                           "root hash. Constructed: %s Expected: %s" %
                           (hexlify(calculated_root_hash).strip(),
                            hexlify(sth.sha256_root_hash).strip()))
|
Verify a Merkle Audit Path.
See section 2.1.1 of RFC6962 for the exact path description.
Args:
leaf_hash: The hash of the leaf for which the proof was provided.
leaf_index: Index of the leaf in the tree.
proof: A list of SHA-256 hashes representing the Merkle audit
path.
sth: STH with the same tree size as the one used to fetch the
proof.
The sha256_root_hash from this STH will be compared against the
root hash produced from the proof.
Returns:
True. The return value is enforced by a decorator and need not be
checked by the caller.
Raises:
ProofError: the proof is invalid.
|
def upsert_many(col, data):
    """
    Update-or-insert many documents into ``col``.

    Only used when having "_id" field: every document in ``data`` must carry
    an ``_id`` entry so the operation can be keyed on it. Documents whose
    update matched nothing are collected and bulk-inserted afterwards.
    (Docstring translated from the original Chinese.)

    NOTE(review): this uses the legacy PyMongo ``update``/``insert`` API,
    and ``upsert=False`` despite the function's name -- presumably the
    two-step update-then-insert below is the intended "upsert"; confirm.
    """
    ready_to_insert = list()
    for doc in data:
        res = col.update({"_id": doc["_id"]}, {"$set": doc}, upsert=False)
        # Nothing was modified AND no existing document matched -- so the
        # document is genuinely new; queue it for insertion. (Translated
        # from the original Chinese comment.)
        if res["nModified"] == 0 and res["updatedExisting"] is False:
            ready_to_insert.append(doc)
    # NOTE(review): legacy insert() with an empty list may raise -- confirm
    # behaviour when every document already existed.
    col.insert(ready_to_insert)
|
Only used when having "_id" field.
**中文文档**
要求 ``data`` 中的每一个 ``document`` 都必须有 ``_id`` 项。这样才能进行
``upsert`` 操作。
|
def zero_pad(m, n=1):
    """Return a copy of array *m* surrounded by *n* layers of zeros on
    every side."""
    return np.pad(m, n, mode='constant', constant_values=0)
|
Pad a matrix with zeros, on all sides.
|
def lock(self):
    """Lock thread.

    Requires that the currently authenticated user has the modposts oauth
    scope or has user/password authentication as a mod of the subreddit.

    :returns: The json response from the server.
    """
    session = self.reddit_session
    return session.request_json(session.config['lock'],
                                data={'id': self.fullname})
|
Lock thread.
Requires that the currently authenticated user has the modposts oauth
scope or has user/password authentication as a mod of the subreddit.
:returns: The json response from the server.
|
def parallel(fsms, test):
    '''
    Crawl several FSMs in parallel, mapping the states of a larger meta-FSM.
    To determine whether a state in the larger FSM is final, pass all of the
    finality statuses (e.g. [True, False, False]) to `test`.
    '''
    # Meta-alphabet: union of every component FSM's alphabet.
    alphabet = set().union(*[fsm.alphabet for fsm in fsms])
    # Meta-state: a dict mapping FSM index -> that FSM's current state.
    initial = dict([(i, fsm.initial) for (i, fsm) in enumerate(fsms)])
    # dedicated function accepts a "superset" and returns the next "superset"
    # obtained by following this transition in the new FSM
    def follow(current, symbol):
        next = {}
        for i in range(len(fsms)):
            # A symbol unknown to this FSM maps onto its `anything_else`
            # wildcard, when the FSM defines one.
            if symbol not in fsms[i].alphabet and anything_else in fsms[i].alphabet:
                actual_symbol = anything_else
            else:
                actual_symbol = symbol
            if i in current \
            and current[i] in fsms[i].map \
            and actual_symbol in fsms[i].map[current[i]]:
                next[i] = fsms[i].map[current[i]][actual_symbol]
        # FSMs with no valid move simply drop out of the meta-state; if none
        # can move at all, the transition leads to the oblivion state.
        if len(next.keys()) == 0:
            raise OblivionError
        return next
    # Determine the "is final?" condition of each substate, then pass it to the
    # test to determine finality of the overall FSM.
    def final(state):
        accepts = [i in state and state[i] in fsm.finals for (i, fsm) in enumerate(fsms)]
        return test(accepts)
    return crawl(alphabet, initial, final, follow).reduce()
|
Crawl several FSMs in parallel, mapping the states of a larger meta-FSM.
To determine whether a state in the larger FSM is final, pass all of the
finality statuses (e.g. [True, False, False] to `test`.
|
def theme(self, value):
    """
    Setter for **self.__theme** attribute.

    :param value: Attribute value; must be a ``dict`` (``None`` is stored
        as-is without validation).
    :type value: dict
    """
    # Only non-None values are type-checked; the assignment itself is
    # unconditional.
    assert value is None or type(value) is dict, \
        "'{0}' attribute: '{1}' type is not 'dict'!".format("theme", value)
    self.__theme = value
|
Setter for **self.__theme** attribute.
:param value: Attribute value.
:type value: dict
|
def delete_job(self, job_id):
    """Delete the given job id. The job must have been previously reserved by this connection"""
    # Accept either a raw id or a job object exposing .job_id.
    if hasattr(job_id, 'job_id'):
        job_id = job_id.job_id
    with self._sock_ctx() as sock:
        self._send_message('delete {0}'.format(job_id), sock)
        self._receive_word(sock, b'DELETED')
|
Delete the given job id. The job must have been previously reserved by this connection
|
def Popup(*args, **_3to2kwargs):
    # The block below is machine-generated (3to2): it emulates Python 3
    # keyword-only arguments by popping each recognised option out of the
    # kwargs dict and substituting a default when absent.
    if 'location' in _3to2kwargs: location = _3to2kwargs['location']; del _3to2kwargs['location']
    else: location = (None, None)
    if 'keep_on_top' in _3to2kwargs: keep_on_top = _3to2kwargs['keep_on_top']; del _3to2kwargs['keep_on_top']
    else: keep_on_top = False
    if 'grab_anywhere' in _3to2kwargs: grab_anywhere = _3to2kwargs['grab_anywhere']; del _3to2kwargs['grab_anywhere']
    else: grab_anywhere = False
    if 'no_titlebar' in _3to2kwargs: no_titlebar = _3to2kwargs['no_titlebar']; del _3to2kwargs['no_titlebar']
    else: no_titlebar = False
    if 'font' in _3to2kwargs: font = _3to2kwargs['font']; del _3to2kwargs['font']
    else: font = None
    if 'line_width' in _3to2kwargs: line_width = _3to2kwargs['line_width']; del _3to2kwargs['line_width']
    else: line_width = None
    if 'icon' in _3to2kwargs: icon = _3to2kwargs['icon']; del _3to2kwargs['icon']
    else: icon = DEFAULT_WINDOW_ICON
    if 'non_blocking' in _3to2kwargs: non_blocking = _3to2kwargs['non_blocking']; del _3to2kwargs['non_blocking']
    else: non_blocking = False
    if 'custom_text' in _3to2kwargs: custom_text = _3to2kwargs['custom_text']; del _3to2kwargs['custom_text']
    else: custom_text = (None, None)
    if 'auto_close_duration' in _3to2kwargs: auto_close_duration = _3to2kwargs['auto_close_duration']; del _3to2kwargs['auto_close_duration']
    else: auto_close_duration = None
    if 'auto_close' in _3to2kwargs: auto_close = _3to2kwargs['auto_close']; del _3to2kwargs['auto_close']
    else: auto_close = False
    if 'button_type' in _3to2kwargs: button_type = _3to2kwargs['button_type']; del _3to2kwargs['button_type']
    else: button_type = POPUP_BUTTONS_OK
    if 'text_color' in _3to2kwargs: text_color = _3to2kwargs['text_color']; del _3to2kwargs['text_color']
    else: text_color = None
    if 'background_color' in _3to2kwargs: background_color = _3to2kwargs['background_color']; del _3to2kwargs['background_color']
    else: background_color = None
    if 'button_color' in _3to2kwargs: button_color = _3to2kwargs['button_color']; del _3to2kwargs['button_color']
    else: button_color = None
    if 'title' in _3to2kwargs: title = _3to2kwargs['title']; del _3to2kwargs['title']
    else: title = None
    # NOTE: because of the kwarg-popping above, the string below is no longer
    # the function's docstring (it is not the first statement); it is kept
    # verbatim as the original parameter description.
    """
    Popup - Display a popup box with as many parms as you wish to include
    :param args:
    :param button_color:
    :param background_color:
    :param text_color:
    :param button_type:
    :param auto_close:
    :param auto_close_duration:
    :param non_blocking:
    :param icon:
    :param line_width:
    :param font:
    :param no_titlebar:
    :param grab_anywhere:
    :param keep_on_top:
    :param location:
    :return:
    """
    # Ensure at least one (empty) line is rendered when called with no args.
    if not args:
        args_to_print = ['']
    else:
        args_to_print = args
    if line_width != None:
        local_line_width = line_width
    else:
        local_line_width = MESSAGE_BOX_LINE_WIDTH
    # Window title defaults to the first message argument when not supplied.
    _title = title if title is not None else args_to_print[0]
    window = Window(_title, auto_size_text=True, background_color=background_color, button_color=button_color,
                    auto_close=auto_close, auto_close_duration=auto_close_duration, icon=icon, font=font,
                    no_titlebar=no_titlebar, grab_anywhere=grab_anywhere, keep_on_top=keep_on_top, location=location)
    max_line_total, total_lines = 0, 0
    # One Text element per argument; word-wrap any message that does not
    # already contain explicit newlines.
    for message in args_to_print:
        # fancy code to check if string and convert if not is not need. Just always convert to string :-)
        # if not isinstance(message, str): message = str(message)
        message = str(message)
        if message.count('\n'):
            message_wrapped = message
        else:
            message_wrapped = textwrap.fill(message, local_line_width)
        message_wrapped_lines = message_wrapped.count('\n') + 1
        longest_line_len = max([len(l) for l in message.split('\n')])
        width_used = min(longest_line_len, local_line_width)
        max_line_total = max(max_line_total, width_used)
        # height = _GetNumLinesNeeded(message, width_used)
        height = message_wrapped_lines
        window.AddRow(
            Text(message_wrapped, auto_size_text=True, text_color=text_color, background_color=background_color))
        total_lines += height
    if non_blocking:
        PopupButton = DummyButton # important to use or else button will close other windows too!
    else:
        PopupButton = CloseButton
    # show either an OK or Yes/No depending on paramater
    if custom_text != (None, None):
        # custom_text may be a single string or a (text1, text2) tuple.
        if type(custom_text) is not tuple:
            window.AddRow(PopupButton(custom_text,size=(len(custom_text),1), button_color=button_color, focus=True, bind_return_key=True))
        elif custom_text[1] is None:
            window.AddRow(PopupButton(custom_text[0],size=(len(custom_text[0]),1), button_color=button_color, focus=True, bind_return_key=True))
        else:
            window.AddRow(PopupButton(custom_text[0], button_color=button_color, focus=True, bind_return_key=True, size=(len(custom_text[0]), 1)),
                          PopupButton(custom_text[1], button_color=button_color, size=(len(custom_text[0]), 1)))
    elif button_type is POPUP_BUTTONS_YES_NO:
        window.AddRow(PopupButton('Yes', button_color=button_color, focus=True, bind_return_key=True, pad=((20, 5), 3),
                                  size=(5, 1)), PopupButton('No', button_color=button_color, size=(5, 1)))
    elif button_type is POPUP_BUTTONS_CANCELLED:
        window.AddRow(
            PopupButton('Cancelled', button_color=button_color, focus=True, bind_return_key=True, pad=((20, 0), 3)))
    elif button_type is POPUP_BUTTONS_ERROR:
        window.AddRow(PopupButton('Error', size=(6, 1), button_color=button_color, focus=True, bind_return_key=True,
                                  pad=((20, 0), 3)))
    elif button_type is POPUP_BUTTONS_OK_CANCEL:
        window.AddRow(PopupButton('OK', size=(6, 1), button_color=button_color, focus=True, bind_return_key=True),
                      PopupButton('Cancel', size=(6, 1), button_color=button_color))
    elif button_type is POPUP_BUTTONS_NO_BUTTONS:
        pass
    else:
        window.AddRow(PopupButton('OK', size=(5, 1), button_color=button_color, focus=True, bind_return_key=True,
                                  pad=((20, 0), 3)))
    # Non-blocking popups return immediately (timeout=0); blocking ones wait
    # for a button press or window close.
    if non_blocking:
        button, values = window.Read(timeout=0)
    else:
        button, values = window.Read()
    return button
|
Popup - Display a popup box with as many parms as you wish to include
:param args:
:param button_color:
:param background_color:
:param text_color:
:param button_type:
:param auto_close:
:param auto_close_duration:
:param non_blocking:
:param icon:
:param line_width:
:param font:
:param no_titlebar:
:param grab_anywhere:
:param keep_on_top:
:param location:
:return:
|
def to_xdr_object(self):
    """Create an XDR object for this :class:`Asset`.

    :return: An XDR Asset object
    """
    if self.is_native():
        return Xdr.types.Asset(type=Xdr.const.ASSET_TYPE_NATIVE)
    asset = Xdr.nullclass()
    code_len = len(self.code)
    # Asset codes are zero-padded to 4 or 12 bytes depending on length.
    pad = (4 - code_len) if code_len <= 4 else (12 - code_len)
    asset.assetCode = bytearray(self.code, 'ascii') + b'\x00' * pad
    asset.issuer = account_xdr_object(self.issuer)
    if code_len <= 4:
        return Xdr.types.Asset(type=Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM4,
                               alphaNum4=asset)
    return Xdr.types.Asset(type=Xdr.const.ASSET_TYPE_CREDIT_ALPHANUM12,
                           alphaNum12=asset)
|
Create an XDR object for this :class:`Asset`.
:return: An XDR Asset object
|
def rest_put(url, data, timeout):
    '''Call rest put method'''
    headers = {'Accept': 'application/json', 'Content-Type': 'application/json'}
    try:
        return requests.put(url, headers=headers, data=data, timeout=timeout)
    except Exception as e:
        # Best-effort: report the failure and signal it with None.
        print('Get exception {0} when sending http put to url {1}'.format(str(e), url))
        return None
|
Call rest put method
|
def get_listening(self, listen=['0.0.0.0']):
    """Returns a list of addresses SSH can listen on.

    Turns input into a sensible list of IPs SSH can listen on. Input
    must be a python list of interface names, IPs and/or CIDRs.

    :param listen: list of IPs, CIDRs, interface names
    :returns: list of IPs available on the host
    """
    # Wildcard means "everything" -- nothing to resolve.
    if listen == ['0.0.0.0']:
        return listen
    resolved = []
    for network in listen:
        try:
            # Treat the entry as a CIDR first.
            addr = get_address_in_network(network=network, fatal=True)
        except ValueError:
            if is_ip(network):
                addr = network
            else:
                # Fall back to treating it as an interface name; skip
                # interfaces with no address.
                try:
                    addr = get_iface_addr(iface=network, fatal=False)[0]
                except IndexError:
                    continue
        resolved.append(addr)
    # Nothing usable resolved: fall back to the wildcard.
    return resolved if resolved else ['0.0.0.0']
|
Returns a list of addresses SSH can listen on
Turns input into a sensible list of IPs SSH can listen on. Input
must be a python list of interface names, IPs and/or CIDRs.
:param listen: list of IPs, CIDRs, interface names
:returns: list of IPs available on the host
|
def combineblocks(blks, imgsz, stpsz=None, fn=np.median):
    """Combine blocks from an ndarray to reconstruct ndarray signal.

    Parameters
    ----------
    blks : ndarray
        nd array of blocks of a signal
    imgsz : tuple
        tuple of the signal size
    stpsz : tuple, optional (default None, corresponds to steps of 1)
        tuple of step sizes between neighboring blocks
    fn : function, optional (default np.median)
        the function used to resolve multivalued cells

    Returns
    -------
    imgs : ndarray
        reconstructed signal, unknown pixels are returned as np.nan
    """
    # Construct a vectorized append function
    def listapp(x, y):
        x.append(y)
    veclistapp = np.vectorize(listapp, otypes=[np.object_])
    # Last axis of blks indexes the blocks; the rest is the block shape.
    blksz = blks.shape[:-1]
    if stpsz is None:
        stpsz = tuple(1 for _ in blksz)
    # Calculate the number of blocks that can fit in each dimension of
    # the images
    numblocks = tuple(int(np.floor((a-b)/c) + 1) for a, b, c in
                      zip_longest(imgsz, blksz, stpsz, fillvalue=1))
    new_shape = blksz + numblocks
    blks = np.reshape(blks, new_shape)
    # Construct an imgs matrix of empty lists
    imgs = np.empty(imgsz, dtype=np.object_)
    imgs.fill([])
    # .fill shares one list across all cells; frompyfunc(list, ...) replaces
    # each cell with its own fresh (empty) list.
    imgs = np.frompyfunc(list, 1, 1)(imgs)
    # Iterate over each block and append the values to the corresponding
    # imgs cell
    for pos in np.ndindex(numblocks):
        slices = tuple(slice(a*c, a*c + b) for a, b, c in
                       zip_longest(pos, blksz, stpsz, fillvalue=1))
        veclistapp(imgs[slices].squeeze(), blks[(Ellipsis, ) + pos].squeeze())
    # Resolve each cell's list of candidate values with `fn`; cells no block
    # touched hold an empty list (np.median of [] yields nan, matching the
    # docstring -- other fn choices may differ).
    return np.vectorize(fn, otypes=[blks.dtype])(imgs)
|
Combine blocks from an ndarray to reconstruct ndarray signal.
Parameters
----------
blks : ndarray
nd array of blocks of a signal
imgsz : tuple
tuple of the signal size
stpsz : tuple, optional (default None, corresponds to steps of 1)
tuple of step sizes between neighboring blocks
fn : function, optional (default np.median)
the function used to resolve multivalued cells
Returns
-------
imgs : ndarray
reconstructed signal, unknown pixels are returned as np.nan
|
def conditional(self, condition, requirements):
    """Compute conditional requirements for several requirements at once.

    This is a shorthand to reduce duplication and to keep an inline
    declarative syntax. For example:
        lib x : x.cpp : [ conditional <toolset>gcc <variant>debug :
            <define>DEBUG_EXCEPTION <define>DEBUG_TRACE ] ;
    """
    assert is_iterable_typed(condition, basestring)
    assert is_iterable_typed(requirements, basestring)
    joined = string.join(condition, ",")
    # If the condition already carries a ':' separator, don't add another.
    sep = "" if ":" in joined else ":"
    return [joined + sep + req for req in requirements]
|
Calculates conditional requirements for multiple requirements
at once. This is a shorthand to reduce duplication and to
keep an inline declarative syntax. For example:
lib x : x.cpp : [ conditional <toolset>gcc <variant>debug :
<define>DEBUG_EXCEPTION <define>DEBUG_TRACE ] ;
|
def t_KEYWORD_AS_TAG(self, t):
    r'[a-zA-Z]+'
    # NOTE: in a PLY-style lexer the docstring above IS the token regex --
    # do not edit or reformat it.
    # Map recognised keywords to their reserved token type; anything not
    # listed in self.reserved falls back to UNKNOWN_TAG.
    t.type = self.reserved.get(t.value, 'UNKNOWN_TAG')
    # The regex matches letters only, so strip() is effectively a no-op
    # kept for safety.
    t.value = t.value.strip()
    return t
|
r'[a-zA-Z]+
|
def BROKER_TYPE(self):
    """Custom setting allowing switch between rabbitmq, redis.

    Reads ``BROKER_TYPE`` from configuration; when the value is not in
    ``SUPPORTED_BROKER_TYPES``, logs a warning and falls back to
    ``DEFAULT_BROKER_TYPE``.
    """
    broker_type = get('BROKER_TYPE', DEFAULT_BROKER_TYPE)
    if broker_type in SUPPORTED_BROKER_TYPES:
        return broker_type
    # Logger.warn is a deprecated alias of Logger.warning -- use warning().
    log.warning("Specified BROKER_TYPE {} not supported. Falling back to default {}".format(
        broker_type, DEFAULT_BROKER_TYPE))
    return DEFAULT_BROKER_TYPE
|
Custom setting allowing switch between rabbitmq, redis
|
def asDictionary(self):
    """Serialize this join-table data source to a plain dictionary."""
    result = {"type": "joinTable"}
    result["leftTableSource"] = self._leftTableSource
    result["rightTableSource"] = self._rightTableSource
    result["leftTableKey"] = self._leftTableKey
    result["rightTableKey"] = self._rightTableKey
    result["joinType"] = self._joinType
    return result
|
returns the data source as a dictionary
|
def build_import_keychain( cls, keychain_dir, namespace_id, pubkey_hex ):
    """
    Generate all possible NAME_IMPORT addresses from the NAMESPACE_REVEAL
    public key.

    Derives NAME_IMPORT_KEYRING_SIZE child addresses from the public
    keychain rooted at ``pubkey_hex``, plus the address of ``pubkey_hex``
    itself, and caches the list to disk (one address per line). Returns
    the cached list directly when the cache file already exists.

    NOTE: Python 2 code (``except Exception, e``, ``xrange``).
    """
    pubkey_addr = virtualchain.BitcoinPublicKey(str(pubkey_hex)).address()
    # do we have a cached one on disk?
    cached_keychain = cls.get_import_keychain_path(keychain_dir, namespace_id)
    if os.path.exists( cached_keychain ):
        child_addrs = []
        try:
            lines = []
            with open(cached_keychain, "r") as f:
                lines = f.readlines()
            # NOTE(review): named ``child_attrs`` here but ``child_addrs``
            # elsewhere -- works, but the inconsistency is easy to misread.
            child_attrs = [l.strip() for l in lines]
            log.debug("Loaded cached import keychain for '%s' (%s)" % (pubkey_hex, pubkey_addr))
            return child_attrs
        except Exception, e:
            # best-effort cache: on any read error, fall through and rebuild
            log.exception(e)
            pass
    pubkey_hex = str(pubkey_hex)
    public_keychain = keychain.PublicKeychain.from_public_key( pubkey_hex )
    child_addrs = []
    for i in xrange(0, NAME_IMPORT_KEYRING_SIZE):
        public_child = public_keychain.child(i)
        public_child_address = public_child.address()
        # if we're on testnet, then re-encode as a testnet address
        # (version byte 111 is the Bitcoin testnet P2PKH prefix)
        if virtualchain.version_byte == 111:
            old_child_address = public_child_address
            public_child_address = virtualchain.hex_hash160_to_address( virtualchain.address_to_hex_hash160( public_child_address ) )
            log.debug("Re-encode '%s' to '%s'" % (old_child_address, public_child_address))
        child_addrs.append( public_child_address )
        # periodic progress logging for large keyrings
        if i % 20 == 0 and i != 0:
            log.debug("%s children..." % i)
    # include this address
    child_addrs.append( pubkey_addr )
    log.debug("Done building import keychain for '%s' (%s)" % (pubkey_hex, pubkey_addr))
    # cache (best-effort: failure to cache is logged but not fatal)
    try:
        with open(cached_keychain, "w+") as f:
            for addr in child_addrs:
                f.write("%s\n" % addr)
            f.flush()
        log.debug("Cached keychain to '%s'" % cached_keychain)
    except Exception, e:
        log.exception(e)
        log.error("Unable to cache keychain for '%s' (%s)" % (pubkey_hex, pubkey_addr))
    return child_addrs
|
Generate all possible NAME_IMPORT addresses from the NAMESPACE_REVEAL public key
|
def loaders(self):  # pragma: no cover
    """Return the configured loader modules, importing them lazily.

    When ``LOADERS_FOR_DYNACONF`` is disabled (None/0/"0"/"false"/False),
    log and return an empty list. Otherwise import each named module
    once, cache the modules in ``self._loaders``, and return that list.
    """
    disabled_values = (None, 0, "0", "false", False)
    if self.LOADERS_FOR_DYNACONF in disabled_values:
        self.logger.info("No loader defined")
        return []
    if not self._loaders:
        # extend in place so callers holding a reference see the modules
        self._loaders.extend(
            importlib.import_module(module_name)
            for module_name in self.LOADERS_FOR_DYNACONF
        )
    return self._loaders
|
Return available loaders
|
def AUC_analysis(AUC):
    """
    Analyze AUC with the standard interpretation table.

    :param AUC: area under the ROC curve
    :type AUC : float
    :return: interpretation result as str
    """
    try:
        if AUC == "None":
            return "None"
        # upper-bound thresholds paired with their verdicts
        bands = ((0.6, "Poor"), (0.7, "Fair"), (0.8, "Good"),
                 (0.9, "Very Good"))
        for upper_bound, verdict in bands:
            if AUC < upper_bound:
                return verdict
        return "Excellent"
    except Exception:  # pragma: no cover
        # non-comparable input (e.g. None) -> no interpretation
        return "None"
|
Analyze AUC with the standard interpretation table.
:param AUC: area under the ROC curve
:type AUC : float
:return: interpretation result as str
|
def resample_ann(resampled_t, ann_sample):
    """
    Compute the new annotation indices after signal resampling.

    Parameters
    ----------
    resampled_t : numpy array
        Array of signal locations as returned by scipy.signal.resample
    ann_sample : numpy array
        Array of annotation locations
        (assumed sorted ascending -- TODO confirm with callers)

    Returns
    -------
    resampled_ann_sample : numpy array
        Array of resampled annotation locations, one index per input
        annotation
    """
    # tmp[j] counts how many annotations map onto resampled location j
    tmp = np.zeros(len(resampled_t), dtype='int16')
    j = 0
    tprec = resampled_t[j]
    for i, v in enumerate(ann_sample):
        # Walk j until v lies in [resampled_t[j], resampled_t[j+1]], then
        # assign v to the nearer endpoint of that interval.
        while True:
            d = False
            # annotation lies before the current window start: step back
            if v < tprec:
                j -= 1
                tprec = resampled_t[j]
            # reached the last resampled location: map v there
            if j+1 == len(resampled_t):
                tmp[j] += 1
                break
            tnow = resampled_t[j+1]
            if tprec <= v and v <= tnow:
                # assign to whichever neighbouring location is closer
                if v-tprec < tnow-v:
                    tmp[j] += 1
                else:
                    tmp[j+1] += 1
                d = True
            j += 1
            tprec = tnow
            if d:
                break
    # expand the per-location counts back into one index per annotation
    idx = np.where(tmp>0)[0].astype('int64')
    res = []
    for i in idx:
        for j in range(tmp[i]):
            res.append(i)
    # every input annotation must have been assigned exactly once
    assert len(res) == len(ann_sample)
    return np.asarray(res, dtype='int64')
|
Compute the new annotation indices
Parameters
----------
resampled_t : numpy array
Array of signal locations as returned by scipy.signal.resample
ann_sample : numpy array
Array of annotation locations
Returns
-------
resampled_ann_sample : numpy array
Array of resampled annotation locations
|
def _safe_run_theta(input_file, out_dir, output_ext, args, data):
    """Run THetA, catching and continuing on any expected errors.

    Returns the path of the output file, or None when a previous run was
    skipped or the current run hits a known-uninformative THetA failure
    (in which case a ``.skipped`` marker file is written).
    """
    out_file = os.path.join(out_dir, _split_theta_ext(input_file) + output_ext)
    skip_file = out_file + ".skipped"
    # a previous run already determined this sample should be skipped
    if utils.file_exists(skip_file):
        return None
    if utils.file_exists(out_file):
        return out_file
    with file_transaction(data, out_dir) as tx_out_dir:
        utils.safe_makedir(tx_out_dir)
        cmd = _get_cmd("RunTHetA.py") + args + \
              [input_file, "--NUM_PROCESSES", dd.get_cores(data),
               "--FORCE", "-d", tx_out_dir]
        try:
            do.run(cmd, "Run THetA to calculate purity", log_error=False)
        except subprocess.CalledProcessError as msg:
            expected_failure = (
                "Number of intervals must be greater than 1" in str(msg)
                or "This sample isn't a good candidate for THetA analysis" in str(msg))
            if not expected_failure:
                raise
            # leave a marker so subsequent runs skip this sample quickly
            with open(os.path.join(tx_out_dir, os.path.basename(skip_file)), "w") as out_handle:
                out_handle.write("Expected TheTA failure, skipping")
            return None
    return out_file
|
Run THetA, catching and continuing on any errors.
|
def search(self, query, indices=None, doc_types=None, model=None, scan=False, headers=None, **query_params):
    """Execute a search against one or more indices and return a ResultSet.

    ``query`` must be a Search object, a Query object, or a custom
    dictionary of search parameters using the query DSL to be passed
    directly. When ``scan`` is True, default scan/scroll query
    parameters are applied.
    """
    if isinstance(query, Search):
        prepared = query
    elif isinstance(query, (Query, dict)):
        prepared = Search(query)
    else:
        raise InvalidQuery("search() must be supplied with a Search or Query object, or a dict")
    if scan:
        # only fill in scan defaults the caller hasn't set explicitly
        for param_name, param_value in (("search_type", "scan"), ("scroll", "10m")):
            query_params.setdefault(param_name, param_value)
    return ResultSet(self, prepared, indices=indices, doc_types=doc_types,
                     model=model, query_params=query_params, headers=headers)
|
Execute a search against one or more indices to get the resultset.
`query` must be a Search object, a Query object, or a custom
dictionary of search parameters using the query DSL to be passed
directly.
|
def junos_call(fun, *args, **kwargs):
    '''
    .. versionadded:: 2019.2.0
    Execute an arbitrary function from the
    :mod:`junos execution module <salt.module.junos>`. To check what ``args``
    and ``kwargs`` you must send to the function, please consult the
    appropriate documentation.
    fun
        The name of the function. E.g., ``set_hostname``.
    args
        List of arguments to send to the ``junos`` function invoked.
    kwargs
        Dictionary of key-value arguments to send to the ``junos`` function
        invoked.
    CLI Example:
    .. code-block:: bash
        salt '*' napalm.junos_fun cli 'show system commit'
    '''
    prep = _junos_prep_fun(napalm_device)  # pylint: disable=undefined-variable
    if not prep['result']:
        return prep
    # normalize the function name into the junos module namespace
    mod_fun = fun if 'junos.' in fun else 'junos.{}'.format(fun)
    if mod_fun not in __salt__:
        return {
            'out': None,
            'result': False,
            'comment': '{} is not a valid function'.format(fun)
        }
    return __salt__[mod_fun](*args, **kwargs)
|
.. versionadded:: 2019.2.0
Execute an arbitrary function from the
:mod:`junos execution module <salt.module.junos>`. To check what ``args``
and ``kwargs`` you must send to the function, please consult the appropriate
documentation.
fun
The name of the function. E.g., ``set_hostname``.
args
List of arguments to send to the ``junos`` function invoked.
kwargs
Dictionary of key-value arguments to send to the ``junos`` function
invoked.
CLI Example:
.. code-block:: bash
salt '*' napalm.junos_fun cli 'show system commit'
|
def getTransitionUsers(obj, action_id, last_user=False):
    """
    Return the ids of the users who triggered the given workflow transition.

    :action_id: a string with the transition id.
    :last_user: a boolean to return only the last user triggering the
        transition or all of them.
    :returns: a list of user ids.
    """
    workflow = getToolByName(obj, 'portal_workflow')
    users = []
    try:
        # https://jira.bikalabs.com/browse/LIMS-2242:
        # Sometimes the workflow history is inexplicably missing!
        review_history = list(workflow.getInfoFor(obj, 'review_history'))
    except WorkflowException:
        logger.error(
            "workflow history is inexplicably missing."
            " https://jira.bikalabs.com/browse/LIMS-2242")
        return users
    # walk events newest-first so the most recent match comes first
    for event in reversed(review_history):
        if event.get('action', '') != action_id:
            continue
        users.append(event.get('actor', ''))
        if last_user:
            break
    return users
|
This function returns a list with the users who have done the transition.
:action_id: a string with the transition id.
:last_user: a boolean to return only the last user triggering the
transition or all of them.
:returns: a list of user ids.
|
def get_file(path=None, content=None):
    """
    Build a descriptor dict for a file or folder, filemanager-style.

    :param path: relative path, or None to get from request
    :param content: file content, output in data. Used for editfile
    :returns: dict with ``id``, ``type`` ('file' or 'folder') and an
        ``attributes`` dict (name, url path, permissions, timestamps,
        image dimensions when applicable, and size), or the result of
        ``error()`` when no path is supplied.
    """
    if path is None:
        path = request.args.get('path')
    if path is None:
        return error('No path in request')
    filename = os.path.split(path.rstrip('/'))[-1]
    extension = filename.rsplit('.', 1)[-1]
    os_file_path = web_path_to_os_path(path)
    if os.path.isdir(os_file_path):
        file_type = 'folder'
        # Ensure trailing slash
        if path[-1] != '/':
            path += '/'
    else:
        file_type = 'file'
    ctime = int(os.path.getctime(os_file_path))
    mtime = int(os.path.getmtime(os_file_path))
    height = 0
    width = 0
    if extension in ['gif', 'jpg', 'jpeg', 'png']:
        try:
            im = PIL.Image.open(os_file_path)
            # PIL's Image.size is (width, height); the original unpacked it
            # as (height, width), swapping the reported dimensions.
            width, height = im.size
        except OSError:
            log.exception('Error loading image "{}" to get width and height'.format(os_file_path))
    attributes = {
        'name': filename,
        'path': get_url_path(path),
        'readable': 1 if os.access(os_file_path, os.R_OK) else 0,
        'writeable': 1 if os.access(os_file_path, os.W_OK) else 0,
        'created': datetime.datetime.fromtimestamp(ctime).ctime(),
        'modified': datetime.datetime.fromtimestamp(mtime).ctime(),
        'timestamp': mtime,
        'width': width,
        'height': height,
        'size': os.path.getsize(os_file_path)
    }
    if content:
        attributes['content'] = content
    return {
        'id': path,
        'type': file_type,
        'attributes': attributes
    }
|
:param path: relative path, or None to get from request
:param content: file content, output in data. Used for editfile
|
def register_link(self, link):
    """
    Resolve the target of ``link`` and index the link by source and target.

    source record and index must have been set.

    Raises FieldValidationError when no record hook nor table hook matches
    any of the link's (reference, hook value) keys.
    """
    # candidate lookup keys: one per hook reference, paired with the
    # link's initial hook value
    keys = tuple((ref, link.initial_hook_value) for ref in link.hook_references)
    # look for a record hook first
    for k in keys:
        if k in self._record_hooks:
            # set link target
            link.set_target(target_record=self._record_hooks[k].target_record)
            break
    else:
        # no record hook matched: look for a table hook
        for k in keys:
            if k in self._table_hooks:
                # set link target
                link.set_target(target_table=self._table_hooks[k])
                break
        else:
            # neither kind of hook matched: report a validation error
            field_descriptor = link.source_record.get_field_descriptor(link.source_index)
            raise FieldValidationError(
                f"No object found with any of given references : {keys}. "
                f"{field_descriptor.get_error_location_message(link.initial_hook_value)}"
            )
    # store by source
    if link.source_record not in self._links_by_source:
        self._links_by_source[link.source_record] = set()
    self._links_by_source[link.source_record].add(link)
    # store by target
    if link.target not in self._links_by_target:
        self._links_by_target[link.target] = set()
    self._links_by_target[link.target].add(link)
|
source record and index must have been set
|
def split(table, field, pattern, newfields=None, include_original=False,
          maxsplit=0, flags=0):
    """
    Add one or more new fields with values generated by splitting an
    existing value around occurrences of a regular expression. E.g.::
        >>> import petl as etl
        >>> table1 = [['id', 'variable', 'value'],
        ...           ['1', 'parad1', '12'],
        ...           ['2', 'parad2', '15'],
        ...           ['3', 'tempd1', '18'],
        ...           ['4', 'tempd2', '19']]
        >>> table2 = etl.split(table1, 'variable', 'd', ['variable', 'day'])

    splits the 'variable' field on 'd' into new 'variable' and 'day'
    fields. By default the field on which the split is performed is
    omitted. It can be included using the `include_original` argument.
    `maxsplit` and `flags` are passed through to the underlying regular
    expression split.
    """
    view = SplitView(table, field, pattern, newfields, include_original,
                     maxsplit, flags)
    return view
|
Add one or more new fields with values generated by splitting an
existing value around occurrences of a regular expression. E.g.::
>>> import petl as etl
>>> table1 = [['id', 'variable', 'value'],
... ['1', 'parad1', '12'],
... ['2', 'parad2', '15'],
... ['3', 'tempd1', '18'],
... ['4', 'tempd2', '19']]
>>> table2 = etl.split(table1, 'variable', 'd', ['variable', 'day'])
>>> table2
+-----+-------+----------+-----+
| id | value | variable | day |
+=====+=======+==========+=====+
| '1' | '12' | 'para' | '1' |
+-----+-------+----------+-----+
| '2' | '15' | 'para' | '2' |
+-----+-------+----------+-----+
| '3' | '18' | 'temp' | '1' |
+-----+-------+----------+-----+
| '4' | '19' | 'temp' | '2' |
+-----+-------+----------+-----+
By default the field on which the split is performed is omitted. It can
be included using the `include_original` argument.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.