| code | docstring |
|---|---|
def cli(env, identifier, uri, ibm_api_key):
"""Export an image to object storage.
The URI for an object storage object (.vhd/.iso file) must be of the format:
swift://<objectStorageAccount>@<cluster>/<container>/<objectPath>
or cos://<regionName>/<bucketName>/<objectPath> if using IBM Cloud
Object Storage
"""
image_mgr = SoftLayer.ImageManager(env.client)
image_id = helpers.resolve_id(image_mgr.resolve_ids, identifier, 'image')
result = image_mgr.export_image_to_uri(image_id, uri, ibm_api_key)
if not result:
raise exceptions.CLIAbort("Failed to export Image")
|
Export an image to object storage.
The URI for an object storage object (.vhd/.iso file) must be of the format:
swift://<objectStorageAccount>@<cluster>/<container>/<objectPath>
or cos://<regionName>/<bucketName>/<objectPath> if using IBM Cloud
Object Storage
|
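For example (account, cluster, region, and bucket names here are hypothetical), the two accepted URI shapes look like:
swift://SLOS123456-2@dal05/exports/my-image.vhd
cos://us-south/my-bucket/exports/my-image.vhd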
def process(self, job_id):
"""
Process a job from the queue
"""
self._logger.info(
'{:.2f}: Process job {}'.format(self._env.now, job_id)
)
# log time of commencement of service
self._observer.notify_service(time=self._env.now, job_id=job_id)
# draw a new service time
try:
service_time = next(self._service_time_generator)
except StopIteration:
# ERROR: no more service times
error_msg = 'Service time generator exhausted'
self._logger.error(error_msg)
# raise a different exception, as simpy uses StopIteration to
# signify end of process (generator)
raise GGCQServiceTimeStopIteration(error_msg)
# wait for the service time to pass
try:
self._logger.debug('Service time: {:.2f}'.format(service_time))
except (TypeError, ValueError):
# service_time may not be formattable as a float; the real error is raised below
pass
try:
yield self._env.timeout(service_time)
except TypeError:
# error: service time of wrong type
error_msg = (
"service time '{}' has wrong type '{}'".format(
service_time, type(service_time).__name__
)
)
self._logger.error(error_msg)
# trigger exception
raise GGCQServiceTimeTypeError(error_msg)
except ValueError as exc:
if str(exc).startswith('Negative delay'):
# error: negative service time
error_msg = (
"negative service time {:.2f}".format(
service_time
)
)
self._logger.error(error_msg)
# trigger exception
raise GGCQNegativeServiceTimeError(error_msg)
else:
raise
# job finished processing -> departing
self._logger.info(
'{:.2f}: Finished processing job {}'.format(self._env.now, job_id)
)
# log departure epoch
self._observer.notify_departure(time=self._env.now, job_id=job_id)
|
Process a job from the queue
|
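A minimal, self-contained sketch of the pattern used above (simpy treats StopIteration as normal process termination, so an exhausted service-time generator has to be re-raised as a distinct type; here RuntimeError stands in for GGCQServiceTimeStopIteration):

import simpy

def serve(env, service_times, job_id):
    try:
        service_time = next(service_times)
    except StopIteration:
        # simpy would read StopIteration as "process finished"; surface it explicitly
        raise RuntimeError('service time generator exhausted')
    yield env.timeout(service_time)
    print('{:.2f}: finished job {}'.format(env.now, job_id))

env = simpy.Environment()
env.process(serve(env, iter([1.5]), job_id=1))
env.run()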
def localize_fieldnames(fields, internationalized_fields):
"""
Given a list of fields and a list of field names that
are internationalized, will return a list with
all internationalized fields properly localized.
>>> from django.utils.translation import activate
>>> activate('en-us')
>>> localize_fieldnames(['name', 'title', 'url'], ['title'])
['name', 'title_en_us', 'url']
:param fields: A :class:`list` of field names.
:param internationalized_fields: A list of field names that are internationalized.
:rtype: A list with the actual field names that are used in the current language.
"""
result = []
lang = get_language()
for field in fields:
if field in internationalized_fields:
result.append(get_real_fieldname(field, lang))
else:
result.append(field)
return result
|
Given a list of fields and a list of field names that
are internationalized, will return a list with
all internationalized fields properly localized.
>>> from django.utils.translation import activate
>>> activate('en-us')
>>> localize_fieldnames(['name', 'title', 'url'], ['title'])
['name', 'title_en_us', 'url']
:param fields: A :class:`list` of field names.
:param internationalized_fields: A list of field names that are internationalized.
:rtype: A list with the actual field names that are used in the current language.
|
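The helper get_real_fieldname is not shown in this snippet; judging from the doctest, a plausible implementation simply suffixes the field with the normalized language code:

def get_real_fieldname(field, lang):
    # hypothetical, inferred from the doctest: 'title', 'en-us' -> 'title_en_us'
    return '{}_{}'.format(field, lang.replace('-', '_'))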
def derivative(self, x):
"""Return the derivative at ``x``.
The derivative of the right scalar operator multiplication
follows the chain rule:
``OperatorRightScalarMult(op, s).derivative(y) ==
OperatorLeftScalarMult(op.derivative(s * y), s)``
Parameters
----------
x : `domain` `element-like`
Evaluation point of the derivative.
Examples
--------
>>> space = odl.rn(3)
>>> operator = odl.IdentityOperator(space) - space.element([1, 1, 1])
>>> left_mul_op = OperatorRightScalarMult(operator, 3)
>>> derivative = left_mul_op.derivative([0, 0, 0])
>>> derivative([1, 1, 1])
rn(3).element([ 3., 3., 3.])
"""
return self.scalar * self.operator.derivative(self.scalar * x)
|
Return the derivative at ``x``.
The derivative of the right scalar operator multiplication
follows the chain rule:
``OperatorRightScalarMult(op, s).derivative(y) ==
OperatorLeftScalarMult(op.derivative(s * y), s)``
Parameters
----------
x : `domain` `element-like`
Evaluation point of the derivative.
Examples
--------
>>> space = odl.rn(3)
>>> operator = odl.IdentityOperator(space) - space.element([1, 1, 1])
>>> left_mul_op = OperatorRightScalarMult(operator, 3)
>>> derivative = left_mul_op.derivative([0, 0, 0])
>>> derivative([1, 1, 1])
rn(3).element([ 3., 3., 3.])
|
def as_xml(self):
"""Return XML serialization of this list.
This code does not support the case where the list is too big for
a single XML document.
"""
self.default_capability()
s = self.new_sitemap()
return s.resources_as_xml(self, sitemapindex=self.sitemapindex)
|
Return XML serialization of this list.
This code does not support the case where the list is too big for
a single XML document.
|
def read_namespaced_pod_status(self, name, namespace, **kwargs): # noqa: E501
"""read_namespaced_pod_status # noqa: E501
read status of the specified Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_pod_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Pod (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Pod
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.read_namespaced_pod_status_with_http_info(name, namespace, **kwargs) # noqa: E501
else:
data = self.read_namespaced_pod_status_with_http_info(name, namespace, **kwargs)  # noqa: E501
return data
|
read_namespaced_pod_status # noqa: E501
read status of the specified Pod # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.read_namespaced_pod_status(name, namespace, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str name: name of the Pod (required)
:param str namespace: object name and auth scope, such as for teams and projects (required)
:param str pretty: If 'true', then the output is pretty printed.
:return: V1Pod
If the method is called asynchronously,
returns the request thread.
|
def clear_display_name(self):
"""Clears the display name.
raise: NoAccess - ``display_name`` cannot be modified
*compliance: mandatory -- This method must be implemented.*
"""
if (self.get_display_name_metadata().is_read_only() or
self.get_display_name_metadata().is_required()):
raise errors.NoAccess()
self._my_map['displayName'] = self._display_name_metadata['default_string_values'][0]
|
Clears the display name.
raise: NoAccess - ``display_name`` cannot be modified
*compliance: mandatory -- This method must be implemented.*
|
def save_data(self):
"""Save data"""
title = _("Save profiler result")
filename, _selfilter = getsavefilename(
self, title, getcwd_or_home(),
_("Profiler result")+" (*.Result)")
if filename:
self.datatree.save_data(filename)
|
Save data
|
def find_device(self, service_uuids=[], name=None, timeout_sec=TIMEOUT_SEC):
"""Return the first device that advertises the specified service UUIDs or
has the specified name. Will wait up to timeout_sec seconds for the device
to be found, and if the timeout is zero then it will not wait at all and
immediately return a result. When no device is found a value of None is
returned.
"""
start = time.time()
while True:
# Call find_devices and grab the first result if any are found.
found = self.find_devices(service_uuids, name)
if len(found) > 0:
return found[0]
# No device was found. Check if the timeout is exceeded and wait to
# try again.
if time.time()-start >= timeout_sec:
# Failed to find a device within the timeout.
return None
time.sleep(1)
|
Return the first device that advertises the specified service UUIDs or
has the specified name. Will wait up to timeout_sec seconds for the device
to be found, and if the timeout is zero then it will not wait at all and
immediately return a result. When no device is found a value of None is
returned.
|
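Hypothetical usage; `adapter` (an object exposing this method) and UART_SERVICE_UUID are assumed to exist:

# adapter and UART_SERVICE_UUID are assumptions, not part of the snippet above
device = adapter.find_device(service_uuids=[UART_SERVICE_UUID], timeout_sec=30)
if device is None:
    raise RuntimeError('failed to find a device within 30 seconds')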
def MI_referenceNames(self,
env,
objectName,
resultClassName,
role):
# pylint: disable=invalid-name
"""Return instance names of an association class.
Implements the WBEM operation ReferenceNames in terms
of the references method. A derived class will not normally
override this method.
"""
logger = env.get_logger()
logger.log_debug('CIMProvider2 MI_referenceNames <2> called. ' \
'resultClass: %s' % (resultClassName))
if not resultClassName:
raise pywbem.CIMError(
pywbem.CIM_ERR_FAILED,
"Empty resultClassName passed to ReferenceNames")
model = pywbem.CIMInstance(classname=resultClassName)
model.path = pywbem.CIMInstanceName(classname=resultClassName,
namespace=objectName.namespace)
if role:
if role in model.properties:
model[role] = objectName
gen = self.references(env=env,
object_name=objectName,
model=model,
result_class_name='',
role=role,
result_role=None,
keys_only=True)
if gen is None:
logger.log_debug('references() returned None instead of ' \
'generator object')
return
for inst in gen:
for prop in inst.properties.values():
if hasattr(prop.value, 'namespace') and \
prop.value.namespace is None:
prop.value.namespace = objectName.namespace
yield inst.path
logger.log_debug('CIMProvider2 MI_referenceNames returning')
|
Return instance names of an association class.
Implements the WBEM operation ReferenceNames in terms
of the references method. A derived class will not normally
override this method.
|
def _build_latex_array(self, aliases=None):
"""Returns an array of strings containing \\LaTeX for this circuit.
If aliases is not None, aliases contains a dict mapping
the current qubits in the circuit to new qubit names.
We will deduce the register names and sizes from aliases.
"""
columns = 1
# Rename qregs if necessary
if aliases:
qregdata = {}
for q in aliases.values():
if q[0] not in qregdata:
qregdata[q[0]] = q[1] + 1
elif qregdata[q[0]] < q[1] + 1:
qregdata[q[0]] = q[1] + 1
else:
qregdata = self.qregs
for column, layer in enumerate(self.ops, 1):
for op in layer:
if op.condition:
mask = self._get_mask(op.condition[0])
cl_reg = self.clbit_list[self._ffs(mask)]
if_reg = cl_reg[0]
pos_2 = self.img_regs[cl_reg]
if_value = format(op.condition[1],
'b').zfill(self.cregs[if_reg])[::-1]
if op.name not in ['measure', 'barrier', 'snapshot', 'load',
'save', 'noise']:
nm = op.name
qarglist = op.qargs
if aliases is not None:
qarglist = list(map(lambda x: aliases[x], qarglist))  # list() so len() works on Python 3
if len(qarglist) == 1:
pos_1 = self.img_regs[(qarglist[0][0],
qarglist[0][1])]
if op.condition:
mask = self._get_mask(op.condition[0])
cl_reg = self.clbit_list[self._ffs(mask)]
if_reg = cl_reg[0]
pos_2 = self.img_regs[cl_reg]
if nm == "x":
self._latex[pos_1][column] = "\\gate{X}"
elif nm == "y":
self._latex[pos_1][column] = "\\gate{Y}"
elif nm == "z":
self._latex[pos_1][column] = "\\gate{Z}"
elif nm == "h":
self._latex[pos_1][column] = "\\gate{H}"
elif nm == "s":
self._latex[pos_1][column] = "\\gate{S}"
elif nm == "sdg":
self._latex[pos_1][column] = "\\gate{S^\\dag}"
elif nm == "t":
self._latex[pos_1][column] = "\\gate{T}"
elif nm == "tdg":
self._latex[pos_1][column] = "\\gate{T^\\dag}"
elif nm == "u0":
self._latex[pos_1][column] = "\\gate{U_0(%s)}" % (
op.op.params[0])
elif nm == "u1":
self._latex[pos_1][column] = "\\gate{U_1(%s)}" % (
op.op.params[0])
elif nm == "u2":
self._latex[pos_1][column] = \
"\\gate{U_2\\left(%s,%s\\right)}" % (
op.op.params[0], op.op.params[1])
elif nm == "u3":
self._latex[pos_1][column] = ("\\gate{U_3(%s,%s,%s)}" % (
op.op.params[0],
op.op.params[1],
op.op.params[2]))
elif nm == "rx":
self._latex[pos_1][column] = "\\gate{R_x(%s)}" % (
op.op.params[0])
elif nm == "ry":
self._latex[pos_1][column] = "\\gate{R_y(%s)}" % (
op.op.params[0])
elif nm == "rz":
self._latex[pos_1][column] = "\\gate{R_z(%s)}" % (
op.op.params[0])
else:
self._latex[pos_1][column] = "\\gate{%s}" % nm
gap = pos_2 - pos_1
for i in range(self.cregs[if_reg]):
if if_value[i] == '1':
self._latex[pos_2 + i][column] = \
"\\control \\cw \\cwx[-" + str(gap) + "]"
gap = 1
else:
self._latex[pos_2 + i][column] = \
"\\controlo \\cw \\cwx[-" + str(gap) + "]"
gap = 1
else:
if nm == "x":
self._latex[pos_1][column] = "\\gate{X}"
elif nm == "y":
self._latex[pos_1][column] = "\\gate{Y}"
elif nm == "z":
self._latex[pos_1][column] = "\\gate{Z}"
elif nm == "h":
self._latex[pos_1][column] = "\\gate{H}"
elif nm == "s":
self._latex[pos_1][column] = "\\gate{S}"
elif nm == "sdg":
self._latex[pos_1][column] = "\\gate{S^\\dag}"
elif nm == "t":
self._latex[pos_1][column] = "\\gate{T}"
elif nm == "tdg":
self._latex[pos_1][column] = "\\gate{T^\\dag}"
elif nm == "u0":
self._latex[pos_1][column] = "\\gate{U_0(%s)}" % (
op.op.params[0])
elif nm == "u1":
self._latex[pos_1][column] = "\\gate{U_1(%s)}" % (
op.op.params[0])
elif nm == "u2":
self._latex[pos_1][column] = \
"\\gate{U_2\\left(%s,%s\\right)}" % (
op.op.params[0], op.op.params[1])
elif nm == "u3":
self._latex[pos_1][column] = ("\\gate{U_3(%s,%s,%s)}" % (
op.op.params[0],
op.op.params[1],
op.op.params[2]))
elif nm == "rx":
self._latex[pos_1][column] = "\\gate{R_x(%s)}" % (
op.op.params[0])
elif nm == "ry":
self._latex[pos_1][column] = "\\gate{R_y(%s)}" % (
op.op.params[0])
elif nm == "rz":
self._latex[pos_1][column] = "\\gate{R_z(%s)}" % (
op.op.params[0])
elif nm == "reset":
self._latex[pos_1][column] = (
"\\push{\\rule{.6em}{0em}\\ket{0}\\"
"rule{.2em}{0em}} \\qw")
else:
self._latex[pos_1][column] = "\\gate{%s}" % nm
elif len(qarglist) == 2:
pos_1 = self.img_regs[(qarglist[0][0], qarglist[0][1])]
pos_2 = self.img_regs[(qarglist[1][0], qarglist[1][1])]
if op.condition:
pos_3 = self.img_regs[(if_reg, 0)]
temp = [pos_1, pos_2, pos_3]
temp.sort(key=int)
bottom = temp[1]
gap = pos_3 - bottom
for i in range(self.cregs[if_reg]):
if if_value[i] == '1':
self._latex[pos_3 + i][column] = \
"\\control \\cw \\cwx[-" + str(gap) + "]"
gap = 1
else:
self._latex[pos_3 + i][column] = \
"\\controlo \\cw \\cwx[-" + str(gap) + "]"
gap = 1
if nm == "cx":
self._latex[pos_1][column] = \
"\\ctrl{" + str(pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\targ"
elif nm == "cz":
self._latex[pos_1][column] = \
"\\ctrl{" + str(pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\control\\qw"
elif nm == "cy":
self._latex[pos_1][column] = \
"\\ctrl{" + str(pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\gate{Y}"
elif nm == "ch":
self._latex[pos_1][column] = \
"\\ctrl{" + str(pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\gate{H}"
elif nm == "swap":
self._latex[pos_1][column] = "\\qswap"
self._latex[pos_2][column] = \
"\\qswap \\qwx[" + str(pos_1 - pos_2) + "]"
elif nm == "crz":
self._latex[pos_1][column] = \
"\\ctrl{" + str(pos_2 - pos_1) + "}"
self._latex[pos_2][column] = \
"\\gate{R_z(%s)}" % (op.op.params[0])
elif nm == "cu1":
self._latex[pos_1][column - 1] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column - 1] = "\\control\\qw"
self._latex[min(pos_1, pos_2)][column] = \
"\\dstick{%s}\\qw" % (op.op.params[0])
self._latex[max(pos_1, pos_2)][column] = "\\qw"
elif nm == "cu3":
self._latex[pos_1][column] = \
"\\ctrl{" + str(pos_2 - pos_1) + "}"
self._latex[pos_2][column] = \
"\\gate{U_3(%s,%s,%s)}" % (op.op.params[0],
op.op.params[1],
op.op.params[2])
else:
temp = [pos_1, pos_2]
temp.sort(key=int)
if nm == "cx":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\targ"
elif nm == "cz":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\control\\qw"
elif nm == "cy":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\gate{Y}"
elif nm == "ch":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\gate{H}"
elif nm == "swap":
self._latex[pos_1][column] = "\\qswap"
self._latex[pos_2][column] = \
"\\qswap \\qwx[" + str(pos_1 - pos_2) + "]"
elif nm == "crz":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = \
"\\gate{R_z(%s)}" % (op.op.params[0])
elif nm == "cu1":
self._latex[pos_1][column - 1] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column - 1] = "\\control\\qw"
self._latex[min(pos_1, pos_2)][column] = \
"\\dstick{%s}\\qw" % (op.op.params[0])
self._latex[max(pos_1, pos_2)][column] = "\\qw"
elif nm == "cu3":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = ("\\gate{U_3(%s,%s,%s)}" % (
op.op.params[0],
op.op.params[1],
op.op.params[2]))
else:
start_pos = min([pos_1, pos_2])
stop_pos = max([pos_1, pos_2])
if stop_pos - start_pos >= 2:
delta = stop_pos - start_pos
self._latex[start_pos][column] = (
"\\multigate{%s}{%s}" % (delta, nm))
for i_pos in range(start_pos + 1, stop_pos + 1):
self._latex[i_pos][column] = "\\ghost{%s}" % nm
else:
self._latex[start_pos][column] = (
"\\multigate{1}{%s}" % nm)
self._latex[stop_pos][column] = "\\ghost{%s}" % nm
elif len(qarglist) == 3:
pos_1 = self.img_regs[(qarglist[0][0], qarglist[0][1])]
pos_2 = self.img_regs[(qarglist[1][0], qarglist[1][1])]
pos_3 = self.img_regs[(qarglist[2][0], qarglist[2][1])]
if op.condition:
pos_4 = self.img_regs[(if_reg, 0)]
temp = [pos_1, pos_2, pos_3, pos_4]
temp.sort(key=int)
bottom = temp[2]
prev_column = [x[column - 1] for x in self._latex]
for item, prev_entry in enumerate(prev_column):
if 'barrier' in prev_entry:
span = re.search('barrier{(.*)}', prev_entry)
if span and any(i in temp for i in range(
item, int(span.group(1)))):
self._latex[item][column - 1] = \
prev_entry.replace(
'\\barrier{',
'\\barrier[-0.65em]{')
gap = pos_4 - bottom
for i in range(self.cregs[if_reg]):
if if_value[i] == '1':
self._latex[pos_4 + i][column] = \
"\\control \\cw \\cwx[-" + str(gap) + "]"
gap = 1
else:
self._latex[pos_4 + i][column] = \
"\\controlo \\cw \\cwx[-" + str(gap) + "]"
gap = 1
if nm == "ccx":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\ctrl{" + str(
pos_3 - pos_2) + "}"
self._latex[pos_3][column] = "\\targ"
if nm == "cswap":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\qswap"
self._latex[pos_3][column] = \
"\\qswap \\qwx[" + str(pos_2 - pos_3) + "]"
else:
temp = [pos_1, pos_2, pos_3]
temp.sort(key=int)
prev_column = [x[column - 1] for x in self._latex]
for item, prev_entry in enumerate(prev_column):
if 'barrier' in prev_entry:
span = re.search('barrier{(.*)}', prev_entry)
if span and any(i in temp for i in range(
item, int(span.group(1)))):
self._latex[item][column - 1] = \
prev_entry.replace(
'\\barrier{',
'\\barrier[-0.65em]{')
if nm == "ccx":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\ctrl{" + str(
pos_3 - pos_2) + "}"
self._latex[pos_3][column] = "\\targ"
elif nm == "cswap":
self._latex[pos_1][column] = "\\ctrl{" + str(
pos_2 - pos_1) + "}"
self._latex[pos_2][column] = "\\qswap"
self._latex[pos_3][column] = \
"\\qswap \\qwx[" + str(pos_2 - pos_3) + "]"
else:
start_pos = min([pos_1, pos_2, pos_3])
stop_pos = max([pos_1, pos_2, pos_3])
if stop_pos - start_pos >= 3:
delta = stop_pos - start_pos
self._latex[start_pos][column] = (
"\\multigate{%s}{%s}" % (delta, nm))
for i_pos in range(start_pos + 1, stop_pos + 1):
self._latex[i_pos][column] = "\\ghost{%s}" % nm
else:
self._latex[pos_1][column] = (
"\\multigate{2}{%s}" % nm)
self._latex[pos_2][column] = "\\ghost{%s}" % nm
self._latex[pos_3][column] = "\\ghost{%s}" % nm
elif len(qarglist) > 3:
nbits = len(qarglist)
pos_array = [self.img_regs[(qarglist[0][0],
qarglist[0][1])]]
for i in range(1, nbits):
pos_array.append(self.img_regs[(qarglist[i][0],
qarglist[i][1])])
pos_start = min(pos_array)
pos_stop = max(pos_array)
delta = pos_stop - pos_start
self._latex[pos_start][column] = (
"\\multigate{%s}{%s}" % (nbits - 1, nm))
for pos in range(pos_start + 1, pos_stop + 1):
self._latex[pos][column] = "\\ghost{%s}" % nm
elif op.name == "measure":
if (len(op.cargs) != 1
or len(op.qargs) != 1
or op.op.params):
raise exceptions.VisualizationError("bad operation record")
if op.condition:
raise exceptions.VisualizationError(
"If controlled measures currently not supported.")
qname, qindex = op.qargs[0]
cname, cindex = op.cargs[0]
if aliases:
newq = aliases[(qname, qindex)]
qname = newq[0]
qindex = newq[1]
pos_1 = self.img_regs[(qname, qindex)]
pos_2 = self.img_regs[(cname, cindex)]
try:
self._latex[pos_1][column] = "\\meter"
prev_column = [x[column - 1] for x in self._latex]
for item, prev_entry in enumerate(prev_column):
if 'barrier' in prev_entry:
span = re.search('barrier{(.*)}', prev_entry)
if span and (
item + int(span.group(1))) - pos_1 >= 0:
self._latex[item][column - 1] = \
prev_entry.replace(
'\\barrier{',
'\\barrier[-1.15em]{')
self._latex[pos_2][column] = \
"\\cw \\cwx[-" + str(pos_2 - pos_1) + "]"
except Exception as e:
raise exceptions.VisualizationError(
'Error during Latex building: %s' % str(e))
elif op.name in ['barrier', 'snapshot', 'load', 'save',
'noise']:
if self.plot_barriers:
qarglist = op.qargs
indexes = [self._get_qubit_index(x) for x in qarglist]
start_bit = self.qubit_list[min(indexes)]
if aliases is not None:
qarglist = list(map(lambda x: aliases[x], qarglist))
start = self.img_regs[start_bit]
span = len(op.qargs) - 1
self._latex[start][column] = "\\qw \\barrier{" + str(
span) + "}"
else:
raise exceptions.VisualizationError("bad node data")
|
Returns an array of strings containing \\LaTeX for this circuit.
If aliases is not None, aliases contains a dict mapping
the current qubits in the circuit to new qubit names.
We will deduce the register names and sizes from aliases.
|
def get_value(self, index):
"""Return current value"""
if index.column() == 0:
return self.keys[index.row()]
elif index.column() == 1:
return self.types[index.row()]
elif index.column() == 2:
return self.sizes[index.row()]
else:
return self._data[self.keys[index.row()]]
|
Return current value
|
def dot(x_gpu, y_gpu, transa='N', transb='N', handle=None, target=None):
"""
Dot product of two arrays.
For 1D arrays, this function computes the inner product. For 2D
arrays of shapes `(m, k)` and `(k, n)`, it computes the matrix
product; the result has shape `(m, n)`.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Input array.
y_gpu : pycuda.gpuarray.GPUArray
Input array.
transa : char
If 'T', compute the product of the transpose of `x_gpu`.
If 'C', compute the product of the Hermitian of `x_gpu`.
transb : char
If 'T', compute the product of the transpose of `y_gpu`.
If 'C', compute the product of the Hermitian of `y_gpu`.
handle : int
CUBLAS context. If no context is specified, the default handle from
`scikits.cuda.misc._global_cublas_handle` is used.
target : pycuda.gpuarray.GPUArray, optional
Output array. If None (the default), a new output array is allocated.
Returns
-------
c_gpu : pycuda.gpuarray.GPUArray, float{32,64}, or complex{64,128}
Inner product of `x_gpu` and `y_gpu`. When the inputs are 1D
arrays, the result will be returned as a scalar.
Notes
-----
The input matrices must all contain elements of the same data type.
Examples
--------
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import linalg
>>> import misc
>>> linalg.init()
>>> a = np.asarray(np.random.rand(4, 2), np.float32)
>>> b = np.asarray(np.random.rand(2, 2), np.float32)
>>> a_gpu = gpuarray.to_gpu(a)
>>> b_gpu = gpuarray.to_gpu(b)
>>> c_gpu = linalg.dot(a_gpu, b_gpu)
>>> np.allclose(np.dot(a, b), c_gpu.get())
True
>>> d = np.asarray(np.random.rand(5), np.float32)
>>> e = np.asarray(np.random.rand(5), np.float32)
>>> d_gpu = gpuarray.to_gpu(d)
>>> e_gpu = gpuarray.to_gpu(e)
>>> f = linalg.dot(d_gpu, e_gpu)
>>> np.allclose(np.dot(d, e), f)
True
"""
if handle is None:
handle = _global_cublas_handle
if len(x_gpu.shape) == 1 and len(y_gpu.shape) == 1:
if x_gpu.size != y_gpu.size:
raise ValueError('arrays must be of same length: '
'x_gpu.size = %d, y_gpu.size = %d' %
(x_gpu.size, y_gpu.size))
# Compute inner product for 1D arrays:
if (x_gpu.dtype == np.complex64 and y_gpu.dtype == np.complex64):
cublas_func = cublas.cublasCdotu
elif (x_gpu.dtype == np.float32 and y_gpu.dtype == np.float32):
cublas_func = cublas.cublasSdot
elif (x_gpu.dtype == np.complex128 and y_gpu.dtype == np.complex128):
cublas_func = cublas.cublasZdotu
elif (x_gpu.dtype == np.float64 and y_gpu.dtype == np.float64):
cublas_func = cublas.cublasDdot
else:
raise ValueError('unsupported combination of input types: '
'x_gpu.dtype = %s, y_gpu.dtype = %s' %
(str(x_gpu.dtype), str(y_gpu.dtype)))
return cublas_func(handle, x_gpu.size, x_gpu.gpudata, 1,
y_gpu.gpudata, 1)
else:
# Get the shapes of the arguments (accounting for the
# possibility that one of them may only have one dimension):
x_shape = x_gpu.shape
y_shape = y_gpu.shape
if len(x_shape) == 1:
x_shape = (1, x_shape[0])
if len(y_shape) == 1:
y_shape = (1, y_shape[0])
# Perform matrix multiplication for 2D arrays:
if (x_gpu.dtype == np.complex64 and y_gpu.dtype == np.complex64):
cublas_func = cublas.cublasCgemm
alpha = np.complex64(1.0)
beta = np.complex64(0.0)
elif (x_gpu.dtype == np.float32 and y_gpu.dtype == np.float32):
cublas_func = cublas.cublasSgemm
alpha = np.float32(1.0)
beta = np.float32(0.0)
elif (x_gpu.dtype == np.complex128 and y_gpu.dtype == np.complex128):
cublas_func = cublas.cublasZgemm
alpha = np.complex128(1.0)
beta = np.complex128(0.0)
elif (x_gpu.dtype == np.float64 and y_gpu.dtype == np.float64):
cublas_func = cublas.cublasDgemm
alpha = np.float64(1.0)
beta = np.float64(0.0)
else:
raise ValueError('unsupported combination of input types: '
'x_gpu.dtype = %s, y_gpu.dtype = %s' %
(str(x_gpu.dtype), str(y_gpu.dtype)))
transa = transa.lower()
transb = transb.lower()
if transb in ['t', 'c']:
m, k = y_shape
elif transb in ['n']:
k, m = y_shape
else:
raise ValueError('invalid value "%s" for transb' % transb)
if transa in ['t', 'c']:
l, n = x_shape
elif transa in ['n']:
n, l = x_shape
else:
raise ValueError('invalid value "%s" for transa' % transa)
if l != k:
raise ValueError('objects are not aligned: x_shape = %s, y_shape = %s' %
(x_shape, y_shape))
if transb == 'n':
lda = max(1, m)
else:
lda = max(1, k)
if transa == 'n':
ldb = max(1, k)
else:
ldb = max(1, n)
ldc = max(1, m)
# Note that the desired shape of the output matrix is the transpose
# of what CUBLAS assumes:
if target is None:
target = gpuarray.empty((n, ldc), x_gpu.dtype, allocator=memory_pool.allocate)
cublas_func(handle, transb, transa, m, n, k, alpha, y_gpu.gpudata,
lda, x_gpu.gpudata, ldb, beta, target.gpudata, ldc)
return target
|
Dot product of two arrays.
For 1D arrays, this function computes the inner product. For 2D
arrays of shapes `(m, k)` and `(k, n)`, it computes the matrix
product; the result has shape `(m, n)`.
Parameters
----------
x_gpu : pycuda.gpuarray.GPUArray
Input array.
y_gpu : pycuda.gpuarray.GPUArray
Input array.
transa : char
If 'T', compute the product of the transpose of `x_gpu`.
If 'C', compute the product of the Hermitian of `x_gpu`.
transb : char
If 'T', compute the product of the transpose of `y_gpu`.
If 'C', compute the product of the Hermitian of `y_gpu`.
handle : int
CUBLAS context. If no context is specified, the default handle from
`scikits.cuda.misc._global_cublas_handle` is used.
target : pycuda.gpuarray.GPUArray, optional
Output array. If None (the default), a new output array is allocated.
Returns
-------
c_gpu : pycuda.gpuarray.GPUArray, float{32,64}, or complex{64,128}
Inner product of `x_gpu` and `y_gpu`. When the inputs are 1D
arrays, the result will be returned as a scalar.
Notes
-----
The input matrices must all contain elements of the same data type.
Examples
--------
>>> import pycuda.gpuarray as gpuarray
>>> import pycuda.autoinit
>>> import numpy as np
>>> import linalg
>>> import misc
>>> linalg.init()
>>> a = np.asarray(np.random.rand(4, 2), np.float32)
>>> b = np.asarray(np.random.rand(2, 2), np.float32)
>>> a_gpu = gpuarray.to_gpu(a)
>>> b_gpu = gpuarray.to_gpu(b)
>>> c_gpu = linalg.dot(a_gpu, b_gpu)
>>> np.allclose(np.dot(a, b), c_gpu.get())
True
>>> d = np.asarray(np.random.rand(5), np.float32)
>>> e = np.asarray(np.random.rand(5), np.float32)
>>> d_gpu = gpuarray.to_gpu(d)
>>> e_gpu = gpuarray.to_gpu(e)
>>> f = linalg.dot(d_gpu, e_gpu)
>>> np.allclose(np.dot(d, e), f)
True
|
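The operand swap in the gemm call above relies on the transpose identity (A·B)ᵀ = Bᵀ·Aᵀ: a row-major buffer reinterpreted as column-major is the transpose of the same matrix, so asking column-major BLAS for B·A yields A·B in row-major layout. A small NumPy sketch of the identity:

import numpy as np

a = np.random.rand(4, 2).astype(np.float32)
b = np.random.rand(2, 3).astype(np.float32)
# gemm effectively computes b.T @ a.T in column-major terms; transposing back gives a @ b
assert np.allclose((b.T @ a.T).T, a @ b)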
def get_type(full_path):
"""Get the type (socket, file, dir, symlink, ...) for the provided path"""
status = {'type': []}
if os.path.ismount(full_path):
status['type'] += ['mount-point']
elif os.path.islink(full_path):
status['type'] += ['symlink']
if os.path.isfile(full_path):
status['type'] += ['file']
elif os.path.isdir(full_path):
status['type'] += ['dir']
if not status['type']:
# fall back to the raw stat mode bits for special files (requires `import stat`)
mode = os.stat(full_path).st_mode
if stat.S_ISSOCK(mode):
status['type'] += ['socket']
elif stat.S_ISCHR(mode):
status['type'] += ['special']
elif stat.S_ISBLK(mode):
status['type'] += ['block-device']
elif stat.S_ISFIFO(mode):
status['type'] += ['pipe']
if not status['type']:
status['type'] += ['unknown']
elif status['type'] and status['type'][-1] == 'symlink':
status['type'] += ['broken']
return status['type']
|
Get the type (socket, file, dir, symlink, ...) for the provided path
|
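A quick sanity check on paths that exist on most Unix-like systems (exact output varies by platform):

print(get_type('/'))           # e.g. ['mount-point', 'dir']
print(get_type('/etc/hosts'))  # e.g. ['file']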
def start_server(app: web.Application = None, port: int = None,
address: str = None, **kwargs: Any) -> HTTPServer:
"""Start server with ``app`` on ``localhost:port``.
If port is not specified, the value of the ``--port`` command line option is used.
"""
app = app or get_app()
port = port if port is not None else config.port
address = address if address is not None else config.address
server = app.listen(port, address=address)
app.server = server
app.loop = asyncio.get_event_loop()
server_config['address'] = address
for sock in server._sockets.values():
if sock.family == socket.AF_INET:
server_config['port'] = sock.getsockname()[1]
break
return server
|
Start server with ``app`` on ``localhost:port``.
If port is not specified, the value of the ``--port`` command line option is used.
|
def plotGrid(self, numLines=(5,5), lineWidth=1, colour="#777777"):
"""Plot NUMLINES[0] vertical gridlines and NUMLINES[1] horizontal gridlines,
while keeping the initial axes bounds that were present upon its calling.
Will not work for certain cases.
"""
x1, x2, y1, y2 = mp.axis()
ra1, dec0 = self.pixToSky(x1, y1)
ra0, dec1 = self.pixToSky(x2, y2)
xNum, yNum = numLines
self.raRange, self.decRange = self.getRaDecRanges(numLines)
# Guard against an RA of zero within the plot
a1 = np.abs(ra1-ra0)
a2 = np.abs( min(ra0, ra1) - (max(ra0, ra1) - 360))
if a2 < a1: #Then we straddle 360 degrees in RA
if ra0 < ra1:
ra1 -= 360
else:
ra0 -= 360
# Draw lines of constant dec
lwr = min(ra0, ra1)
upr = max(ra0, ra1)
stepX = round((upr-lwr) / float(xNum))
ra_deg = np.arange(lwr - 3*stepX, upr + 3.5*stepX, 1, dtype=float)
for dec in self.decRange:
self.plotLine(ra_deg, dec, '-', color = colour, linewidth = lineWidth)
# Draw lines of const ra
lwr = min(dec0, dec1)
upr = max(dec0, dec1)
stepY = round((upr-lwr) / float(yNum))
dec_deg = np.arange(lwr - 3*stepY, upr + 3.5*stepY, 1, dtype=float)
for ra in self.raRange:
self.plotLine(ra, dec_deg, '-', color = colour, linewidth = lineWidth)
mp.axis([x1, x2, y1, y2])
|
Plot numLines[0] vertical gridlines and numLines[1] horizontal gridlines,
while preserving the axis bounds that were in effect when this method was called.
Will not work for certain cases.
|
def prepend_string_list(self, key, value, max_length_key):
"""Prepend a fixed-length string list with a new string.
The oldest string will be removed from the list. If the string is
already in the list, it is shuffled to the top. Use this to implement
things like a 'most recent files' entry.
"""
max_len = self.get(max_length_key)
strings = self.get_string_list(key)
strings = [value] + [x for x in strings if x != value]
strings = strings[:max_len]
self.beginWriteArray(key)
for i, entry in enumerate(strings):
self.setArrayIndex(i)
self.setValue("entry", entry)
self.endArray()
|
Prepend a fixed-length string list with a new string.
The oldest string will be removed from the list. If the string is
already in the list, it is shuffled to the top. Use this to implement
things like a 'most recent files' entry.
|
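Hypothetical usage for a most-recent-files menu; `settings` is assumed to be the QSettings-like object exposing this method, with an integer 'max_recent_files' entry already stored:

settings.prepend_string_list('recent_files', '/tmp/report.txt', 'max_recent_files')
recent = settings.get_string_list('recent_files')  # '/tmp/report.txt' is now first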
def token(cls: Type[ConditionType], left: Any, op: Optional[Any] = None,
right: Optional[Any] = None) -> ConditionType:
"""
Return Condition instance from arguments and Operator
:param left: Left argument
:param op: Operator
:param right: Right argument
:return: Condition instance
"""
condition = cls()
condition.left = left
if op is not None:
condition.op = op
if right is not None:
condition.right = right
return condition
|
Return Condition instance from arguments and Operator
:param left: Left argument
:param op: Operator
:param right: Right argument
:return: Condition instance
|
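Hypothetical usage, assuming a Condition subclass carrying `left`, `op`, and `right` attributes:

cond = Condition.token('age', '>=', 18)
assert (cond.left, cond.op, cond.right) == ('age', '>=', 18)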
def missing_any(da, freq, **kwds):
r"""Return a boolean DataArray indicating whether there are missing days in the resampled array.
Parameters
----------
da : DataArray
Input array at daily frequency.
freq : str
Resampling frequency.
Returns
-------
out : DataArray
A boolean array set to True if any month or year has missing values.
"""
c = da.notnull().resample(time=freq).sum(dim='time')
pfreq = freq.split('-')[0]
if pfreq.endswith('S'):
start_time = c.indexes['time']
end_time = start_time.shift(1, freq=freq)
else:
end_time = c.indexes['time']
start_time = end_time.shift(-1, freq=freq)
n = (end_time - start_time).days
nda = xr.DataArray(n.values, coords={'time': c.time}, dims='time')
return c != nda
|
r"""Return a boolean DataArray indicating whether there are missing days in the resampled array.
Parameters
----------
da : DataArray
Input array at daily frequency.
freq : str
Resampling frequency.
Returns
-------
out : DataArray
A boolean array set to True if any month or year has missing values.
|
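A small sketch (using missing_any and its xarray/pandas imports from above): a daily series with a single NaN in January flags that month as incomplete under a month-start resampling frequency:

import numpy as np
import pandas as pd
import xarray as xr

time = pd.date_range('2000-01-01', '2000-02-29', freq='D')
da = xr.DataArray(np.random.rand(time.size), coords={'time': time}, dims='time')
da[3] = np.nan  # one missing day in January
print(missing_any(da, freq='MS').values)  # [ True False]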
def get_snapshot_policies(self, view=None):
"""
Retrieve a list of snapshot policies.
@param view: View to materialize. Valid values are 'full', 'summary', 'export', 'export_redacted'.
@return: A list of snapshot policies.
@since: API v6
"""
return self._get("snapshots/policies", ApiSnapshotPolicy, True,
params=view and dict(view=view) or None, api_version=6)
|
Retrieve a list of snapshot policies.
@param view: View to materialize. Valid values are 'full', 'summary', 'export', 'export_redacted'.
@return: A list of snapshot policies.
@since: API v6
|
def get_placeholder_image(width, height, name=None, fg_color=get_color('black'),
bg_color=get_color('grey'), text=None, font=u'Verdana.ttf',
fontsize=42, encoding=u'unic', mode='RGBA', fmt=u'PNG'):
"""Little spin-off from https://github.com/Visgean/python-placeholder
that not saves an image and instead returns it."""
size = (width, height)
text = text if text else '{0}x{1}'.format(width, height)
try:
font = ImageFont.truetype(font, size=fontsize, encoding=encoding)
except IOError:
font = ImageFont.load_default()
result_img = Image.new(mode, size, bg_color)
text_size = font.getsize(text)
text_img = Image.new("RGBA", size, bg_color)
# position for the text:
left = size[0] / 2 - text_size[0] / 2
top = size[1] / 2 - text_size[1] / 2
drawing = ImageDraw.Draw(text_img)
drawing.text((left, top),
text,
font=font,
fill=fg_color)
txt_img = ImageOps.fit(text_img, size, method=Image.BICUBIC, centering=(0.5, 0.5))
result_img.paste(txt_img)
file_obj = io.BytesIO()
result_img.save(file_obj, fmt)  # save the composited image, not just the text layer
return file_obj.getvalue()
|
Little spin-off from https://github.com/Visgean/python-placeholder
that does not save the image to disk but returns its raw bytes instead.
|
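Example usage: render a 300x200 placeholder and write the returned PNG bytes to disk:

png_bytes = get_placeholder_image(300, 200)
with open('placeholder.png', 'wb') as fh:
    fh.write(png_bytes)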
def get_pressure(self):
"""
Returns the pressure in Millibars
"""
self._init_pressure() # Ensure pressure sensor is initialised
pressure = 0
data = self._pressure.pressureRead()
if (data[0]): # Pressure valid
pressure = data[1]
return pressure
|
Returns the pressure in Millibars
|
def _construct_role(self, managed_policy_map):
"""Constructs a Lambda execution role based on this SAM function's Policies property.
:returns: the generated IAM Role
:rtype: model.iam.IAMRole
"""
execution_role = IAMRole(self.logical_id + 'Role', attributes=self.get_passthrough_resource_attributes())
execution_role.AssumeRolePolicyDocument = IAMRolePolicies.lambda_assume_role_policy()
managed_policy_arns = [ArnGenerator.generate_aws_managed_policy_arn('service-role/AWSLambdaBasicExecutionRole')]
if self.Tracing:
managed_policy_arns.append(ArnGenerator.generate_aws_managed_policy_arn('AWSXrayWriteOnlyAccess'))
function_policies = FunctionPolicies({"Policies": self.Policies},
# No support for policy templates in the "core"
policy_template_processor=None)
policy_documents = []
if self.DeadLetterQueue:
policy_documents.append(IAMRolePolicies.dead_letter_queue_policy(
self.dead_letter_queue_policy_actions[self.DeadLetterQueue['Type']],
self.DeadLetterQueue['TargetArn']))
for index, policy_entry in enumerate(function_policies.get()):
if policy_entry.type is PolicyTypes.POLICY_STATEMENT:
policy_documents.append({
'PolicyName': execution_role.logical_id + 'Policy' + str(index),
'PolicyDocument': policy_entry.data
})
elif policy_entry.type is PolicyTypes.MANAGED_POLICY:
# There are three options:
# Managed Policy Name (string): Try to convert to Managed Policy ARN
# Managed Policy Arn (string): Insert it directly into the list
# Intrinsic Function (dict): Insert it directly into the list
#
# When you insert into managed_policy_arns list, de-dupe to prevent same ARN from showing up twice
#
policy_arn = policy_entry.data
if isinstance(policy_entry.data, string_types) and policy_entry.data in managed_policy_map:
policy_arn = managed_policy_map[policy_entry.data]
# De-Duplicate managed policy arns before inserting. Mainly useful
# when customer specifies a managed policy which is already inserted
# by SAM, such as AWSLambdaBasicExecutionRole
if policy_arn not in managed_policy_arns:
managed_policy_arns.append(policy_arn)
else:
# Policy Templates are not supported here in the "core"
raise InvalidResourceException(
self.logical_id,
"Policy at index {} in the 'Policies' property is not valid".format(index))
execution_role.ManagedPolicyArns = list(managed_policy_arns)
execution_role.Policies = policy_documents or None
execution_role.PermissionsBoundary = self.PermissionsBoundary
return execution_role
|
Constructs a Lambda execution role based on this SAM function's Policies property.
:returns: the generated IAM Role
:rtype: model.iam.IAMRole
|
def CopyToProto(self, proto):
"""Copies this to the matching proto in descriptor_pb2.
Args:
proto: An empty proto instance from descriptor_pb2.
Raises:
Error: If self couldn't be serialized, due to too few constructor arguments.
"""
if (self.file is not None and
self._serialized_start is not None and
self._serialized_end is not None):
proto.ParseFromString(self.file.serialized_pb[
self._serialized_start:self._serialized_end])
else:
raise Error('Descriptor does not contain serialization.')
|
Copies this to the matching proto in descriptor_pb2.
Args:
proto: An empty proto instance from descriptor_pb2.
Raises:
Error: If self couldn't be serialized, due to too few constructor arguments.
|
def get_region_nt_counts(region, bam, stranded=False):
"""
Get counts of each nucleotide from a bam file for a given region. If R1 and
R2 reads both overlap a position, only one count will be added. If the R1
and R2 reads disagree at a position they both overlap, that read pair is not
used for that position. Can optionally output strand-specific counts.
Parameters
----------
region : str or list
Region of type chrom:start-end, chrom:start-end:strand, or [chrom,
start, end]. The strand is ignored for chrom:start-end:strand. For
chrom:start-end, the coordinates are one-based inclusive. For example,
the query chr1:10-11 will give you the counts for the 10th and 11th
bases of chr1. For [chrom, start, end], the coordinates are zero-based
and end exclusive (like a bed file). The query [chr1, 9, 11] will give
you the coverage of the 10th and 11th bases of chr1. The region value is
passed directly to pysam's pileup function.
bam : pysam.calignmentfile.AlignmentFile or str
Bam file opened with pysam or path to bam file (must be sorted and
indexed).
stranded : boolean
Boolean indicating whether read data is stranded and stranded nucleotide
counts should be returned. Assumes R1 read on reverse strand implies +
strand coverage etc.
Returns
-------
counts : pandas.DataFrame
Data frame with the counts for each base in the region. The index of
this data frame is one-based for compatibility with VCF files.
"""
# TODO: I should figure out what the different possible values are that
# pysam could give me back (so far I only have ATCGN). Can I get deletions
# and insertions?
# TODO: This could probably be parallelized.
if isinstance(bam, str):
bam = pysam.AlignmentFile(bam, 'rb')
if isinstance(region, str):
r = parse_region(region)
if len(r) == 3:
chrom, start, end = r
elif len(r) == 4:
chrom, start, end, strand = r
start = int(start)
end = int(end)
ind = ['{}:{}'.format(chrom, x) for
x in range(start, end + 1)]
pp = bam.pileup(region=region, truncate=True)
elif isinstance(region, (list, tuple)):
chrom, start, end = region
ind = ['{}:{}'.format(chrom, x) for
x in range(int(start) + 1, int(end) + 1)]
pp = bam.pileup(chrom, start, end, truncate=True)
cols = ['A', 'T', 'C', 'G', 'N']
if stranded:
cols = ['{}+'.format(x) for x in cols] + ['{}-'.format(x) for x in cols]
counts = pd.DataFrame(0, index=ind, columns=cols)
for pc in pp:
# Most of this code deals with R1 and R2 reads that overlap so that we
# don't get two counts from one fragment.
pos = pc.reference_pos + 1
r1_qnames = []
r1_nts = []
r2_qnames = []
r2_nts = []
for pr in pc.pileups:
qnames = [r1_qnames, r2_qnames][pr.alignment.is_read2]
nts = [r1_nts, r2_nts][pr.alignment.is_read2]
nt = _pos_nt(pr, pc.reference_pos, stranded)
if nt:
qnames.append(pr.alignment.qname)
nts.append(nt)
r1 = pd.Series(r1_nts, index=r1_qnames)
r2 = pd.Series(r2_nts, index=r2_qnames)
df = pd.DataFrame([r1, r2], index=['R1', 'R2']).T
singles = df[df.isnull().sum(axis=1) == 1]
doubles = df.dropna()
vcs = []
vcs.append(singles['R1'].value_counts())
vcs.append(singles['R2'].value_counts())
doubles = doubles[doubles.R1 == doubles.R2]
vcs.append(doubles.R1.value_counts())
for vc in vcs:
counts.loc['{}:{}'.format(chrom, pos), vc.index] += vc
return counts
|
Get counts of each nucleotide from a bam file for a given region. If R1 and
R2 reads both overlap a position, only one count will be added. If the R1
and R2 reads disagree at a position they both overlap, that read pair is not
used for that position. Can optionally output strand-specific counts.
Parameters
----------
region : str or list
Region of type chrom:start-end, chrom:start-end:strand, or [chrom,
start, end]. The strand is ignored for chrom:start-end:strand. For
chrom:start-end, the coordinates are one-based inclusive. For example,
the query chr1:10-11 will give you the counts for the 10th and 11th
bases of chr1. For [chrom, start, end], the coordinates are zero-based
and end exclusive (like a bed file). The query [chr1, 9, 11] will give
you the coverage of the 10th and 11th bases of chr1. The region value is
passed directly to pysam's pileup function.
bam : pysam.calignmentfile.AlignmentFile or str
Bam file opened with pysam or path to bam file (must be sorted and
indexed).
stranded : boolean
Boolean indicating whether read data is stranded and stranded nucleotide
counts should be returned. Assumes R1 read on reverse strand implies +
strand coverage etc.
Returns
-------
counts : pandas.DataFrame
Data frame with the counts for each base in the region. The index of
this data frame is one-based for compatibility with VCF files.
|
def is_rootlevel(self):
"""
Determine if the Activity is at the root level of a project.
It will look for the name of the parent which should be either ActivityRootNames.WORKFLOW_ROOT or
ActivityRootNames.CATALOG_ROOT. If the name of the parent cannot be found an additional API call is made
to retrieve the parent object (based on the `parent_id` in the json_data).
:return: Return True if it is a root level activity, otherwise return False
:rtype: bool
"""
# when the activity itself is a root, then return False immediately
if self.is_root():
return False
parent_name = None
parent_dict = self._json_data.get('parent_id_name')
if parent_dict and 'name' in parent_dict:
parent_name = parent_dict.get('name')
if not parent_dict:
parent_name = self._client.activity(id=self._json_data.get('parent_id')).name
if parent_name in ActivityRootNames.values():
return True
return False
|
Determine if the Activity is at the root level of a project.
It will look for the name of the parent which should be either ActivityRootNames.WORKFLOW_ROOT or
ActivityRootNames.CATALOG_ROOT. If the name of the parent cannot be found an additional API call is made
to retrieve the parent object (based on the `parent_id` in the json_data).
:return: Return True if it is a root level activity, otherwise return False
:rtype: bool
|
def update(self, friendly_name=values.unset,
default_service_role_sid=values.unset,
default_channel_role_sid=values.unset,
default_channel_creator_role_sid=values.unset,
read_status_enabled=values.unset, reachability_enabled=values.unset,
typing_indicator_timeout=values.unset,
consumption_report_interval=values.unset,
notifications_new_message_enabled=values.unset,
notifications_new_message_template=values.unset,
notifications_new_message_sound=values.unset,
notifications_new_message_badge_count_enabled=values.unset,
notifications_added_to_channel_enabled=values.unset,
notifications_added_to_channel_template=values.unset,
notifications_added_to_channel_sound=values.unset,
notifications_removed_from_channel_enabled=values.unset,
notifications_removed_from_channel_template=values.unset,
notifications_removed_from_channel_sound=values.unset,
notifications_invited_to_channel_enabled=values.unset,
notifications_invited_to_channel_template=values.unset,
notifications_invited_to_channel_sound=values.unset,
pre_webhook_url=values.unset, post_webhook_url=values.unset,
webhook_method=values.unset, webhook_filters=values.unset,
limits_channel_members=values.unset,
limits_user_channels=values.unset,
media_compatibility_message=values.unset,
pre_webhook_retry_count=values.unset,
post_webhook_retry_count=values.unset,
notifications_log_enabled=values.unset):
"""
Update the ServiceInstance
:param unicode friendly_name: A string to describe the resource
:param unicode default_service_role_sid: The service role assigned to users when they are added to the service
:param unicode default_channel_role_sid: The channel role assigned to users when they are added to a channel
:param unicode default_channel_creator_role_sid: The channel role assigned to a channel creator when they join a new channel
:param bool read_status_enabled: Whether to enable the Message Consumption Horizon feature
:param bool reachability_enabled: Whether to enable the Reachability Indicator feature for this Service instance
:param unicode typing_indicator_timeout: How long in seconds to wait before assuming the user is no longer typing
:param unicode consumption_report_interval: DEPRECATED
:param bool notifications_new_message_enabled: Whether to send a notification when a new message is added to a channel
:param unicode notifications_new_message_template: The template to use to create the notification text displayed when a new message is added to a channel
:param unicode notifications_new_message_sound: The name of the sound to play when a new message is added to a channel
:param bool notifications_new_message_badge_count_enabled: Whether the new message badge is enabled
:param bool notifications_added_to_channel_enabled: Whether to send a notification when a member is added to a channel
:param unicode notifications_added_to_channel_template: The template to use to create the notification text displayed when a member is added to a channel
:param unicode notifications_added_to_channel_sound: The name of the sound to play when a member is added to a channel
:param bool notifications_removed_from_channel_enabled: Whether to send a notification to a user when they are removed from a channel
:param unicode notifications_removed_from_channel_template: The template to use to create the notification text displayed to a user when they are removed
:param unicode notifications_removed_from_channel_sound: The name of the sound to play to a user when they are removed from a channel
:param bool notifications_invited_to_channel_enabled: Whether to send a notification when a user is invited to a channel
:param unicode notifications_invited_to_channel_template: The template to use to create the notification text displayed when a user is invited to a channel
:param unicode notifications_invited_to_channel_sound: The name of the sound to play when a user is invited to a channel
:param unicode pre_webhook_url: The webhook URL for pre-event webhooks
:param unicode post_webhook_url: The URL for post-event webhooks
:param unicode webhook_method: The HTTP method to use for both PRE and POST webhooks
:param unicode webhook_filters: The list of WebHook events that are enabled for this Service instance
:param unicode limits_channel_members: The maximum number of Members that can be added to Channels within this Service
:param unicode limits_user_channels: The maximum number of Channels Users can be a Member of within this Service
:param unicode media_compatibility_message: The message to send when a media message has no text
:param unicode pre_webhook_retry_count: Count of times webhook will be retried in case of timeout or 429/503/504 HTTP responses
:param unicode post_webhook_retry_count: The number of times calls to the `post_webhook_url` will be retried
:param bool notifications_log_enabled: Whether to log notifications
:returns: Updated ServiceInstance
:rtype: twilio.rest.chat.v2.service.ServiceInstance
"""
return self._proxy.update(
friendly_name=friendly_name,
default_service_role_sid=default_service_role_sid,
default_channel_role_sid=default_channel_role_sid,
default_channel_creator_role_sid=default_channel_creator_role_sid,
read_status_enabled=read_status_enabled,
reachability_enabled=reachability_enabled,
typing_indicator_timeout=typing_indicator_timeout,
consumption_report_interval=consumption_report_interval,
notifications_new_message_enabled=notifications_new_message_enabled,
notifications_new_message_template=notifications_new_message_template,
notifications_new_message_sound=notifications_new_message_sound,
notifications_new_message_badge_count_enabled=notifications_new_message_badge_count_enabled,
notifications_added_to_channel_enabled=notifications_added_to_channel_enabled,
notifications_added_to_channel_template=notifications_added_to_channel_template,
notifications_added_to_channel_sound=notifications_added_to_channel_sound,
notifications_removed_from_channel_enabled=notifications_removed_from_channel_enabled,
notifications_removed_from_channel_template=notifications_removed_from_channel_template,
notifications_removed_from_channel_sound=notifications_removed_from_channel_sound,
notifications_invited_to_channel_enabled=notifications_invited_to_channel_enabled,
notifications_invited_to_channel_template=notifications_invited_to_channel_template,
notifications_invited_to_channel_sound=notifications_invited_to_channel_sound,
pre_webhook_url=pre_webhook_url,
post_webhook_url=post_webhook_url,
webhook_method=webhook_method,
webhook_filters=webhook_filters,
limits_channel_members=limits_channel_members,
limits_user_channels=limits_user_channels,
media_compatibility_message=media_compatibility_message,
pre_webhook_retry_count=pre_webhook_retry_count,
post_webhook_retry_count=post_webhook_retry_count,
notifications_log_enabled=notifications_log_enabled,
)
|
Update the ServiceInstance
:param unicode friendly_name: A string to describe the resource
:param unicode default_service_role_sid: The service role assigned to users when they are added to the service
:param unicode default_channel_role_sid: The channel role assigned to users when they are added to a channel
:param unicode default_channel_creator_role_sid: The channel role assigned to a channel creator when they join a new channel
:param bool read_status_enabled: Whether to enable the Message Consumption Horizon feature
:param bool reachability_enabled: Whether to enable the Reachability Indicator feature for this Service instance
:param unicode typing_indicator_timeout: How long in seconds to wait before assuming the user is no longer typing
:param unicode consumption_report_interval: DEPRECATED
:param bool notifications_new_message_enabled: Whether to send a notification when a new message is added to a channel
:param unicode notifications_new_message_template: The template to use to create the notification text displayed when a new message is added to a channel
:param unicode notifications_new_message_sound: The name of the sound to play when a new message is added to a channel
:param bool notifications_new_message_badge_count_enabled: Whether the new message badge is enabled
:param bool notifications_added_to_channel_enabled: Whether to send a notification when a member is added to a channel
:param unicode notifications_added_to_channel_template: The template to use to create the notification text displayed when a member is added to a channel
:param unicode notifications_added_to_channel_sound: The name of the sound to play when a member is added to a channel
:param bool notifications_removed_from_channel_enabled: Whether to send a notification to a user when they are removed from a channel
:param unicode notifications_removed_from_channel_template: The template to use to create the notification text displayed to a user when they are removed
:param unicode notifications_removed_from_channel_sound: The name of the sound to play to a user when they are removed from a channel
:param bool notifications_invited_to_channel_enabled: Whether to send a notification when a user is invited to a channel
:param unicode notifications_invited_to_channel_template: The template to use to create the notification text displayed when a user is invited to a channel
:param unicode notifications_invited_to_channel_sound: The name of the sound to play when a user is invited to a channel
:param unicode pre_webhook_url: The webhook URL for pre-event webhooks
:param unicode post_webhook_url: The URL for post-event webhooks
:param unicode webhook_method: The HTTP method to use for both PRE and POST webhooks
:param unicode webhook_filters: The list of WebHook events that are enabled for this Service instance
:param unicode limits_channel_members: The maximum number of Members that can be added to Channels within this Service
:param unicode limits_user_channels: The maximum number of Channels Users can be a Member of within this Service
:param unicode media_compatibility_message: The message to send when a media message has no text
:param unicode pre_webhook_retry_count: Count of times webhook will be retried in case of timeout or 429/503/504 HTTP responses
:param unicode post_webhook_retry_count: The number of times calls to the `post_webhook_url` will be retried
:param bool notifications_log_enabled: Whether to log notifications
:returns: Updated ServiceInstance
:rtype: twilio.rest.chat.v2.service.ServiceInstance
|
def _load_model(self):
"""Loads robot and optionally add grippers."""
super()._load_model()
self.mujoco_robot = Baxter()
if self.has_gripper_right:
self.gripper_right = gripper_factory(self.gripper_right_name)
if not self.gripper_visualization:
self.gripper_right.hide_visualization()
self.mujoco_robot.add_gripper("right_hand", self.gripper_right)
if self.has_gripper_left:
self.gripper_left = gripper_factory(self.gripper_left_name)
if not self.gripper_visualization:
self.gripper_left.hide_visualization()
self.mujoco_robot.add_gripper("left_hand", self.gripper_left)
|
Loads robot and optionally add grippers.
|
def get_default_cassandra_connection():
"""
Return the first default cassandra connection.
:return: Tuple of (alias, connection)
"""
for alias, conn in get_cassandra_connections():
if conn.connection.default:
return alias, conn
return list(get_cassandra_connections())[0]
|
Return the first default cassandra connection.
:return: Tuple of (alias, connection)
|
def _set_people(self, people):
""" Sets who the object is sent to """
if hasattr(people, "object_type"):
people = [people]
elif hasattr(people, "__iter__"):
people = list(people)
return people
|
Normalizes who the object is sent to into a list
|
def _simplify_arguments(arguments):
"""
If positional or keyword arguments are empty return only one or the other.
"""
if len(arguments.args) == 0:
return arguments.kwargs
elif len(arguments.kwargs) == 0:
return arguments.args
else:
return arguments
|
If positional or keyword arguments are empty return only one or the other.
|
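A quick usage sketch for the helper above, assuming a hypothetical `Arguments` container with `args` and `kwargs` attributes (the real type is defined elsewhere in the source):
from collections import namedtuple

Arguments = namedtuple('Arguments', ['args', 'kwargs'])  # hypothetical stand-in

_simplify_arguments(Arguments(args=(), kwargs={'a': 1}))    # -> {'a': 1}
_simplify_arguments(Arguments(args=(1, 2), kwargs={}))      # -> (1, 2)
_simplify_arguments(Arguments(args=(1,), kwargs={'a': 1}))  # -> the Arguments object unchanged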
def add(self, new_results):
""" Add new benchmark results. """
for result in new_results:
result.update(self.context)
self.results = self.results.append(result, ignore_index=True)
|
Add new benchmark results.
|
def format_number_field(__, prec, number, locale):
"""Formats a number field."""
prec = NUMBER_DECIMAL_DIGITS if prec is None else int(prec)
locale = Locale.parse(locale)
pattern = locale.decimal_formats.get(None)
return pattern.apply(number, locale, force_frac=(prec, prec))
|
Formats a number field.
|
def __ComputeEndByte(self, start, end=None, use_chunks=True):
"""Compute the last byte to fetch for this request.
This is all based on the HTTP spec for Range and
Content-Range.
Note that this is potentially confusing in several ways:
* the value for the last byte is 0-based, eg "fetch 10 bytes
from the beginning" would return 9 here.
* if we have no information about size, and don't want to
use the chunksize, we'll return None.
See the tests for more examples.
Args:
start: byte to start at.
end: (int or None, default: None) Suggested last byte.
use_chunks: (bool, default: True) If False, ignore self.chunksize.
Returns:
Last byte to use in a Range header, or None.
"""
end_byte = end
if start < 0 and not self.total_size:
return end_byte
if use_chunks:
alternate = start + self.chunksize - 1
if end_byte is not None:
end_byte = min(end_byte, alternate)
else:
end_byte = alternate
if self.total_size:
alternate = self.total_size - 1
if end_byte is not None:
end_byte = min(end_byte, alternate)
else:
end_byte = alternate
return end_byte
|
Compute the last byte to fetch for this request.
This is all based on the HTTP spec for Range and
Content-Range.
Note that this is potentially confusing in several ways:
* the value for the last byte is 0-based, eg "fetch 10 bytes
from the beginning" would return 9 here.
* if we have no information about size, and don't want to
use the chunksize, we'll return None.
See the tests for more examples.
Args:
start: byte to start at.
end: (int or None, default: None) Suggested last byte.
use_chunks: (bool, default: True) If False, ignore self.chunksize.
Returns:
Last byte to use in a Range header, or None.
|
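A minimal standalone sketch of the same end-byte arithmetic, with `chunksize` and `total_size` as plain parameters instead of instance attributes, to make the two clamps easier to trace:
def compute_end_byte(start, end=None, chunksize=1024, total_size=None, use_chunks=True):
    # Mirror of the method above: clamp the suggested end byte first by
    # the chunk boundary, then by the known total size (both 0-based).
    end_byte = end
    if start < 0 and not total_size:
        return end_byte
    if use_chunks:
        alternate = start + chunksize - 1
        end_byte = alternate if end_byte is None else min(end_byte, alternate)
    if total_size:
        alternate = total_size - 1
        end_byte = alternate if end_byte is None else min(end_byte, alternate)
    return end_byte

compute_end_byte(0, chunksize=10, total_size=100)   # -> 9  ("fetch 10 bytes" ends at byte 9)
compute_end_byte(95, chunksize=10, total_size=100)  # -> 99 (clamped by the total size)
compute_end_byte(0, use_chunks=False)               # -> None (no size information at all)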
def run_ut_python3_qemu_internal():
"""this runs inside the vm"""
pkg = glob.glob('mxnet_dist/*.whl')[0]
logging.info("=== NOW Running inside QEMU ===")
logging.info("PIP Installing %s", pkg)
check_call(['sudo', 'pip3', 'install', pkg])
logging.info("PIP Installing mxnet/test_requirements.txt")
check_call(['sudo', 'pip3', 'install', '-r', 'mxnet/test_requirements.txt'])
logging.info("Running tests in mxnet/tests/python/unittest/")
check_call(['nosetests', '--with-timer', '--with-xunit', '--xunit-file', 'nosetests_unittest.xml', '--verbose', 'mxnet/tests/python/unittest/test_engine.py'])
|
this runs inside the vm
|
def do_status(self, arg):
''' Print information about the arm. '''
info = self.arm.get_info()
max_len = len(max(info.keys(), key=len))
print(self.style.theme('\nArm Status'))
for key, value in info.items():
print(self.style.help(key.ljust(max_len + 2), str(value)))
print()
|
Print information about the arm.
|
def parse(self, buf: memoryview, params: Params) \
-> Tuple[Command, memoryview]:
"""Parse the given bytes into a command. The basic syntax is a tag
string, a command name, possibly some arguments, and then an endline.
If the command has a complete structure but cannot be parsed, an
:class:`InvalidCommand` is returned.
Args:
buf: The bytes to parse.
params: The parsing parameters.
"""
try:
tag, buf = Tag.parse(buf, params)
except NotParseable as exc:
return InvalidCommand(params, exc), buf[0:0]
else:
params = params.copy(tag=tag.value)
cmd_parts: List[bytes] = []
while True:
try:
_, buf = Space.parse(buf, params)
atom, buf = Atom.parse(buf, params)
cmd_parts.append(atom.value.upper())
except NotParseable as exc:
return InvalidCommand(params, exc), buf[0:0]
command = b' '.join(cmd_parts)
cmd_type = self.commands.get(command)
if not cmd_type:
return InvalidCommand(params, None, command), buf[0:0]
elif not cmd_type.compound:
break
params = params.copy(command_name=command)
try:
return cmd_type.parse(buf, params)
except NotParseable as exc:
return InvalidCommand(params, exc, command, cmd_type), buf[0:0]
|
Parse the given bytes into a command. The basic syntax is a tag
string, a command name, possibly some arguments, and then an endline.
If the command has a complete structure but cannot be parsed, an
:class:`InvalidCommand` is returned.
Args:
buf: The bytes to parse.
params: The parsing parameters.
|
def object_to_json(obj):
"""Convert object that cannot be natively serialized by python to JSON representation."""
if isinstance(obj, (datetime.datetime, datetime.date, datetime.time)):
return obj.isoformat()
return str(obj)
|
Convert object that cannot be natively serialized by python to JSON representation.
|
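Typical usage is as the `default` hook of `json.dumps`:
import datetime
import json

payload = {'when': datetime.datetime(2019, 1, 1, 12, 30)}
json.dumps(payload, default=object_to_json)  # -> '{"when": "2019-01-01T12:30:00"}'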
def multi_p_run(tot_num, _func, worker, params, n_process):
"""
Run _func with multi-process using params.
"""
from multiprocessing import Process, Queue
out_q = Queue()
procs = []
split_num = split_seq(list(range(0, tot_num)), n_process)
print(tot_num, ">>", split_num)
split_len = len(split_num)
if n_process > split_len:
n_process = split_len
for i in range(n_process):
_p = Process(target=_func,
args=(worker, split_num[i][0], split_num[i][1],
params, out_q))
_p.daemon = True
procs.append(_p)
_p.start()
try:
result = []
for i in range(n_process):
result.append(out_q.get())
for i in procs:
i.join()
except KeyboardInterrupt:
print('Killing all the children in the pool.')
for i in procs:
i.terminate()
i.join()
return -1
while not out_q.empty():
print(out_q.get(block=False))
return result
|
Run _func with multi-process using params.
|
def _get(self, uri):
"""
Handles the communication with the API when getting
a specific resource managed by this class.
"""
resp, resp_body = self.api.method_get(uri)
return self.resource_class(self, resp_body, self.response_key,
loaded=True)
|
Handles the communication with the API when getting
a specific resource managed by this class.
|
def response(self):
"""
Dictionary of public and private, hostnames and ips.
:rtype: dict
"""
describe_request_params = {}
if self.filter is not None:
if type(self.filter) is not dict:
try:
filters = json.loads(self.filter)
except TypeError:
filters = self._parse_cli_filters(self.filter)
else:
filters = self.filter
describe_request_params['Filters'] = filters
if self.vpc_ids is not None:
if 'Filters' not in describe_request_params:
describe_request_params['Filters'] = []
describe_request_params['Filters'].append({
'Name': 'vpc-id',
'Values': self.vpc_ids.split(',')
})
reservations = self.session().client('ec2').describe_instances(**describe_request_params)
return self._process_reservations(reservations)
|
Dictionary of public and private, hostnames and ips.
:rtype: dict
|
def _LinearMapByteStream(
self, byte_stream, byte_offset=0, context=None, **unused_kwargs):
"""Maps a data type sequence on a byte stream.
Args:
byte_stream (bytes): byte stream.
byte_offset (Optional[int]): offset into the byte stream where to start.
context (Optional[DataTypeMapContext]): data type map context.
Returns:
tuple[object, ...]: mapped values.
Raises:
MappingError: if the data type definition cannot be mapped on
the byte stream.
"""
elements_data_size = self._data_type_definition.GetByteSize()
self._CheckByteStreamSize(byte_stream, byte_offset, elements_data_size)
try:
struct_tuple = self._operation.ReadFrom(byte_stream[byte_offset:])
mapped_values = map(self._element_data_type_map.MapValue, struct_tuple)
except Exception as exception:
error_string = (
'Unable to read: {0:s} from byte stream at offset: {1:d} '
'with error: {2!s}').format(
self._data_type_definition.name, byte_offset, exception)
raise errors.MappingError(error_string)
if context:
context.byte_size = elements_data_size
return tuple(mapped_values)
|
Maps a data type sequence on a byte stream.
Args:
byte_stream (bytes): byte stream.
byte_offset (Optional[int]): offset into the byte stream where to start.
context (Optional[DataTypeMapContext]): data type map context.
Returns:
tuple[object, ...]: mapped values.
Raises:
MappingError: if the data type definition cannot be mapped on
the byte stream.
|
def codes2unicode(codes, composed=True):
''' Convert Hanyang-PUA code iterable to Syllable-Initial-Peak-Final
encoded unicode string.
:param codes:
an iterable of Hanyang-PUA code
:param composed:
the result should be composed as much as possible (default True)
:return: Syllable-Initial-Peak-Final encoded unicode string
'''
pua = u''.join(unichr(code) for code in codes)
return translate(pua, composed=composed)
|
Convert Hanyang-PUA code iterable to Syllable-Initial-Peak-Final
encoded unicode string.
:param codes:
an iterable of Hanyang-PUA code
:param composed:
the result should be composed as much as possible (default True)
:return: Syllable-Initial-Peak-Final encoded unicode string
|
def write(self, s):
"""
Write wrapper.
Parameters
----------
s : bytes
Bytes to write
"""
try:
self._write_lock.acquire()
self.handle.sendall(s)
except socket.timeout:
self._connect()
except socket.error:
raise IOError
finally:
self._write_lock.release()
|
Write wrapper.
Parameters
----------
s : bytes
Bytes to write
|
def _write_current_buffer_for_group_key(self, key):
"""
Find the buffer for a given group key, prepare it to be written
and writes it calling write() method.
"""
write_info = self.write_buffer.pack_buffer(key)
self.write(write_info.get('file_path'),
self.write_buffer.grouping_info[key]['membership'])
self.write_buffer.clean_tmp_files(write_info)
self.write_buffer.add_new_buffer_for_group(key)
|
Find the buffer for a given group key, prepare it to be written
and writes it calling write() method.
|
def mode_string_v10(msg):
'''mode string for 1.0 protocol, from heartbeat'''
if msg.autopilot == mavlink.MAV_AUTOPILOT_PX4:
return interpret_px4_mode(msg.base_mode, msg.custom_mode)
if not msg.base_mode & mavlink.MAV_MODE_FLAG_CUSTOM_MODE_ENABLED:
return "Mode(0x%08x)" % msg.base_mode
if msg.type in [ mavlink.MAV_TYPE_QUADROTOR, mavlink.MAV_TYPE_HEXAROTOR,
mavlink.MAV_TYPE_OCTOROTOR, mavlink.MAV_TYPE_TRICOPTER,
mavlink.MAV_TYPE_COAXIAL,
mavlink.MAV_TYPE_HELICOPTER ]:
if msg.custom_mode in mode_mapping_acm:
return mode_mapping_acm[msg.custom_mode]
if msg.type == mavlink.MAV_TYPE_FIXED_WING:
if msg.custom_mode in mode_mapping_apm:
return mode_mapping_apm[msg.custom_mode]
if msg.type == mavlink.MAV_TYPE_GROUND_ROVER:
if msg.custom_mode in mode_mapping_rover:
return mode_mapping_rover[msg.custom_mode]
if msg.type == mavlink.MAV_TYPE_ANTENNA_TRACKER:
if msg.custom_mode in mode_mapping_tracker:
return mode_mapping_tracker[msg.custom_mode]
return "Mode(%u)" % msg.custom_mode
|
mode string for 1.0 protocol, from heartbeat
|
def find_features(seqs, locus_tag="all", utr_len=200):
"""Find features in sequences by locus tag"""
found_features = []
for seq_i in seqs:
for feature in seq_i.features:
if feature.type == "CDS" and (locus_tag == "all" or \
('locus_tag' in feature.qualifiers and \
feature.qualifiers['locus_tag'][0] == locus_tag)):
start = max(0, feature.location.nofuzzy_start - utr_len)
stop = max(0, feature.location.nofuzzy_end + utr_len)
feature_seq = seq_i.seq[start:stop]
f_match = FeatureMatch(feature, feature_seq, feature.strand,
utr_len)
found_features.append(f_match)
return found_features
|
Find features in sequences by locus tag
|
def ase(dbuser, dbpassword, args, gui):
"""Connection to atomic structures on the Catalysis-Hub
server with ase db cli.
Arguments to the ase db cli client must be enclosed in one string.
For example: <cathub ase 'formula=Ag6In6H -s energy -L 200'>.
To see possible ase db arguments run <ase db --help>"""
if dbuser == 'upload':
dbpassword = 'cHyuuQH0'
db = CathubPostgreSQL(user=dbuser, password=dbpassword)
db._connect()
server_name = db.server_name
subprocess.call(
("ase db {} {}".format(server_name, args)).split())
if gui:
args = args.split('-')[0]
subprocess.call(
('ase gui {}@{}'.format(server_name, args)).split())
|
Connection to atomic structures on the Catalysis-Hub
server with ase db cli.
Arguments to the ase db cli client must be enclosed in one string.
For example: <cathub ase 'formula=Ag6In6H -s energy -L 200'>.
To see possible ase db arguments run <ase db --help>
|
def update_domain_queues(self):
'''
Check to update existing queues already in memory
new queues are created elsewhere
'''
for key in self.domain_config:
final_key = "{name}:{domain}:queue".format(
name=self.spider.name,
domain=key)
# we already have a throttled queue for this domain, update it to new settings
if final_key in self.queue_dict:
self.queue_dict[final_key][0].window = float(self.domain_config[key]['window'])
self.logger.debug("Updated queue {q} with new config"
.format(q=final_key))
# if scale is applied, scale back; otherwise use updated hits
if 'scale' in self.domain_config[key]:
# round to int
hits = int(self.domain_config[key]['hits'] * self.fit_scale(
self.domain_config[key]['scale']))
self.queue_dict[final_key][0].limit = float(hits)
else:
self.queue_dict[final_key][0].limit = float(self.domain_config[key]['hits'])
|
Check to update existing queues already in memory
new queues are created elsewhere
|
def search_mergedcell_value(xl_sheet, merged_range):
"""
Search merged_range for the first cell with a value; return that cell, or False if none is found.
"""
for search_row_idx in range(merged_range[0], merged_range[1]):
for search_col_idx in range(merged_range[2], merged_range[3]):
if xl_sheet.cell(search_row_idx, search_col_idx).value:
return xl_sheet.cell(search_row_idx, search_col_idx)
return False
|
Search merged_range for the first cell with a value; return that cell, or False if none is found.
|
def _clear(self):
'''
Actual clear
'''
ret = ([],[])
for q in self.queues.values():
pr = q._clear()
ret[0].extend(pr[0])
ret[1].extend(pr[1])
self.totalSize = 0
del self.prioritySet[:]
if self.isWaited and self.canAppend():
self.isWaited = False
ret[0].append(QueueCanWriteEvent(self))
if self.isWaitEmpty and not self:
self.isWaitEmpty = False
ret[1].append(QueueIsEmptyEvent(self))
self.blockEvents.clear()
return ret
|
Actual clear
|
def dataframe(self):
"""
Returns a pandas DataFrame containing all other class properties and
values. The index for the DataFrame is the string URI that is used to
instantiate the class, such as '201806070VEG'.
"""
if self._away_goals is None and self._home_goals is None:
return None
fields_to_include = {
'arena': self.arena,
'attendance': self.attendance,
'away_assists': self.away_assists,
'away_even_strength_assists': self.away_even_strength_assists,
'away_even_strength_goals': self.away_even_strength_goals,
'away_game_winning_goals': self.away_game_winning_goals,
'away_goals': self.away_goals,
'away_penalties_in_minutes': self.away_penalties_in_minutes,
'away_points': self.away_points,
'away_power_play_assists': self.away_power_play_assists,
'away_power_play_goals': self.away_power_play_goals,
'away_save_percentage': self.away_save_percentage,
'away_saves': self.away_saves,
'away_shooting_percentage': self.away_shooting_percentage,
'away_short_handed_assists': self.away_short_handed_assists,
'away_short_handed_goals': self.away_short_handed_goals,
'away_shots_on_goal': self.away_shots_on_goal,
'away_shutout': self.away_shutout,
'date': self.date,
'duration': self.duration,
'home_assists': self.home_assists,
'home_even_strength_assists': self.home_even_strength_assists,
'home_even_strength_goals': self.home_even_strength_goals,
'home_game_winning_goals': self.home_game_winning_goals,
'home_goals': self.home_goals,
'home_penalties_in_minutes': self.home_penalties_in_minutes,
'home_points': self.home_points,
'home_power_play_assists': self.home_power_play_assists,
'home_power_play_goals': self.home_power_play_goals,
'home_save_percentage': self.home_save_percentage,
'home_saves': self.home_saves,
'home_shooting_percentage': self.home_shooting_percentage,
'home_short_handed_assists': self.home_short_handed_assists,
'home_short_handed_goals': self.home_short_handed_goals,
'home_shots_on_goal': self.home_shots_on_goal,
'home_shutout': self.home_shutout,
'losing_abbr': self.losing_abbr,
'losing_name': self.losing_name,
'time': self.time,
'winner': self.winner,
'winning_abbr': self.winning_abbr,
'winning_name': self.winning_name
}
return pd.DataFrame([fields_to_include], index=[self._uri])
|
Returns a pandas DataFrame containing all other class properties and
values. The index for the DataFrame is the string URI that is used to
instantiate the class, such as '201806070VEG'.
|
def GetByteSize(self):
"""Retrieves the byte size of the data type definition.
Returns:
int: data type size in bytes or None if size cannot be determined.
"""
if not self.element_data_type_definition:
return None
if self.elements_data_size:
return self.elements_data_size
if not self.number_of_elements:
return None
element_byte_size = self.element_data_type_definition.GetByteSize()
if not element_byte_size:
return None
return element_byte_size * self.number_of_elements
|
Retrieves the byte size of the data type definition.
Returns:
int: data type size in bytes or None if size cannot be determined.
|
def check_positive_flux(cls, kwargs_ps):
"""
check whether inferred linear parameters are positive
:param kwargs_ps:
:return: bool
"""
pos_bool = True
for kwargs in kwargs_ps:
point_amp = kwargs['point_amp']
for amp in point_amp:
if amp < 0:
pos_bool = False
break
return pos_bool
|
check whether inferred linear parameters are positive
:param kwargs_ps:
:return: bool
|
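For example, with the keyword-list structure the check expects (shown here as a plain call; in the source it is a method on the class):
kwargs_ps = [{'point_amp': [1.2, 0.7]}, {'point_amp': [0.3]}]
check_positive_flux(kwargs_ps)                     # -> True (all amplitudes non-negative)
check_positive_flux([{'point_amp': [1.0, -0.1]}])  # -> False (one negative amplitude)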
def load_genotypes(self):
"""Actually loads the first chunk of genotype data into memory due to \
the individual oriented format of MACH data.
Due to the fragmented approach to data loading necessary to avoid
running out of RAM, this function will initialize the data structures
with the first chunk of loci and prepare it for otherwise normal
iteration.
Also, because the parser can be assigned more than one .gen file to
read from, it will automatically move to the next file when the
first is exhausted.
"""
lb = self.chunk * Parser.chunk_stride + 2
ub = (self.chunk + 1) * Parser.chunk_stride + 2
buff = None
self.current_file = self.archives[self.file_index]
self.info_file = self.info_files[self.file_index]
while buff is None:
try:
buff = self.parse_genotypes(lb, ub)
except EOFError:
buff = None
if self.file_index < (len(self.archives) - 1):
self.file_index += 1
self.chunk = 0
lb = self.chunk * Parser.chunk_stride + 2
ub = (self.chunk + 1) * Parser.chunk_stride + 2
self.current_file = self.archives[self.file_index]
self.info_file = self.info_files[self.file_index]
else:
raise StopIteration
# Numpy's usecols don't prevent it from loading entire file, which is
# too big considering ours are 60+ gigs
self.dosages = numpy.transpose(buff)
file = self.openfile(self.info_file)
file.readline() # drop header
lindex = 0
while lindex < lb - 2:
file.readline()
lindex += 1
self.markers = []
self.rsids = []
self.locus_count = 0
self.maf = []
self.alleles = []
self.rsquared = []
while lindex < (ub - 2):
words = file.readline().strip().split()
if len(words) > 0:
loc, al2, al1, freq1, maf, avgcall,rsq = words[0:7]
marker = [-1, lindex]
if self.chrpos_encoding:
marker = [int(x) for x in loc.split(":")[0:2]]
if len(marker) < 2:
raise libgwas.exceptions.MalformedInputFile("MACH .info"+
" file IDs must be in the format chrom:rsid")
if len(marker) > 2:
self.rsids.append(marker[2])
self.markers.append(marker[0:2])
else:
self.markers.append(lindex)
self.rsids.append(loc)
self.maf.append(float(maf))
self.alleles.append([al1, al2])
self.rsquared.append(float(rsq))
lindex += 1
else:
break
if self.dosages.shape[0] != len(self.markers):
print >> sys.stderr, "What is going on? I have ", \
self.dosages.shape[0], "dosages per individual and ", \
len(self.markers), self.markers
self.chunk += 1
self.marker_count = len(self.markers)
|
Actually loads the first chunk of genotype data into memory due to \
the individual oriented format of MACH data.
Due to the fragmented approach to data loading necessary to avoid
running out of RAM, this function will initialize the data structures
with the first chunk of loci and prepare it for otherwise normal
iteration.
Also, because the parser can be assigned more than one .gen file to
read from, it will automatically move to the next file when the
first is exhausted.
|
def _on_io_events(self, fd=None, _events=None):
"""Invoked by Tornado's IOLoop when there are events for the fd
:param int fd: The file descriptor for the event
:param int _events: The events raised
"""
if fd not in self._connections:
LOGGER.warning('Received IO event for non-existing connection')
return
self._poll_connection(fd)
|
Invoked by Tornado's IOLoop when there are events for the fd
:param int fd: The file descriptor for the event
:param int _events: The events raised
|
def __get_overall_data(self, x):
"""
(recursive) Collect all "sensorGenus" and "sensorSpecies" fields, set data to self
:param any x: Any data type
:return any: The same data, returned unchanged
"""
if isinstance(x, dict):
if "sensorGenus" in x:
if x["sensorGenus"] and x["sensorGenus"] not in self.lsts_tmp["genus"]:
self.lsts_tmp["genus"].append(x["sensorGenus"])
if "sensorSpecies" in x:
if x["sensorSpecies"] and x["sensorSpecies"] not in self.lsts_tmp["species"]:
self.lsts_tmp["species"].append(x["sensorSpecies"])
if "archiveType" in x:
if x["archiveType"] and x["archiveType"] not in self.lsts_tmp["archive"]:
self.lsts_tmp["archive"].append(x["archiveType"])
if "QCnotes" in x:
if x["QCnotes"] and x["QCnotes"] not in self.lsts_tmp["qc"]:
self.lsts_tmp["qc"].append(x["QCnotes"])
for k, v in x.items():
if isinstance(v, dict):
self.__get_overall_data(v)
elif isinstance(v, list):
self.__get_overall_data(v)
elif isinstance(x, list):
for i in x:
self.__get_overall_data(i)
return x
|
(recursive) Collect all "sensorGenus" and "sensorSpecies" fields, set data to self
:param any x: Any data type
:return any: The same data, returned unchanged
|
def set_sample_probability(probability):
"""Set the probability that a batch will be submitted to the InfluxDB
server. This should be a value that is greater than or equal to ``0`` and
less than or equal to ``1.0``. A value of ``0.25`` would represent a
probability of 25% that a batch would be written to InfluxDB.
:param float probability: The value between 0 and 1.0 that represents the
probability that a batch will be submitted to the InfluxDB server.
"""
global _sample_probability
if not 0.0 <= probability <= 1.0:
raise ValueError('Invalid probability value')
LOGGER.debug('Setting sample probability to %.2f', probability)
_sample_probability = float(probability)
|
Set the probability that a batch will be submitted to the InfluxDB
server. This should be a value that is greater than or equal to ``0`` and
less than or equal to ``1.0``. A value of ``0.25`` would represent a
probability of 25% that a batch would be written to InfluxDB.
:param float probability: The value between 0 and 1.0 that represents the
probability that a batch will be submitted to the InfluxDB server.
|
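A sketch of how the module-level probability would typically be consulted at submission time; the `should_submit` helper is hypothetical and not part of the source:
import random

def should_submit():
    # Submit the batch only when a uniform draw falls below the configured
    # probability: 1.0 always submits, 0.0 never does.
    return random.random() < _sample_probability

set_sample_probability(0.25)  # roughly one batch in four gets written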
def conv_gru(x,
kernel_size,
filters,
padding="SAME",
dilation_rate=(1, 1),
name=None,
reuse=None):
"""Convolutional GRU in 1 dimension."""
# Let's make a shorthand for conv call first.
def do_conv(args, name, bias_start, padding):
return conv(
args,
filters,
kernel_size,
padding=padding,
dilation_rate=dilation_rate,
bias_initializer=tf.constant_initializer(bias_start),
name=name)
# Here comes the GRU gate.
with tf.variable_scope(
name, default_name="conv_gru", values=[x], reuse=reuse):
reset = saturating_sigmoid(do_conv(x, "reset", 1.0, padding))
gate = saturating_sigmoid(do_conv(x, "gate", 1.0, padding))
candidate = tf.tanh(do_conv(reset * x, "candidate", 0.0, padding))
return gate * x + (1 - gate) * candidate
|
Convolutional GRU in 1 dimension.
|
def run_transaction(transactor, callback):
"""Run a transaction with retries.
``callback()`` will be called with one argument to execute the
transaction. ``callback`` may be called more than once; it should have
no side effects other than writes to the database on the given
connection. ``callback`` should not call ``commit()`` or ``rollback()``;
these will be called automatically.
The ``transactor`` argument may be one of the following types:
* `sqlalchemy.engine.Connection`: the same connection is passed to the callback.
* `sqlalchemy.engine.Engine`: a connection is created and passed to the callback.
* `sqlalchemy.orm.sessionmaker`: a session is created and passed to the callback.
"""
if isinstance(transactor, sqlalchemy.engine.Connection):
return _txn_retry_loop(transactor, callback)
elif isinstance(transactor, sqlalchemy.engine.Engine):
with transactor.connect() as connection:
return _txn_retry_loop(connection, callback)
elif isinstance(transactor, sqlalchemy.orm.sessionmaker):
session = transactor(autocommit=True)
return _txn_retry_loop(session, callback)
else:
raise TypeError("don't know how to run a transaction on %s" % type(transactor))
|
Run a transaction with retries.
``callback()`` will be called with one argument to execute the
transaction. ``callback`` may be called more than once; it should have
no side effects other than writes to the database on the given
connection. ``callback`` should not call ``commit()`` or ``rollback()``;
these will be called automatically.
The ``transactor`` argument may be one of the following types:
* `sqlalchemy.engine.Connection`: the same connection is passed to the callback.
* `sqlalchemy.engine.Engine`: a connection is created and passed to the callback.
* `sqlalchemy.orm.sessionmaker`: a session is created and passed to the callback.
|
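A usage sketch with an `Engine` transactor; the connection URL and table are placeholders:
import sqlalchemy

engine = sqlalchemy.create_engine('cockroachdb://user@localhost:26257/bank')  # placeholder URL

def transfer(connection):
    # May run more than once on retry, so it does nothing but write to the
    # database on the given connection; no commit()/rollback() calls.
    connection.execute("UPDATE accounts SET balance = balance - 10 WHERE id = 1")
    connection.execute("UPDATE accounts SET balance = balance + 10 WHERE id = 2")

run_transaction(engine, transfer)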
def escape_identifier(text, reg=KWD_RE):
"""Escape partial C identifiers so they can be used as
attributes/arguments"""
# see http://docs.python.org/reference/lexical_analysis.html#identifiers
if not text:
return "_"
if text[0].isdigit():
text = "_" + text
return reg.sub(r"\1_", text)
|
Escape partial C identifiers so they can be used as
attributes/arguments
|
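Assuming `KWD_RE` matches reserved words (it is defined elsewhere in the module), the effect is:
escape_identifier("")       # -> "_"      (empty input)
escape_identifier("2fast")  # -> "_2fast" (leading digit gets a prefix)
escape_identifier("class")  # -> "class_" (keyword gets a suffix, assuming KWD_RE matches it)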
def get(self):
"""
Get a JSON-ready representation of this ClickTracking.
:returns: This ClickTracking, ready for use in a request body.
:rtype: dict
"""
click_tracking = {}
if self.enable is not None:
click_tracking["enable"] = self.enable
if self.enable_text is not None:
click_tracking["enable_text"] = self.enable_text
return click_tracking
|
Get a JSON-ready representation of this ClickTracking.
:returns: This ClickTracking, ready for use in a request body.
:rtype: dict
|
def iter_successors(self, graph, orig, branch, turn, tick, *, forward=None):
"""Iterate over successors of a given origin node at a given time."""
if self.db._no_kc:
yield from self._adds_dels_sucpred(self.successors[graph, orig], branch, turn, tick)[0]
return
if forward is None:
forward = self.db._forward
yield from self._get_destcache(graph, orig, branch, turn, tick, forward=forward)
|
Iterate over successors of a given origin node at a given time.
|
def to_vector(np_array):
"""Convert numpy array to MLlib Vector
"""
if len(np_array.shape) == 1:
return Vectors.dense(np_array)
else:
raise Exception("An MLLib Vector can only be created from a one-dimensional " +
"numpy array, got {}".format(len(np_array.shape)))
|
Convert numpy array to MLlib Vector
|
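For example:
import numpy as np

to_vector(np.array([0.5, 1.0, 2.0]))  # -> DenseVector([0.5, 1.0, 2.0])
to_vector(np.zeros((2, 2)))           # raises: only one-dimensional arrays are accepted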
def nextindx(self):
'''
Determine the next insert offset according to storage.
Returns:
int: The next insert offset.
'''
indx = 0
with s_lmdbslab.Scan(self.slab, self.db) as curs:
last_key = curs.last_key()
if last_key is not None:
indx = s_common.int64un(last_key) + 1
return indx
|
Determine the next insert offset according to storage.
Returns:
int: The next insert offset.
|
def load_config(self, config=None):
''' loads a config file
Parameters:
config (str):
Optional name of manual config file to load
'''
# Read the config file
cfgname = (config or self.config_name)
cfgname = 'sdsswork' if cfgname is None else cfgname
assert isinstance(cfgname, six.string_types), 'config name must be a string'
config_name = cfgname if cfgname.endswith('.cfg') else '{0}.cfg'.format(cfgname)
self.configfile = os.path.join(self.treedir, 'data', config_name)
assert os.path.isfile(self.configfile) is True, 'configfile {0} must exist in the proper directory'.format(self.configfile)
self._cfg = SafeConfigParser()
try:
self._cfg.read(self.configfile.decode('utf-8'))
except AttributeError:
self._cfg.read(self.configfile)
# create the local tree environment
self.environ = OrderedDict()
self.environ['default'] = self._cfg.defaults()
# set the filesystem envvar to sas_base_dir
self._file_replace = '@FILESYSTEM@'
if self.environ['default']['filesystem'] == self._file_replace:
self.environ['default']['filesystem'] = self.sasbasedir
|
loads a config file
Parameters:
config (str):
Optional name of manual config file to load
|
def cleanUpdatesList(self, col, cellIdx, seg):
"""
Removes any update that would be for the given col, cellIdx, seg.
NOTE: logically, we need to do this when we delete segments, so that if
an update refers to a segment that was just deleted, we also remove
that update from the update list. However, I haven't seen it trigger
in any of the unit tests yet, so it might mean that it's not needed
and that situation doesn't occur, by construction.
todo: check if that situation occurs.
"""
for key, updateList in self.segmentUpdates.iteritems():
c,i = key[0], key[1]
if c == col and i == cellIdx:
for update in updateList:
if update[1].segment == seg:
self.removeSegmentUpdate(update)
|
Removes any update that would be for the given col, cellIdx, seg.
NOTE: logically, we need to do this when we delete segments, so that if
an update refers to a segment that was just deleted, we also remove
that update from the update list. However, I haven't seen it trigger
in any of the unit tests yet, so it might mean that it's not needed
and that situation doesn't occur, by construction.
todo: check if that situation occurs.
|
def api(server, command, *args, **kwargs):
'''
Call the Spacewalk xmlrpc api.
CLI Example:
.. code-block:: bash
salt-run spacewalk.api spacewalk01.domain.com systemgroup.create MyGroup Description
salt-run spacewalk.api spacewalk01.domain.com systemgroup.create arguments='["MyGroup", "Description"]'
State Example:
.. code-block:: yaml
create_group:
salt.runner:
- name: spacewalk.api
- server: spacewalk01.domain.com
- command: systemgroup.create
- arguments:
- MyGroup
- Description
'''
if 'arguments' in kwargs:
arguments = kwargs['arguments']
else:
arguments = args
call = '{0} {1}'.format(command, arguments)
try:
client, key = _get_session(server)
except Exception as exc:
err_msg = 'Exception raised when connecting to spacewalk server ({0}): {1}'.format(server, exc)
log.error(err_msg)
return {call: err_msg}
namespace, method = command.split('.')
endpoint = getattr(getattr(client, namespace), method)
try:
output = endpoint(key, *arguments)
except Exception as e:
output = 'API call failed: {0}'.format(e)
return {call: output}
|
Call the Spacewalk xmlrpc api.
CLI Example:
.. code-block:: bash
salt-run spacewalk.api spacewalk01.domain.com systemgroup.create MyGroup Description
salt-run spacewalk.api spacewalk01.domain.com systemgroup.create arguments='["MyGroup", "Description"]'
State Example:
.. code-block:: yaml
create_group:
salt.runner:
- name: spacewalk.api
- server: spacewalk01.domain.com
- command: systemgroup.create
- arguments:
- MyGroup
- Description
|
def get_issuer(request):
"""
Gets the Issuer of the Logout Request Message
:param request: Logout Request Message
:type request: string|DOMDocument
:return: The Issuer
:rtype: string
"""
if isinstance(request, etree._Element):
elem = request
else:
if isinstance(request, Document):
request = request.toxml()
elem = fromstring(request, forbid_dtd=True)
issuer = None
issuer_nodes = OneLogin_Saml2_Utils.query(elem, '/samlp:LogoutRequest/saml:Issuer')
if len(issuer_nodes) == 1:
issuer = OneLogin_Saml2_Utils.element_text(issuer_nodes[0])
return issuer
|
Gets the Issuer of the Logout Request Message
:param request: Logout Request Message
:type request: string|DOMDocument
:return: The Issuer
:rtype: string
|
def calibrate(filename):
"""
Append the calibration parameters as variables of the netcdf file.
Keyword arguments:
filename -- the name of a netcdf file.
"""
params = calibration_to(filename)
with nc.loader(filename) as root:
for key, value in params.items():
nc.getdim(root, 'xc_1', 1)
nc.getdim(root, 'yc_1', 1)
if isinstance(value, list):
for i in range(len(value)):
nc.getvar(root, '%s_%i' % (key, i), 'f4', ('time', 'yc_1', 'xc_1' ))[:] = value[i]
else:
nc.getvar(root, key, 'f4', ('time', 'yc_1', 'xc_1'))[:] = value
|
Append the calibration parameters as variables of the netcdf file.
Keyword arguments:
filename -- the name of a netcdf file.
|
def add_nodes_from(self, nodes, weights=None):
"""
Add multiple nodes to the Graph.
**The behaviour of adding weights is different from that in networkx.
Parameters
----------
nodes: iterable container
A container of nodes (list, dict, set, or any hashable python
object).
weights: list, tuple (default=None)
A container of weights (int, float). The weight value at index i
is associated with the variable at index i.
Examples
--------
>>> from pgmpy.base import DAG
>>> G = DAG()
>>> G.add_nodes_from(nodes=['A', 'B', 'C'])
>>> sorted(G.nodes())
['A', 'B', 'C']
Adding nodes with weights:
>>> G.add_nodes_from(nodes=['D', 'E'], weights=[0.3, 0.6])
>>> G.node['D']
{'weight': 0.3}
>>> G.node['E']
{'weight': 0.6}
>>> G.node['A']
{'weight': None}
"""
nodes = list(nodes)
if weights:
if len(nodes) != len(weights):
raise ValueError("The number of elements in nodes and weights"
"should be equal.")
for index in range(len(nodes)):
self.add_node(node=nodes[index], weight=weights[index])
else:
for node in nodes:
self.add_node(node=node)
|
Add multiple nodes to the Graph.
**The behaviour of adding weights is different from that in networkx.
Parameters
----------
nodes: iterable container
A container of nodes (list, dict, set, or any hashable python
object).
weights: list, tuple (default=None)
A container of weights (int, float). The weight value at index i
is associated with the variable at index i.
Examples
--------
>>> from pgmpy.base import DAG
>>> G = DAG()
>>> G.add_nodes_from(nodes=['A', 'B', 'C'])
>>> sorted(G.nodes())
['A', 'B', 'C']
Adding nodes with weights:
>>> G.add_nodes_from(nodes=['D', 'E'], weights=[0.3, 0.6])
>>> G.node['D']
{'weight': 0.3}
>>> G.node['E']
{'weight': 0.6}
>>> G.node['A']
{'weight': None}
|
def correlate(h1, h2): # 31 us @array, 55 us @list \w 100 bins
r"""
Correlation between two histograms.
The histogram correlation between two histograms :math:`H` and :math:`H'` of size :math:`m`
is defined as:
.. math::
d_{corr}(H, H') =
\frac{
\sum_{m=1}^M (H_m-\bar{H}) \cdot (H'_m-\bar{H'})
}{
\sqrt{\sum_{m=1}^M (H_m-\bar{H})^2 \cdot \sum_{m=1}^M (H'_m-\bar{H'})^2}
}
with :math:`\bar{H}` and :math:`\bar{H'}` being the mean values of :math:`H` resp. :math:`H'`
*Attributes:*
- not a metric, a similarity
*Attributes for normalized histograms:*
- :math:`d(H, H')\in[-1, 1]`
- :math:`d(H, H) = 1`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-normalized histograms:*
- :math:`d(H, H')\in[-1, 1]`
- :math:`d(H, H) = 1`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-equal histograms:*
- not applicable
Parameters
----------
h1 : sequence
The first histogram.
h2 : sequence
The second histogram, same bins as ``h1``.
Returns
-------
correlate : float
Correlation between the histograms.
Notes
-----
Returns 0 if one of h1 or h2 contain only zeros.
"""
h1, h2 = __prepare_histogram(h1, h2)
h1m = h1 - scipy.sum(h1) / float(h1.size)
h2m = h2 - scipy.sum(h2) / float(h2.size)
a = scipy.sum(scipy.multiply(h1m, h2m))
b = math.sqrt(scipy.sum(scipy.square(h1m)) * scipy.sum(scipy.square(h2m)))
return 0 if 0 == b else a / b
|
r"""
Correlation between two histograms.
The histogram correlation between two histograms :math:`H` and :math:`H'` of size :math:`m`
is defined as:
.. math::
d_{corr}(H, H') =
\frac{
\sum_{m=1}^M (H_m-\bar{H}) \cdot (H'_m-\bar{H'})
}{
\sqrt{\sum_{m=1}^M (H_m-\bar{H})^2 \cdot \sum_{m=1}^M (H'_m-\bar{H'})^2}
}
with :math:`\bar{H}` and :math:`\bar{H'}` being the mean values of :math:`H` resp. :math:`H'`
*Attributes:*
- not a metric, a similarity
*Attributes for normalized histograms:*
- :math:`d(H, H')\in[-1, 1]`
- :math:`d(H, H) = 1`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-normalized histograms:*
- :math:`d(H, H')\in[-1, 1]`
- :math:`d(H, H) = 1`
- :math:`d(H, H') = d(H', H)`
*Attributes for not-equal histograms:*
- not applicable
Parameters
----------
h1 : sequence
The first histogram.
h2 : sequence
The second histogram, same bins as ``h1``.
Returns
-------
correlate : float
Correlation between the histograms.
Notes
-----
Returns 0 if one of h1 or h2 contain only zeros.
|
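Two quick checks of the properties listed above:
correlate([1, 2, 3, 4], [1, 2, 3, 4])  # -> 1.0  (d(H, H) = 1)
correlate([1, 2, 3, 4], [4, 3, 2, 1])  # -> -1.0 (perfectly anti-correlated)
correlate([0, 0, 0], [1, 2, 3])        # -> 0    (an all-zero histogram)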
def update_execution_state_kernel(self):
"""Update actions following the execution state of the kernel."""
client = self.get_current_client()
if client is not None:
executing = client.stop_button.isEnabled()
self.interrupt_action.setEnabled(executing)
|
Update actions following the execution state of the kernel.
|
def update(self, dict_name, mapping=None, priorities=None, expire=None,
locks=None):
'''Add mapping to a dictionary, replacing previous values
Can be called with only dict_name and expire to refresh the
expiration time.
NB: locks are only enforced if present, so nothing prevents
another caller from coming in and modifying data without using
locks.
:param mapping: a dict of keys and values to update in
dict_name. Must be specified if priorities is specified.
:param priorities: a dict with the same keys as those in
mapping that provides a numerical value indicating the
priority to assign to that key. Default sets 0 for all keys.
:param int expire: if specified, then dict_name will be set to
expire in that many seconds.
:param locks: a dict with the same keys as those in the
mapping. Before making any particular update, this function
checks if a key is present in a 'locks' table for this dict,
and if so, then its value must match the value provided in the
input locks dict for that key. If not, then the value
provided in the locks dict is inserted into the 'locks' table.
If the locks parameter is None, then no lock checking is
performed.
'''
if self._session_lock_identifier is None:
raise ProgrammerError('must acquire lock first')
if priorities is None:
## set all priorities to zero
priorities = defaultdict(int)
if locks is None:
## set all locks to None
locks = defaultdict(lambda: '')
if not (expire is None or isinstance(expire, int)):
raise ProgrammerError('expire must be int or unspecified')
conn = redis.Redis(connection_pool=self.pool)
script = conn.register_script('''
if redis.call("get", KEYS[1]) == ARGV[1]
then
for i = 3, #ARGV, 4 do
if ARGV[i+3] ~= 'j:""' then
local curr_lock = redis.call("hget", KEYS[4], ARGV[i])
if curr_lock and curr_lock ~= ARGV[i+3] then
return {-1, ARGV[i], curr_lock, ARGV[i+3]}
end
redis.call("hset", KEYS[4], ARGV[i], ARGV[i+3])
end
end
for i = 3, #ARGV, 4 do
redis.call("hset", KEYS[2], ARGV[i], ARGV[i+1])
redis.call("zadd", KEYS[3], ARGV[i+2], ARGV[i])
end
if tonumber(ARGV[2]) ~= nil then
redis.call("expire", KEYS[2], ARGV[2])
redis.call("expire", KEYS[3], ARGV[2])
end
return {1, 0}
else
-- ERROR: No longer own the lock
return {0, 0}
end
''')
dict_name = self._namespace(dict_name)
if mapping is None:
mapping = {}
items = []
## This flattens the dictionary into a list
for key, value in mapping.iteritems():
items.append(self._encode(key))
items.append(self._encode(value))
items.append(priorities[key])
items.append(self._encode(locks[key]))
#logger.debug('update %r %r', dict_name, items)
res = script(keys=[self._lock_name,
dict_name,
dict_name + 'keys',
dict_name + '_locks'],
args=[self._session_lock_identifier, expire] + items)
if res[0] == 0:
raise EnvironmentError(
'Unable to add items to %s in registry' % dict_name)
elif res[0] == -1:
raise EnvironmentError(
'lost lock on key=%r owned by %r not %r in %s'
% (self._decode(res[1]), res[2], res[3], dict_name))
|
Add mapping to a dictionary, replacing previous values
Can be called with only dict_name and expire to refresh the
expiration time.
NB: locks are only enforced if present, so nothing prevents
another caller from coming in and modifying data without using
locks.
:param mapping: a dict of keys and values to update in
dict_name. Must be specified if priorities is specified.
:param priorities: a dict with the same keys as those in
mapping that provides a numerical value indicating the
priority to assign to that key. Default sets 0 for all keys.
:param int expire: if specified, then dict_name will be set to
expire in that many seconds.
:param locks: a dict with the same keys as those in the
mapping. Before making any particular update, this function
checks if a key is present in a 'locks' table for this dict,
and if so, then its value must match the value provided in the
input locks dict for that key. If not, then the value
provided in the locks dict is inserted into the 'locks' table.
If the locks parameter is None, then no lock checking is
performed.
|
def get_distribute_verbatim_metadata(self):
"""Gets the metadata for the distribute verbatim rights flag.
return: (osid.Metadata) - metadata for the distribution rights
fields
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceForm.get_group_metadata_template
metadata = dict(self._mdata['distribute_verbatim'])
metadata.update({'existing_boolean_values': self._my_map['distributeVerbatim']})
return Metadata(**metadata)
|
Gets the metadata for the distribute verbatim rights flag.
return: (osid.Metadata) - metadata for the distribution rights
fields
*compliance: mandatory -- This method must be implemented.*
|
def add(self, entry):
"""
Override this to influence the mechanics of the Archive.
Assumes entry is a seq beginning with (nm, pth, ...) where
nm is the key by which we'll be asked for the object.
pth is the name of where we find the object. Overrides of
get_obj_from can make use of further elements in entry.
"""
if self.os is None:
import os
self.os = os
nm = entry[0]
pth = entry[1]
pynm, ext = self.os.path.splitext(self.os.path.basename(pth))
ispkg = pynm == '__init__'
assert ext in ('.pyc', '.pyo')
self.toc[nm] = (ispkg, self.lib.tell())
f = open(entry[1], 'rb')
f.seek(8) # skip magic and timestamp
self.lib.write(f.read())
|
Override this to influence the mechanics of the Archive.
Assumes entry is a seq beginning with (nm, pth, ...) where
nm is the key by which we'll be asked for the object.
pth is the name of where we find the object. Overrides of
get_obj_from can make use of further elements in entry.
|
def create(self, handle, title=None, description=None):
""" Create a role """
role = Role(handle=handle, title=title, description=description)
schema = RoleSchema()
valid = schema.process(role)
if not valid:
return valid
db.session.add(role)
db.session.commit()
events.role_created_event.send(role)
return role
|
Create a role
|
def getJobStatus(self, workers):
"""
Parameters:
----------------------------------------------------------------------
workers: If this job was launched outside of the nupic job engine, then this
is an array of subprocess Popen instances, one for each worker
retval: _NupicJob.JobStatus instance
"""
jobInfo = self.JobStatus(self.__nupicJobID, workers)
return jobInfo
|
Parameters:
----------------------------------------------------------------------
workers: If this job was launched outside of the nupic job engine, then this
is an array of subprocess Popen instances, one for each worker
retval: _NupicJob.JobStatus instance
|
def _quote_username(name):
'''
Usernames can only contain ascii chars, so make sure we return a str type
'''
if not isinstance(name, six.string_types):
return str(name) # future lint: disable=blacklisted-function
else:
return salt.utils.stringutils.to_str(name)
|
Usernames can only contain ascii chars, so make sure we return a str type
|
def storage_type(self):
"""Depending on input data type, the storage type is either
"field" (complex) or "phase" (real)."""
nf = np.load(str(self.path), mmap_mode="c", allow_pickle=False)
if np.iscomplexobj(nf):
st = "field"
else:
st = "phase"
return st
|
Depending on input data type, the storage type is either
"field" (complex) or "phase" (real).
|
def cancel_job(
self,
project_id,
region,
job_id,
retry=google.api_core.gapic_v1.method.DEFAULT,
timeout=google.api_core.gapic_v1.method.DEFAULT,
metadata=None,
):
"""
Starts a job cancellation request. To access the job resource after
cancellation, call
`regions/{region}/jobs.list <https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list>`__
or
`regions/{region}/jobs.get <https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get>`__.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.JobControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `job_id`:
>>> job_id = ''
>>>
>>> response = client.cancel_job(project_id, region, job_id)
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the job
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
job_id (str): Required. The job ID.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types.Job` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
"""
# Wrap the transport method to add retry and timeout logic.
if "cancel_job" not in self._inner_api_calls:
self._inner_api_calls[
"cancel_job"
] = google.api_core.gapic_v1.method.wrap_method(
self.transport.cancel_job,
default_retry=self._method_configs["CancelJob"].retry,
default_timeout=self._method_configs["CancelJob"].timeout,
client_info=self._client_info,
)
request = jobs_pb2.CancelJobRequest(
project_id=project_id, region=region, job_id=job_id
)
return self._inner_api_calls["cancel_job"](
request, retry=retry, timeout=timeout, metadata=metadata
)
|
Starts a job cancellation request. To access the job resource after
cancellation, call
`regions/{region}/jobs.list <https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list>`__
or
`regions/{region}/jobs.get <https://cloud.google.com/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get>`__.
Example:
>>> from google.cloud import dataproc_v1beta2
>>>
>>> client = dataproc_v1beta2.JobControllerClient()
>>>
>>> # TODO: Initialize `project_id`:
>>> project_id = ''
>>>
>>> # TODO: Initialize `region`:
>>> region = ''
>>>
>>> # TODO: Initialize `job_id`:
>>> job_id = ''
>>>
>>> response = client.cancel_job(project_id, region, job_id)
Args:
project_id (str): Required. The ID of the Google Cloud Platform project that the job
belongs to.
region (str): Required. The Cloud Dataproc region in which to handle the request.
job_id (str): Required. The job ID.
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.dataproc_v1beta2.types.Job` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid.
|
def volume(self):
"""
The analytic volume of the cylinder primitive.
Returns
---------
volume : float
Volume of the cylinder
"""
volume = ((np.pi * self.primitive.radius ** 2) *
self.primitive.height)
return volume
|
The analytic volume of the cylinder primitive.
Returns
---------
volume : float
Volume of the cylinder
|
def cli(ctx, board, fpga, pack, type, size, project_dir,
verbose, verbose_yosys, verbose_arachne):
"""Bitstream timing analysis."""
# Run scons
exit_code = SCons(project_dir).time({
'board': board,
'fpga': fpga,
'size': size,
'type': type,
'pack': pack,
'verbose': {
'all': verbose,
'yosys': verbose_yosys,
'arachne': verbose_arachne
}
})
ctx.exit(exit_code)
|
Bitstream timing analysis.
|
def autoLayout(self):
"""
Automatically lays out the contents for this widget.
"""
try:
direction = self.currentSlide().scene().direction()
except AttributeError:
direction = QtGui.QBoxLayout.TopToBottom
size = self.size()
self._slideshow.resize(size)
prev = self._previousButton
next = self._nextButton
if direction == QtGui.QBoxLayout.BottomToTop:
y = 9
else:
y = size.height() - prev.height() - 9
prev.move(9, y)
next.move(size.width() - next.width() - 9, y)
# update the layout for the slides
for i in range(self._slideshow.count()):
widget = self._slideshow.widget(i)
widget.scene().autoLayout(size)
|
Automatically lays out the contents for this widget.
|
def fileopenbox(msg=None
, title=None
, default="*"
, filetypes=None
):
"""
A dialog to get a file name.
About the "default" argument
============================
The "default" argument specifies a filepath that (normally)
contains one or more wildcards.
fileopenbox will display only files that match the default filepath.
If omitted, defaults to "*" (all files in the current directory).
WINDOWS EXAMPLE::
...default="c:/myjunk/*.py"
will open in directory c:\myjunk\ and show all Python files.
WINDOWS EXAMPLE::
...default="c:/myjunk/test*.py"
will open in directory c:\myjunk\ and show all Python files
whose names begin with "test".
Note that on Windows, fileopenbox automatically changes the path
separator to the Windows path separator (backslash).
About the "filetypes" argument
==============================
If specified, it should contain a list of items,
where each item is either::
- a string containing a filemask # e.g. "*.txt"
- a list of strings, where all of the strings except the last one
are filemasks (each beginning with "*.",
such as "*.txt" for text files, "*.py" for Python files, etc.).
and the last string contains a filetype description
EXAMPLE::
filetypes = ["*.css", ["*.htm", "*.html", "HTML files"] ]
NOTE THAT
=========
If the filetypes list does not contain ("All files","*"),
it will be added.
If the filetypes list does not contain a filemask that includes
the extension of the "default" argument, it will be added.
For example, if default="*abc.py"
and no filetypes argument was specified, then
"*.py" will automatically be added to the filetypes argument.
@rtype: string or None
@return: the name of a file, or None if user chose to cancel
@arg msg: the msg to be displayed.
@arg title: the window title
@arg default: filepath with wildcards
@arg filetypes: filemasks that a user can choose, e.g. "*.txt"
"""
if sys.platform == 'darwin':
_bring_to_front()
localRoot = Tk()
localRoot.withdraw()
initialbase, initialfile, initialdir, filetypes = fileboxSetup(default,filetypes)
#------------------------------------------------------------
# if initialfile contains no wildcards; we don't want an
# initial file. It won't be used anyway.
# Also: if initialbase is simply "*", we don't want an
# initialfile; it is not doing any useful work.
#------------------------------------------------------------
if (initialfile.find("*") < 0) and (initialfile.find("?") < 0):
initialfile = None
elif initialbase == "*":
initialfile = None
f = tk_FileDialog.askopenfilename(parent=localRoot
, title=getFileDialogTitle(msg,title)
, initialdir=initialdir
, initialfile=initialfile
, filetypes=filetypes
)
localRoot.destroy()
if not f: return None
return os.path.normpath(f)
|
A dialog to get a file name.
About the "default" argument
============================
The "default" argument specifies a filepath that (normally)
contains one or more wildcards.
fileopenbox will display only files that match the default filepath.
If omitted, defaults to "*" (all files in the current directory).
WINDOWS EXAMPLE::
...default="c:/myjunk/*.py"
will open in directory c:\myjunk\ and show all Python files.
WINDOWS EXAMPLE::
...default="c:/myjunk/test*.py"
will open in directory c:\myjunk\ and show all Python files
whose names begin with "test".
Note that on Windows, fileopenbox automatically changes the path
separator to the Windows path separator (backslash).
About the "filetypes" argument
==============================
If specified, it should contain a list of items,
where each item is either::
- a string containing a filemask # e.g. "*.txt"
- a list of strings, where all of the strings except the last one
are filemasks (each beginning with "*.",
such as "*.txt" for text files, "*.py" for Python files, etc.).
and the last string contains a filetype description
EXAMPLE::
filetypes = ["*.css", ["*.htm", "*.html", "HTML files"] ]
NOTE THAT
=========
If the filetypes list does not contain ("All files","*"),
it will be added.
If the filetypes list does not contain a filemask that includes
the extension of the "default" argument, it will be added.
For example, if default="*abc.py"
and no filetypes argument was specified, then
"*.py" will automatically be added to the filetypes argument.
@rtype: string or None
@return: the name of a file, or None if user chose to cancel
@arg msg: the msg to be displayed.
@arg title: the window title
@arg default: filepath with wildcards
@arg filetypes: filemasks that a user can choose, e.g. "*.txt"
|
def _parse_accented_syllable(unparsed_syllable):
"""Return the syllable and tone of an accented Pinyin syllable.
Any accented vowels are returned without their accents.
Implements the following algorithm:
1. If the syllable has an accent mark, convert that vowel to a
regular vowel and add the tone to the end of the syllable.
2. Otherwise, assume the syllable is tone 5 (no accent marks).
"""
if unparsed_syllable[0] == '\u00B7':
# Special case for middle dot tone mark.
return unparsed_syllable[1:], '5'
for character in unparsed_syllable:
if character in _ACCENTED_VOWELS:
vowel, tone = _accented_vowel_to_numbered(character)
return unparsed_syllable.replace(character, vowel), tone
return unparsed_syllable, '5'
|
Return the syllable and tone of an accented Pinyin syllable.
Any accented vowels are returned without their accents.
Implements the following algorithm:
1. If the syllable has an accent mark, convert that vowel to a
regular vowel and add the tone to the end of the syllable.
2. Otherwise, assume the syllable is tone 5 (no accent marks).
|
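For example, assuming the `_ACCENTED_VOWELS` table and the `_accented_vowel_to_numbered` helper defined elsewhere in the module:
_parse_accented_syllable('má')        # -> ('ma', '2')
_parse_accented_syllable('ma')        # -> ('ma', '5')  (no accent mark: tone 5)
_parse_accented_syllable('\u00B7ma')  # -> ('ma', '5')  (middle-dot tone mark)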
def get_last_rconfiguration_id(topic_id, remoteci_id, db_conn=None):
"""Get the rconfiguration_id of the last job run by the remoteci.
:param topic_id: the topic
:param remoteci_id: the remoteci id
:return: last rconfiguration_id of the remoteci
"""
db_conn = db_conn or flask.g.db_conn
__TABLE = models.JOBS
query = sql.select([__TABLE.c.rconfiguration_id]). \
order_by(sql.desc(__TABLE.c.created_at)). \
where(sql.and_(__TABLE.c.topic_id == topic_id,
__TABLE.c.remoteci_id == remoteci_id)). \
limit(1)
rconfiguration_id = db_conn.execute(query).fetchone()
if rconfiguration_id is not None:
return str(rconfiguration_id[0])
else:
return None
|
Get the rconfiguration_id of the last job run by the remoteci.
:param topic_id: the topic
:param remoteci_id: the remoteci id
:return: last rconfiguration_id of the remoteci
|
def string_format(data, out='nested', opts=None, **kwargs):
'''
Return the outputter formatted string, removing the ANSI escape sequences.
data
The JSON serializable object.
out: ``nested``
The name of the output to use to transform the data. Default: ``nested``.
opts
Dictionary of configuration options. Default: ``__opts__``.
kwargs
Arguments to send to the outputter module.
CLI Example:
.. code-block:: bash
salt '*' out.string_format "{'key': 'value'}" out=table
'''
if not opts:
opts = __opts__
return salt.output.string_format(data, out, opts=opts, **kwargs)
|
Return the outputter formatted string, removing the ANSI escape sequences.
data
The JSON serializable object.
out: ``nested``
The name of the output to use to transform the data. Default: ``nested``.
opts
Dictionary of configuration options. Default: ``__opts__``.
kwargs
Arguments to send to the outputter module.
CLI Example:
.. code-block:: bash
salt '*' out.string_format "{'key': 'value'}" out=table
|
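Outside the CLI, the same module function can be reached through Salt's Python API; a hedged sketch (assumes a running master with configured minions):

import salt.client

local = salt.client.LocalClient()
# Render the data with the ``table`` outputter on every minion.
rendered = local.cmd('*', 'out.string_format',
                     ["{'key': 'value'}"], kwarg={'out': 'table'})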
def _get_broadcast_shape(shape1, shape2):
"""Given two shapes that are not identical, find the shape
that both input shapes can broadcast to."""
if shape1 == shape2:
return shape1
length1 = len(shape1)
length2 = len(shape2)
if length1 > length2:
shape = list(shape1)
else:
shape = list(shape2)
i = max(length1, length2) - 1
for a, b in zip(shape1[::-1], shape2[::-1]):
if a != 1 and b != 1 and a != b:
raise ValueError('shape1=%s is not broadcastable to shape2=%s' % (shape1, shape2))
shape[i] = max(a, b)
i -= 1
return tuple(shape)
|
Given two shapes that are not identical, find the shape
that both input shapes can broadcast to.
|
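A quick illustration of the rule implemented above: shapes are aligned from the right, and a dimension broadcasts whenever either side is 1.

assert _get_broadcast_shape((3, 1, 5), (2, 5)) == (3, 2, 5)
assert _get_broadcast_shape((4, 1), (1, 6)) == (4, 6)
# Mismatched non-1 dimensions are rejected:
# _get_broadcast_shape((3, 4), (3, 5)) raises ValueError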
def small_integer(anon, obj, field, val):
"""
Returns a random small integer (for a Django SmallIntegerField)
"""
return anon.faker.small_integer(field=field)
|
Returns a random small integer (for a Django SmallIntegerField)
|
def set_connection_params(self, ip_address, tsap_snap7, tsap_logo):
"""
Sets the connection coordinates (IP, LocalTSAP, RemoteTSAP) internally.
This function must be called just before Cli_Connect().
:param ip_address: IP address of the server
:param tsap_snap7: TSAP SNAP7 Client (e.g. 10.00 = 0x1000)
:param tsap_logo: TSAP Logo Server (e.g. 20.00 = 0x2000)
"""
assert re.match(ipv4, ip_address), '%s is invalid ipv4' % ip_address
result = self.library.Cli_SetConnectionParams(self.pointer, ip_address.encode(),
c_uint16(tsap_snap7),
c_uint16(tsap_logo))
if result != 0:
raise Snap7Exception("The parameter was invalid")
|
Sets the connection coordinates (IP, LocalTSAP, RemoteTSAP) internally.
This function must be called just before Cli_Connect().
:param ip_address: IP address of the server
:param tsap_snap7: TSAP SNAP7 Client (e.g. 10.00 = 0x1000)
:param tsap_logo: TSAP Logo Server (e.g. 20.00 = 0x2000)
|
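A hedged usage sketch, assuming this method lives on python-snap7's LOGO! client (as the Cli_* calls suggest):

from snap7.logo import Logo

client = Logo()
# TSAPs are hex-encoded: 10.00 -> 0x1000 (client), 20.00 -> 0x2000 (LOGO!).
client.set_connection_params('192.168.0.10', 0x1000, 0x2000)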
def compose_path(pub, uuid_url=False):
"""
Compose absolute path for given `pub`.
Args:
pub (obj): :class:`.DBPublication` instance.
uuid_url (bool, default False): Compose URL using UUID.
Returns:
str: Absolute url-path of the publication, without server's address \
and protocol.
Raises:
PrivatePublicationError: When the `pub` is private publication.
"""
if uuid_url:
return join(
"/",
UUID_DOWNLOAD_KEY,
str(pub.uuid)
)
return join(
"/",
DOWNLOAD_KEY,
basename(pub.file_pointer),
basename(pub.filename)
)
|
Compose absolute path for given `pub`.
Args:
pub (obj): :class:`.DBPublication` instance.
uuid_url (bool, default False): Compose URL using UUID.
Returns:
str: Absolute url-path of the publication, without server's address \
and protocol.
Raises:
PrivatePublicationError: When the `pub` is private publication.
|
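To make the two URL shapes concrete, a sketch with assumed values for the module constants and a hypothetical publication object:

# Assume DOWNLOAD_KEY = "download" and UUID_DOWNLOAD_KEY = "uuid_download",
# with pub.file_pointer = "/var/pubs/3a5f", pub.filename = "book.epub",
# and pub.uuid a standard UUID value:
#
# compose_path(pub)                 -> "/download/3a5f/book.epub"
# compose_path(pub, uuid_url=True)  -> "/uuid_download/<pub.uuid>"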
def all(cls, include_deactivated=False):
"""
Get all resources
:param include_deactivated: Include deactivated resources in response
:returns: list of Document instances
:raises: SocketError, CouchException
"""
if include_deactivated:
resources = yield cls.view.get(include_docs=True)
else:
resources = yield cls.active_view.get(include_docs=True)
raise Return([cls(**resource['doc']) for resource in resources['rows']])
|
Get all resources
:param include_deactivated: Include deactivated resources in response
:returns: list of Document instances
:raises: SocketError, CouchException
|
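A hedged usage sketch from inside another Tornado coroutine (`MyDocument` is a hypothetical subclass wired to the `view`/`active_view` CouchDB views):

from tornado import gen

@gen.coroutine
def list_all():
    docs = yield MyDocument.all(include_deactivated=True)
    raise gen.Return(docs)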
def isometric_build_atlased_mesh(script, BorderSize=0.1):
"""Isometric parameterization: Build Atlased Mesh
This actually generates the UV mapping from the isometric parameterization
"""
filter_xml = ''.join([
' <filter name="Iso Parametrization Build Atlased Mesh">\n',
' <Param name="BorderSize"',
'value="%s"' % BorderSize,
'description="BorderSize ratio"',
'min="0.01"',
'max="0.5"',
'type="RichDynamicFloat"',
'tooltip="This parameter controls the amount of space that must be left between each diamond when building the atlas. It directly affects how many triangle are splitted during this conversion. In abstract parametrization mesh triangles can naturally cross the triangles of the abstract domain, so when converting to a standard parametrization we must cut all the triangles that protrudes outside each diamond more than the specified threshold. The unit of the threshold is in percentage of the size of the diamond, the bigger the threshold the less triangles are splitted, but the more UV space is used (wasted)."',
'/>\n',
' </filter>\n'])
util.write_filter(script, filter_xml)
return None
|
Isometric parameterization: Build Atlased Mesh
This actually generates the UV mapping from the isometric parameterization
|
def postorder(self, node=None):
"""Walk the tree in roughly 'postorder' (a bit of a lie
explained below).
For each node with typestring name *name* if the
node has a method called n_*name*, call that before walking
children. If there is no method define, call a
self.default(node) instead. Subclasses of GenericASTTtraversal
ill probably want to override this method.
If the node has a method called *name*_exit, that is called
after all children have been called. So in this sense this
function is a lie.
In typical use a node with children can call "postorder" in
any order it wants which may skip children or order then in
ways other than first to last. In fact, this this happens.
"""
if node is None:
node = self.ast
try:
first = iter(node)
except TypeError:
first = None
if first:
for kid in node:
self.postorder(kid)
try:
name = 'n_' + self.typestring(node)
if hasattr(self, name):
func = getattr(self, name)
func(node)
else:
self.default(node)
except GenericASTTraversalPruningException:
return
name = name + '_exit'
if hasattr(self, name):
func = getattr(self, name)
func(node)
|
Walk the tree in roughly 'postorder' (a bit of a lie,
explained below).
For each node with typestring name *name*, walk the node's
children first and then call the method n_*name* if one is
defined; otherwise call self.default(node) instead.
Subclasses of GenericASTTraversal will probably want to
override this method.
If the node has a method called *name*_exit, that is called
after all children have been walked. So in this sense this
function is a lie.
In typical use, a handler for a node with children can call
"postorder" in any order it wants, which may skip children or
order them in ways other than first to last. In fact, this
happens.
|
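A sketch of a concrete traverser; the base class and the node typestrings are assumed for illustration:

class ExprCounter(GenericASTTraversal):  # base class assumed from context
    def __init__(self, ast):
        super(ExprCounter, self).__init__(ast)
        self.seen = 0

    def n_expr(self, node):
        # Runs after the node's children have been walked.
        self.seen += 1

    def default(self, node):
        # Fallback for node types without an n_<typestring> handler.
        pass

# counter = ExprCounter(ast)
# counter.postorder()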
def get_event_log(self, object_id):
"""Get the specified event log."""
content = self._fetch("/event_log/%s" % object_id, method="GET")
return FastlyEventLog(self, content)
|
Get the specified event log.
|
def get_action_group_names(self):
"""Return all the security group names configured in this action."""
return self.get_group_names(
list(itertools.chain(
*[self._get_array('add'),
self._get_array('remove'),
self._get_array('isolation-group')])))
|
Return all the security group names configured in this action.
|
def qImageToArray(qimage, dtype='array'):
"""Convert QImage to numpy.ndarray. The dtype defaults to uint8
for QImage.Format_Indexed8 or `bgra_dtype` (i.e. a record array)
for 32bit color images. You can pass a different dtype to use, or
'array' to get a 3D uint8 array for color images."""
result_shape = (qimage.height(), qimage.width())
temp_shape = (qimage.height(),
qimage.bytesPerLine() * 8 // qimage.depth())
if qimage.format() in (QtGui.QImage.Format_ARGB32_Premultiplied,
QtGui.QImage.Format_ARGB32,
QtGui.QImage.Format_RGB32):
if dtype == 'rec':
dtype = np.dtype({'b': (np.uint8, 0),
'g': (np.uint8, 1),
'r': (np.uint8, 2),
'a': (np.uint8, 3)})
elif dtype == 'array':
dtype = np.uint8
result_shape += (4, )
temp_shape += (4, )
elif qimage.format() == QtGui.QImage.Format_Indexed8:
dtype = np.uint8
else:
raise ValueError("qimage2numpy only supports 32bit and 8bit images")
# FIXME: raise error if alignment does not match
buf = qimage.bits().asstring(qimage.byteCount())
result = np.frombuffer(buf, dtype).reshape(temp_shape)
if result_shape != temp_shape:
result = result[:,:result_shape[1]]
if qimage.format() == QtGui.QImage.Format_RGB32 and dtype == np.uint8:
#case byteorder == 'little'
result = result[...,:3]
#byteorder == 'big' -> get ARGB
result = result[...,::-1]
return result
|
Convert QImage to numpy.ndarray. The dtype defaults to uint8
for QImage.Format_Indexed8 or `bgra_dtype` (i.e. a record array)
for 32bit color images. You can pass a different dtype to use, or
'array' to get a 3D uint8 array for color images.
|
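A hedged round-trip check (assumes a PyQt-style binding exposing the Qt 4/5 `QImage` API used above, e.g. `from PyQt5 import QtGui`):

from PyQt5 import QtGui

img = QtGui.QImage(4, 3, QtGui.QImage.Format_ARGB32)
img.fill(QtGui.QColor(10, 20, 30, 255))

arr = qImageToArray(img)         # shape (3, 4, 4), uint8, BGRA byte order
rec = qImageToArray(img, 'rec')  # record array with 'b', 'g', 'r', 'a' fields
assert arr.shape == (3, 4, 4)
assert int(rec['r'][0, 0]) == 10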
def _inject_target(self, target_adaptor):
"""Inject a target, respecting all sources of dependencies."""
target_cls = self._target_types[target_adaptor.type_alias]
declared_deps = target_adaptor.dependencies
implicit_deps = (Address.parse(s,
relative_to=target_adaptor.address.spec_path,
subproject_roots=self._address_mapper.subproject_roots)
for s in target_cls.compute_dependency_specs(kwargs=target_adaptor.kwargs()))
for dep in declared_deps:
self._dependent_address_map[dep].add(target_adaptor.address)
for dep in implicit_deps:
self._implicit_dependent_address_map[dep].add(target_adaptor.address)
|
Inject a target, respecting all sources of dependencies.
|
def trace2(A, B):
r"""Trace of :math:`\mathrm A \mathrm B^\intercal`.
Args:
A (array_like): Left-hand side.
B (array_like): Right-hand side.
Returns:
float: Trace of :math:`\mathrm A \mathrm B^\intercal`.
"""
A = asarray(A, float)
B = asarray(B, float)
layout_error = "Wrong matrix layout."
if not (len(A.shape) == 2 and len(B.shape) == 2):
raise ValueError(layout_error)
if not (A.shape[1] == B.shape[0] and A.shape[0] == B.shape[1]):
raise ValueError(layout_error)
return _sum(A.T * B)
|
r"""Trace of :math:`\mathrm A \mathrm B^\intercal`.
Args:
A (array_like): Left-hand side.
B (array_like): Right-hand side.
Returns:
float: Trace of :math:`\mathrm A \mathrm B^\intercal`.
|
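With the shape guard above, B must have the transposed shape of A, so the value returned equals the trace of the matrix product A @ B; a quick numeric check:

import numpy as np

A = np.array([[1., 2., 3.],
              [4., 5., 6.]])   # 2 x 3
B = np.array([[1., 0.],
              [0., 1.],
              [1., 1.]])       # 3 x 2

assert np.isclose(trace2(A, B), np.trace(A @ B))  # both equal 15.0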