def match_column_labels(self, match_value_or_fct, levels=None, max_matches=0, empty_res=1):
"""Check the original DataFrame's column labels to find a subset of the current region
:param match_value_or_fct: value or function(hdr_value) which returns True for match
:param levels: [None, scalar, indexer]
:param max_matches: maximum number of columns to return
:param empty_res: if truthy, return an empty result frame when nothing matches
:return: a RegionFormatter restricted to the matching columns
"""
allmatches = self.parent._find_column_label_positions(match_value_or_fct, levels)
# only keep matches which are within this region
matches = [m for m in allmatches if m in self.col_ilocs]
if max_matches and len(matches) > max_matches:
matches = matches[:max_matches]
if matches:
return RegionFormatter(self.parent, self.row_ilocs, pd.Int64Index(matches))
elif empty_res:
return self.empty_frame()
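# Hedged usage sketch; `layout.cells` and the "Price" header value are
# illustrative assumptions, not taken from the source:
region = layout.cells  # a RegionFormatter over the data cells
price_cols = region.match_column_labels("Price")  # match by exact header value
# a callable receives each header value, e.g. for case-insensitive matching:
price_cols = region.match_column_labels(lambda hdr: str(hdr).lower() == "price")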
def begin(self, *args, **kwargs):
"""Indicate the beginning of a transaction.
During a transaction, connections won't be transparently
replaced, and all errors will be raised to the application.
If the underlying driver supports this method, it will be called
with the given parameters (e.g. for distributed transactions).
"""
self._transaction = True
try:
begin = self._con.begin
except AttributeError:
pass
else:
begin(*args, **kwargs)
def run(self, records):
"""Runs the batch upload
:param records: an iterable containing queue entries
"""
self_name = type(self).__name__
for i, batch in enumerate(grouper(records, self.BATCH_SIZE, skip_missing=True), 1):
self.logger.info('%s processing batch %d', self_name, i)
try:
for j, proc_batch in enumerate(grouper(
process_records(batch).iteritems(), self.BATCH_SIZE, skip_missing=True), 1):
self.logger.info('%s uploading chunk #%d (batch %d)', self_name, j, i)
self.upload_records({k: v for k, v in proc_batch}, from_queue=True)
except Exception:
self.logger.exception('%s could not upload batch', self_name)
return
self.logger.info('%s finished batch %d', self_name, i)
self.processed_records(batch)
self.logger.info('%s finished', self_name)
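# A minimal sketch of the `grouper` helper assumed above (not from the source);
# it chunks an iterable into tuples of up to `n` items, dropping the padding
# when skip_missing=True. Written for Python 3; the code above is Python 2 era,
# where itertools.izip_longest would be used instead.
from itertools import zip_longest

_MISSING = object()

def grouper(iterable, n, skip_missing=False):
    args = [iter(iterable)] * n
    for chunk in zip_longest(*args, fillvalue=_MISSING):
        if skip_missing:
            chunk = tuple(item for item in chunk if item is not _MISSING)
        yield chunk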
def get_logging_file_handler(logger=None, file=None, formatter=LOGGING_DEFAULT_FORMATTER):
"""
Adds a logging file handler to given logger or default logger using given file.
:param logger: Logger to add the handler to.
:type logger: Logger
:param file: File to log into.
:type file: unicode
:param formatter: Handler formatter.
:type formatter: Formatter
:return: Added handler.
:rtype: Handler
"""
logger = LOGGER if logger is None else logger
file = tempfile.NamedTemporaryFile().name if file is None else file
logging_file_handler = logging.FileHandler(file)
logging_file_handler.setFormatter(formatter)
logger.addHandler(logging_file_handler)
return logging_file_handler
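# Usage sketch: attach a file handler to a throwaway logger and write to it.
import logging
demo_logger = logging.getLogger("demo")
demo_logger.setLevel(logging.INFO)
handler = get_logging_file_handler(logger=demo_logger, file="/tmp/demo.log")
demo_logger.info("hello")
handler.close()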
def absent(
name,
region=None,
key=None,
keyid=None,
profile=None,
):
'''
Ensure the named sqs queue is deleted.
name
Name of the SQS queue.
region
Region to connect to.
key
Secret key to be used.
keyid
Access key to be used.
profile
A dict with region, key and keyid, or a pillar key (string)
that contains a dict with region, key and keyid.
'''
ret = {'name': name, 'result': True, 'comment': '', 'changes': {}}
r = __salt__['boto_sqs.exists'](
name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' in r:
ret['result'] = False
ret['comment'] = six.text_type(r['error'])
return ret
if not r['result']:
ret['comment'] = 'SQS queue {0} does not exist in {1}.'.format(
name,
region,
)
return ret
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'SQS queue {0} is set to be removed.'.format(name)
ret['changes'] = {'old': name, 'new': None}
return ret
r = __salt__['boto_sqs.delete'](
name,
region=region,
key=key,
keyid=keyid,
profile=profile,
)
if 'error' in r:
ret['result'] = False
ret['comment'] = six.text_type(r['error'])
return ret
ret['comment'] = 'SQS queue {0} was deleted.'.format(name)
ret['changes']['old'] = name
ret['changes']['new'] = None
return ret
def _validate_string(self, input_string, path_to_root, object_title=''):
'''
a helper method for validating properties of a string
:return: input_string
'''
rules_path_to_root = re.sub(r'\[\d+\]', '[0]', path_to_root)
input_criteria = self.keyMap[rules_path_to_root]
error_dict = {
'object_title': object_title,
'model_schema': self.schema,
'input_criteria': input_criteria,
'failed_test': 'value_datatype',
'input_path': path_to_root,
'error_value': input_string,
'error_code': 4001
}
if 'byte_data' in input_criteria.keys():
if input_criteria['byte_data']:
error_dict['failed_test'] = 'byte_data'
error_dict['error_code'] = 4011
try:
decoded_bytes = b64decode(input_string)
except Exception:
raise InputValidationError(error_dict)
if not isinstance(decoded_bytes, bytes):
raise InputValidationError(error_dict)
if 'min_value' in input_criteria.keys():
if input_string < input_criteria['min_value']:
error_dict['failed_test'] = 'min_value'
error_dict['error_code'] = 4022
raise InputValidationError(error_dict)
if 'max_value' in input_criteria.keys():
if input_string > input_criteria['max_value']:
error_dict['failed_test'] = 'max_value'
error_dict['error_code'] = 4023
raise InputValidationError(error_dict)
if 'greater_than' in input_criteria.keys():
if input_string <= input_criteria['greater_than']:
error_dict['failed_test'] = 'greater_than'
error_dict['error_code'] = 4024
raise InputValidationError(error_dict)
if 'less_than' in input_criteria.keys():
if input_string >= input_criteria['less_than']:
error_dict['failed_test'] = 'less_than'
error_dict['error_code'] = 4025
raise InputValidationError(error_dict)
if 'equal_to' in input_criteria.keys():
if input_string != input_criteria['equal_to']:
error_dict['failed_test'] = 'equal_to'
error_dict['error_code'] = 4026
raise InputValidationError(error_dict)
if 'min_length' in input_criteria.keys():
if len(input_string) < input_criteria['min_length']:
error_dict['failed_test'] = 'min_length'
error_dict['error_code'] = 4012
raise InputValidationError(error_dict)
if 'max_length' in input_criteria.keys():
if len(input_string) > input_criteria['max_length']:
error_dict['failed_test'] = 'max_length'
error_dict['error_code'] = 4013
raise InputValidationError(error_dict)
if 'must_not_contain' in input_criteria.keys():
for regex in input_criteria['must_not_contain']:
regex_pattern = re.compile(regex)
if regex_pattern.findall(input_string):
error_dict['failed_test'] = 'must_not_contain'
error_dict['error_code'] = 4014
raise InputValidationError(error_dict)
if 'must_contain' in input_criteria.keys():
for regex in input_criteria['must_contain']:
regex_pattern = re.compile(regex)
if not regex_pattern.findall(input_string):
error_dict['failed_test'] = 'must_contain'
error_dict['error_code'] = 4015
raise InputValidationError(error_dict)
if 'contains_either' in input_criteria.keys():
regex_match = False
for regex in input_criteria['contains_either']:
regex_pattern = re.compile(regex)
if regex_pattern.findall(input_string):
regex_match = True
if not regex_match:
error_dict['failed_test'] = 'contains_either'
error_dict['error_code'] = 4016
raise InputValidationError(error_dict)
if 'discrete_values' in input_criteria.keys():
if input_string not in input_criteria['discrete_values']:
error_dict['failed_test'] = 'discrete_values'
error_dict['error_code'] = 4041
raise InputValidationError(error_dict)
if 'excluded_values' in input_criteria.keys():
if input_string in input_criteria['excluded_values']:
error_dict['failed_test'] = 'excluded_values'
error_dict['error_code'] = 4042
raise InputValidationError(error_dict)
# TODO: validate string against identical to reference
# TODO: run lambda function and call validation url
return input_string
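# How the rules path is normalized above: every list index in the input path
# collapses to [0], so a single schema entry covers all items (sketch):
import re
assert re.sub(r'\[\d+\]', '[0]', '.users[3].emails[12]') == '.users[0].emails[0]'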
def savePkeyPem(self, pkey, path):
'''
Save a private key in PEM format to a file outside the certdir.
'''
with s_common.genfile(path) as fd:
fd.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, pkey))
def imshow(image, backend=IMSHOW_BACKEND_DEFAULT):
"""
Shows an image in a window.
dtype support::
* ``uint8``: yes; not tested
* ``uint16``: ?
* ``uint32``: ?
* ``uint64``: ?
* ``int8``: ?
* ``int16``: ?
* ``int32``: ?
* ``int64``: ?
* ``float16``: ?
* ``float32``: ?
* ``float64``: ?
* ``float128``: ?
* ``bool``: ?
Parameters
----------
image : (H,W,3) ndarray
Image to show.
backend : {'matplotlib', 'cv2'}, optional
Library to use to show the image. May be either matplotlib or OpenCV ('cv2').
OpenCV tends to be faster, but apparently causes more technical issues.
"""
do_assert(backend in ["matplotlib", "cv2"], "Expected backend 'matplotlib' or 'cv2', got %s." % (backend,))
if backend == "cv2":
image_bgr = image
if image.ndim == 3 and image.shape[2] in [3, 4]:
image_bgr = image[..., 0:3][..., ::-1]
win_name = "imgaug-default-window"
cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
cv2.imshow(win_name, image_bgr)
cv2.waitKey(0)
cv2.destroyWindow(win_name)
else:
# import only when necessary (faster startup; optional dependency; less fragile -- see issue #225)
import matplotlib.pyplot as plt
dpi = 96
h, w = image.shape[0] / dpi, image.shape[1] / dpi
w = max(w, 6) # if the figure is too narrow, the footer may appear and make the fig suddenly wider (ugly)
fig, ax = plt.subplots(figsize=(w, h), dpi=dpi)
fig.canvas.set_window_title("imgaug.imshow(%s)" % (image.shape,))
ax.imshow(image, cmap="gray") # cmap is only active for grayscale images
plt.show()
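# Usage sketch: show a uint8 RGB gradient with the matplotlib backend.
import numpy as np
gradient = np.tile(np.arange(256, dtype=np.uint8), (64, 1))
imshow(np.stack([gradient] * 3, axis=-1), backend="matplotlib")  # (64, 256, 3)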
def describe_role(name, region=None, key=None, keyid=None, profile=None):
'''
Get information for a role.
CLI Example:
.. code-block:: bash
salt myminion boto_iam.describe_role myirole
'''
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
try:
info = conn.get_role(name)
if not info:
return False
role = info.get_role_response.get_role_result.role
role['assume_role_policy_document'] = salt.utils.json.loads(_unquote(
role.assume_role_policy_document
))
# If Sid wasn't defined by the user, boto will still return a Sid in
# each policy. To properly check idempotently, let's remove the Sid
# from the return if it's not actually set.
for policy_key, policy in role['assume_role_policy_document'].items():
if policy_key == 'Statement':
for val in policy:
if 'Sid' in val and not val['Sid']:
del val['Sid']
return role
except boto.exception.BotoServerError as e:
log.debug(e)
log.error('Failed to get %s information.', name)
return False
def _get_view_method(self, request):
"""Get view method."""
if hasattr(self, 'action'):
return self.action if self.action else None
return request.method.lower()
def get_arr_desc(arr):
"""Get array description, in the form '<array type> <array shape>'"""
type_ = type(arr).__name__ # see also __qualname__
shape = getattr(arr, 'shape', None)
if shape is not None:
desc = '{type_} {shape}'
else:
desc = '{type_} <no shape>'
return desc.format(type_=type_, shape=shape)
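# Usage sketch:
import numpy as np
assert get_arr_desc(np.zeros((2, 3))) == 'ndarray (2, 3)'
assert get_arr_desc([1, 2, 3]) == 'list <no shape>'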
def coinc(self, s0, s1, slide, step):
"""
Calculate the coincident detection statistic.
Parameters
----------
s0: numpy.ndarray
Single detector ranking statistic for the first detector.
s1: numpy.ndarray
Single detector ranking statistic for the second detector.
slide: numpy.ndarray
Array of ints. These represent the multiple of the timeslide
interval to bring a pair of single detector triggers into coincidence.
step: float
The timeslide interval in seconds.
Returns
-------
coinc_stat: numpy.ndarray
An array of the coincident ranking statistic values
"""
rstat = s0['snglstat']**2. + s1['snglstat']**2.
cstat = rstat + 2. * self.logsignalrate(s0, s1, slide, step)
cstat[cstat < 0] = 0
return cstat ** 0.5
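# With no signal-rate term, the statistic reduces to the quadrature sum
# sqrt(s0**2 + s1**2); a numpy sketch of that baseline (illustrative values):
import numpy as np
s0 = np.array([5.0, 6.0])
s1 = np.array([5.0, 8.0])
baseline = np.sqrt(s0 ** 2 + s1 ** 2)  # -> [7.07..., 10.0]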
def put(self, obj):
"""Put request into queue.
Args:
obj (cheroot.server.HTTPConnection): HTTP connection
waiting to be processed
"""
self._queue.put(obj, block=True, timeout=self._queue_put_timeout)
if obj is _SHUTDOWNREQUEST:
return
def copy(self, filename=None):
"""Puts on destination as a temp file, renames on
the destination.
"""
dst = os.path.join(self.dst_path, filename)
src = os.path.join(self.src_path, filename)
dst_tmp = os.path.join(self.dst_tmp, filename)
self.put(src=src, dst=dst_tmp, callback=self.update_progress, confirm=True)
self.rename(src=dst_tmp, dst=dst)
def put(self, pid, record):
"""Handle the sort of the files through the PUT deposit files.
Expected input in body PUT:
.. code-block:: javascript
[
{
"id": 1
},
{
"id": 2
},
...
]
Permission required: `update_permission_factory`.
:param pid: Pid object (from url).
:param record: Record object resolved from the pid.
:returns: The files.
"""
try:
ids = [data['id'] for data in json.loads(
request.data.decode('utf-8'))]
except KeyError:
raise WrongFile()
record.files.sort_by(*ids)
record.commit()
db.session.commit()
return self.make_response(obj=record.files, pid=pid, record=record)
def unperturbed_hamiltonian(states):
r"""Return the unperturbed atomic hamiltonian for given states.
We calculate the atomic hamiltonian in the basis of the ground states of \
rubidium 87 (in GHz).
>>> g = State("Rb", 87, 5, 0, 1/Integer(2))
>>> magnetic_states = make_list_of_states([g], "magnetic")
>>> print(np.diag(unperturbed_hamiltonian(magnetic_states))/hbar/2/pi*1e-9)
[-4.2717+0.j -4.2717+0.j -4.2717+0.j 2.563 +0.j 2.563 +0.j 2.563 +0.j
2.563 +0.j 2.563 +0.j]
"""
Ne = len(states)
H0 = np.zeros((Ne, Ne), complex)
for i in range(Ne):
H0[i, i] = hbar*states[i].omega
return H0
def __cost(self, params, phase, X):
"""Computes activation, cost function, and derivative."""
params = self.__roll(params)
a = np.concatenate((np.ones((X.shape[0], 1)), X), axis=1) # This is a1
calculated_a = [a] # a1 is at index 0, a_n is at index n-1
calculated_z = [0] # There is no z1, z_n is at index n-1
for i, theta in enumerate(params): # calculated_a now contains a1, a2, a3 if there was only one hidden layer (two theta matrices)
z = calculated_a[-1] * theta.transpose() # z_n = a_n-1 * Theta_n-1'
calculated_z.append(z) # Save the new z_n
a = self.sigmoid(z) # a_n = sigmoid(z_n)
if i != len(params) - 1: # Don't append extra ones for the output layer
a = np.concatenate((np.ones((a.shape[0], 1)), a), axis=1) # Append the extra column of ones for all other layers
calculated_a.append(a) # Save the new a
if phase == 0:
if self.__num_labels > 1:
return np.argmax(calculated_a[-1], axis=1)
return np.round(calculated_a[-1])
J = np.sum(-np.multiply(self.__y, np.log(calculated_a[-1]))-np.multiply(1-self.__y, np.log(1-calculated_a[-1])))/self.__m # Calculate cost
if self.__lambda != 0: # If we're using regularization...
J += np.sum([np.sum(np.power(theta[:,1:], 2)) for theta in params])*self.__lambda/(2.0*self.__m) # ...add it from all theta matrices
if phase == 1:
return J
reversed_d = []
reversed_theta_grad = []
for i in range(len(params)): # For once per theta matrix...
if i == 0: # ...if it's the first one...
d = calculated_a[-1] - self.__y # ...initialize the error...
else: # ...otherwise d_n-1 = d_n * Theta_n-1[missing ones] .* sigmoid(z_n-1)
d = np.multiply(reversed_d[-1]*params[-i][:,1:], self.sigmoid_grad(calculated_z[-1-i])) # With i=1/1 hidden layer we're getting Theta2 at index -1, and z2 at index -2
reversed_d.append(d)
theta_grad = reversed_d[-1].transpose() * calculated_a[-i-2] / self.__m
if self.__lambda != 0:
theta_grad += np.concatenate((np.zeros((params[-1-i].shape[0], 1)), params[-1-i][:,1:]), axis=1) * self.__lambda / self.__m # regularization
reversed_theta_grad.append(theta_grad)
theta_grad = self.__unroll(reversed(reversed_theta_grad))
return theta_grad
def command_for_func(func):
"""Create a command that calls the given function."""
class FuncCommand(BaseCommand):
def run(self):
func()
update_package_data(self.distribution)
return FuncCommand
def evaluateplanarR2derivs(Pot,R,phi=None,t=0.):
"""
NAME:
evaluateplanarR2derivs
PURPOSE:
evaluate the second radial derivative of a (list of) planarPotential instance(s)
INPUT:
Pot - (list of) planarPotential instance(s)
R - Cylindrical radius (can be Quantity)
phi= azimuth (optional; can be Quantity)
t= time (optional; can be Quantity)
OUTPUT:
d2Phi/dR2(R(,phi,t))
HISTORY:
2010-10-09 - Written - Bovy (IAS)
"""
from .Potential import _isNonAxi
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) planarPotential instances is non-axisymmetric, but you did not provide phi")
if isinstance(Pot,list) \
and nu.all([isinstance(p,planarPotential) for p in Pot]):
sum= 0.
for pot in Pot:
if nonAxi:
sum+= pot.R2deriv(R,phi=phi,t=t,use_physical=False)
else:
sum+= pot.R2deriv(R,t=t,use_physical=False)
return sum
elif isinstance(Pot,planarPotential):
if nonAxi:
return Pot.R2deriv(R,phi=phi,t=t,use_physical=False)
else:
return Pot.R2deriv(R,t=t,use_physical=False)
else: #pragma: no cover
raise PotentialError("Input to 'evaluateplanarR2derivs' is neither a planarPotential instance nor a list of such instances")
def view_focused_activity(self) -> str:
'''View focused activity.'''
output, _ = self._execute(
'-s', self.device_sn, 'shell', 'dumpsys', 'activity', 'activities')
return re.findall(r'mFocusedActivity: .+(com[a-zA-Z0-9\.]+/.[a-zA-Z0-9\.]+)', output)[0]
def movie(args):
"""
%prog movie input.bed scaffolds.fasta chr1
Visualize history of scaffold OO. The history is contained within the
tourfile, generated by path(). For each historical scaffold OO, the program
plots a separate PDF file. The plots can be combined to show the progression
as a little animation. The third argument limits the plotting to a
specific pseudomolecule, for example `chr1`.
"""
p = OptionParser(movie.__doc__)
p.add_option("--gapsize", default=100, type="int",
help="Insert gaps of size between scaffolds")
add_allmaps_plot_options(p)
opts, args = p.parse_args(args)
if len(args) != 3:
sys.exit(not p.print_help())
inputbed, scaffoldsfasta, seqid = args
gapsize = opts.gapsize
pf = inputbed.rsplit(".", 1)[0]
agpfile = pf + ".chr.agp"
tourfile = pf + ".tour"
fp = open(tourfile)
sizes = Sizes(scaffoldsfasta).mapping
ffmpeg = "ffmpeg"
mkdir(ffmpeg)
score = cur_score = None
i = 1
for header, block in read_block(fp, ">"):
s, tag, label = header[1:].split()
if s != seqid:
continue
tour = block[0].split()
tour = [(x[:-1], x[-1]) for x in tour]
if label.startswith("GA"):
cur_score = label.split("-")[-1]
if cur_score == score:
i += 1
continue
score = cur_score
image_name = ".".join((seqid, "{0:04d}".format(i), label, "pdf"))
if need_update(tourfile, image_name):
fwagp = must_open(agpfile, "w")
order_to_agp(seqid, tour, sizes, fwagp, gapsize=gapsize,
gaptype="map")
fwagp.close()
logging.debug("{0} written to `{1}`".format(header, agpfile))
build([inputbed, scaffoldsfasta, "--cleanup"])
pdf_name = plot([inputbed, seqid, "--title={0}".format(label)])
sh("mv {0} {1}".format(pdf_name, image_name))
if label in ("INIT", "FLIP", "TSP", "FINAL"):
for j in xrange(5): # Delay for 5 frames
image_delay = image_name.rsplit(".", 1)[0] + \
".d{0}.pdf".format(j)
sh("cp {0} {1}/{2}".format(image_name, ffmpeg, image_delay))
else:
sh("cp {0} {1}/".format(image_name, ffmpeg))
i += 1
make_movie(ffmpeg, pf)
def order_transforms(transforms):
"""Orders transforms to ensure proper chaining.
For example, if `transforms = [B, A, C]`, and `A` produces outputs needed
by `B`, the transforms will be re-ordered to `[A, B, C]`.
Parameters
----------
transforms : list
List of transform instances to order.
Returns
-------
list :
List of transforms ordered such that forward transforms can be carried
out without error.
"""
# get a set of all inputs and all outputs
outputs = set().union(*[t.outputs for t in transforms])
out = []
remaining = [t for t in transforms]
while remaining:
# pull out transforms that have no inputs in the set of outputs
leftover = []
for t in remaining:
if t.inputs.isdisjoint(outputs):
out.append(t)
outputs -= t.outputs
else:
leftover.append(t)
if len(leftover) == len(remaining):
# nothing could be scheduled this pass; fail instead of looping forever
raise ValueError("Circular dependency detected among transforms")
remaining = leftover
return out
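# Self-contained sketch: stub transforms with input/output sets, ordered so
# producers come before consumers (illustrative names, not from the source).
class StubTransform(object):
    def __init__(self, name, inputs, outputs):
        self.name = name
        self.inputs = set(inputs)
        self.outputs = set(outputs)
    def __repr__(self):
        return self.name

A = StubTransform('A', inputs=[], outputs=['x'])
B = StubTransform('B', inputs=['x'], outputs=['y'])
C = StubTransform('C', inputs=[], outputs=['z'])
print(order_transforms([B, A, C]))  # -> [A, C, B]; A precedes its consumer B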
def remove_option(self, section, name, value=None):
"""Remove an option from a unit
Args:
section (str): The section to remove from.
name (str): The item to remove.
value (str, optional): If specified, only the option matching this value will be removed
If not specified, all options with ``name`` in ``section`` will be removed
Returns:
True: At least one item was removed
False: The item requested to remove was not found
"""
# Don't allow updating units we loaded from fleet, it's not supported
if self._is_live():
raise RuntimeError('Submitted units cannot update their options')
removed = 0
# iterate through a copy of the options
for option in list(self._data['options']):
# if it's in our section
if option['section'] == section:
# and it matches our name
if option['name'] == name:
# and they didn't give us a value, or it matches
if value is None or option['value'] == value:
# nuke it from the source
self._data['options'].remove(option)
removed += 1
if removed > 0:
return True
return False
def stub_request(self, expected_url, filename, status=None, body=None):
"""Stub a web request for testing."""
self.fake_web = True
self.faker = get_faker(expected_url, filename, status, body)
def _write_packet(self, packet, sec=None, usec=None, caplen=None,
wirelen=None):
"""
Writes a single packet to the pcap file.
:param packet: Packet, or bytes for a single packet
:type packet: Packet or bytes
:param sec: time the packet was captured, in seconds since epoch. If
not supplied, defaults to now.
:type sec: int or long
:param usec: If ``nano=True``, then number of nanoseconds after the
second that the packet was captured. If ``nano=False``,
then the number of microseconds after the second the
packet was captured. If ``sec`` is not specified,
this value is ignored.
:type usec: int or long
:param caplen: The length of the packet in the capture file. If not
specified, uses ``len(raw(packet))``.
:type caplen: int
:param wirelen: The length of the packet on the wire. If not
specified, tries ``packet.wirelen``, otherwise uses
``caplen``.
:type wirelen: int
:returns: None
:rtype: None
"""
if hasattr(packet, "time"):
if sec is None:
sec = int(packet.time)
usec = int(round((packet.time - sec) *
(1000000000 if self.nano else 1000000)))
if usec is None:
usec = 0
rawpkt = raw(packet)
caplen = len(rawpkt) if caplen is None else caplen
if wirelen is None:
if hasattr(packet, "wirelen"):
wirelen = packet.wirelen
if wirelen is None:
wirelen = caplen
RawPcapWriter._write_packet(
self, rawpkt, sec=sec, usec=usec, caplen=caplen, wirelen=wirelen)
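# Usage sketch via scapy's public PcapWriter, which ultimately calls
# _write_packet; assumes scapy is importable in this environment.
from scapy.all import Ether, PcapWriter
writer = PcapWriter("out.pcap", nano=False)
writer.write(Ether() / b"payload")  # sec/usec default to the packet's time
writer.close()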
def conference_mute(self, call_params):
"""REST Conference Mute helper
"""
path = '/' + self.api_version + '/ConferenceMute/'
method = 'POST'
return self.request(path, method, call_params)
def get_window_forecasts(self):
"""
Aggregate the forecasts within the specified time windows.
"""
for model_name in self.model_names:
self.window_forecasts[model_name] = {}
for size_threshold in self.size_thresholds:
self.window_forecasts[model_name][size_threshold] = \
np.array([self.raw_forecasts[model_name][size_threshold][sl].sum(axis=0)
for sl in self.hour_windows])
def create_asset(self, ):
"""Create a asset and store it in the self.asset
:returns: None
:rtype: None
:raises: None
"""
name = self.name_le.text()
if not name:
self.name_le.setPlaceholderText("Please enter a name!")
return
desc = self.desc_pte.toPlainText()
if not self.atype:
atypei = self.atype_cb.currentIndex()
assert atypei >= 0
self.atype = self.atypes[atypei]
try:
asset = djadapter.models.Asset(atype=self.atype, project=self.project, name=name, description=desc)
asset.save()
self.asset = asset
self.accept()
except Exception:
log.exception("Could not create new asset")
def enable_global_typelogged_profiler(flag = True):
"""Enables or disables global typelogging mode via a profiler.
See flag global_typelogged_profiler.
Does not work if typelogging_enabled is false.
"""
global global_typelogged_profiler, _global_type_agent, global_typechecked_profiler
global_typelogged_profiler = flag
if flag and typelogging_enabled:
if _global_type_agent is None:
_global_type_agent = TypeAgent()
_global_type_agent.start()
elif not _global_type_agent.active:
_global_type_agent.start()
elif not flag and not global_typechecked_profiler and \
_global_type_agent is not None and _global_type_agent.active:
_global_type_agent.stop()
def subscribeToDeviceCommands(self, typeId="+", deviceId="+", commandId="+", msgFormat="+"):
"""
Subscribe to device command messages
# Parameters
typeId (string): typeId for the subscription, optional. Defaults to all device types (MQTT `+` wildcard)
deviceId (string): deviceId for the subscription, optional. Defaults to all devices (MQTT `+` wildcard)
commandId (string): commandId for the subscription, optional. Defaults to all commands (MQTT `+` wildcard)
msgFormat (string): msgFormat for the subscription, optional. Defaults to all formats (MQTT `+` wildcard)
# Returns
int: If the subscription was successful then the return Message ID (mid) for the subscribe request
will be returned. The mid value can be used to track the subscribe request by checking against
the mid argument if you register a subscriptionCallback method.
If the subscription fails then the return value will be `0`
"""
if self._config.isQuickstart():
self.logger.warning("QuickStart applications do not support commands")
return 0
topic = "iot-2/type/%s/id/%s/cmd/%s/fmt/%s" % (typeId, deviceId, commandId, msgFormat)
return self._subscribe(topic, 0)
def remove(self, child):
'''Remove a ``child`` from the list of :attr:`children`.'''
try:
self.children.remove(child)
if isinstance(child, String):
child._parent = None
except ValueError:
pass
def visit_keyword(self, node):
"""return an astroid.Keyword node as string"""
if node.arg is None:
return "**%s" % node.value.accept(self)
return "%s=%s" % (node.arg, node.value.accept(self))
def setup(service_manager, conf, reload_method="reload"):
"""Load services configuration from oslo config object.
It reads ServiceManager and Service configuration options from an
oslo_config.ConfigOpts() object. It also registers a ServiceManager hook to
reload the configuration file on reload in the master process and in all
children. When each child starts or reloads, the configuration
options are logged if the oslo config option 'log_options' is True.
On children, the configuration file is reloaded before running the
application's reload method.
Options currently supported on ServiceManager and Service:
* graceful_shutdown_timeout
:param service_manager: ServiceManager instance
:type service_manager: cotyledon.ServiceManager
:param conf: Oslo Config object
:type conf: oslo_config.ConfigOpts()
:param reload_method: reload or mutate the config files
:type reload_method: str "reload/mutate"
"""
conf.register_opts(service_opts)
# Set cotyledon options from oslo config options
_load_service_manager_options(service_manager, conf)
def _service_manager_reload():
_configfile_reload(conf, reload_method)
_load_service_manager_options(service_manager, conf)
if os.name != "posix":
# NOTE(sileht): reloading can't be supported, oslo.config is not picklable.
# But we don't care: SIGHUP is not supported on Windows anyway.
return
service_manager.register_hooks(
on_new_worker=functools.partial(
_new_worker_hook, conf, reload_method),
on_reload=_service_manager_reload)
def rts_smoother(cls,state_dim, p_dynamic_callables, filter_means,
filter_covars):
"""
This function implements the Rauch–Tung–Striebel (RTS) smoother algorithm
based on the results of kalman_filter_raw.
The notation used is:
x_{k} = A_{k} * x_{k-1} + q_{k-1}; q_{k-1} ~ N(0, Q_{k-1})
y_{k} = H_{k} * x_{k} + r_{k}; r_{k-1} ~ N(0, R_{k})
Returns estimated smoother distributions x_{k} ~ N(m_{k}, P_{k})
Input:
--------------
p_a: function (k, x_{k-1}, A_{k}). Dynamic function.
k (iteration number), starts at 0
x_{k-1} State from the previous step
A_{k} Jacobian matrices of f_a. In the linear case it is exactly A_{k}.
p_f_A: function (k, m, P) return Jacobian of dynamic function, it is
passed into p_a.
k (iteration number), starts at 0
m: point where Jacobian is evaluated
P: parameter for Jacobian, usually covariance matrix.
p_f_Q: function (k). Returns noise matrix of dynamic model on iteration k.
k (iteration number). starts at 0
filter_means: (no_steps+1,state_dim) matrix or (no_steps+1,state_dim, time_series_no) 3D array
Results of the Kalman Filter means estimation.
filter_covars: (no_steps+1, state_dim, state_dim) 3D array
Results of the Kalman Filter covariance estimation.
Output:
-------------
M: (no_steps+1, state_dim) matrix
Smoothed estimates of the state means
P: (no_steps+1, state_dim, state_dim) 3D array
Smoothed estimates of the state covariances
"""
no_steps = filter_covars.shape[0]-1 # number of steps (minus initial covariance)
M = np.empty(filter_means.shape) # smoothed means
P = np.empty(filter_covars.shape) # smoothed covars
#G = np.empty( (no_steps,state_dim,state_dim) ) # G from the update step of the smoother
M[-1,:] = filter_means[-1,:]
P[-1,:,:] = filter_covars[-1,:,:]
for k in range(no_steps-1,-1,-1):
m_pred, P_pred, tmp1, tmp2 = \
cls._kalman_prediction_step(k, filter_means[k,:],
filter_covars[k,:,:], p_dynamic_callables,
calc_grad_log_likelihood=False)
p_m = filter_means[k,:]
if len(p_m.shape)<2:
p_m.shape = (p_m.shape[0],1)
p_m_prev_step = M[k+1,:]
if len(p_m_prev_step.shape)<2:
p_m_prev_step.shape = (p_m_prev_step.shape[0],1)
m_upd, P_upd, G_tmp = cls._rts_smoother_update_step(k,
p_m ,filter_covars[k,:,:],
m_pred, P_pred, p_m_prev_step ,P[k+1,:,:], p_dynamic_callables)
M[k,:] = m_upd#np.squeeze(m_upd)
P[k,:,:] = P_upd
#G[k,:,:] = G_upd.T # store transposed G.
# Return values
return (M, P)
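# A minimal standalone linear-Gaussian RTS backward pass (a sketch assuming
# constant A and Q; not the class method above, which supports nonlinear
# callables). It mirrors the smoother update used there:
#   m_pred = A m_k,  P_pred = A P_k A' + Q,  G = P_k A' inv(P_pred)
#   m_s[k] = m_k + G (m_s[k+1] - m_pred),  P_s[k] = P_k + G (P_s[k+1] - P_pred) G'
import numpy as np

def rts_smooth_linear(A, Q, filter_means, filter_covars):
    M = np.array(filter_means, dtype=float)   # becomes the smoothed means
    P = np.array(filter_covars, dtype=float)  # becomes the smoothed covariances
    for k in range(M.shape[0] - 2, -1, -1):
        m_pred = A @ filter_means[k]
        P_pred = A @ filter_covars[k] @ A.T + Q
        G = filter_covars[k] @ A.T @ np.linalg.inv(P_pred)
        M[k] = filter_means[k] + G @ (M[k + 1] - m_pred)
        P[k] = filter_covars[k] + G @ (P[k + 1] - P_pred) @ G.T
    return M, P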
def runblast(self, assembly, allele, sample):
"""
Run the BLAST analyses
:param assembly: assembly path/file
:param allele: combined allele file
:param sample: sample object
:return:
"""
genome = os.path.split(assembly)[1].split('.')[0]
# Run the BioPython BLASTn module with the genome as query, fasta(target gene) as db.
# Do not re-perform the BLAST search each time
make_path(sample[self.analysistype].reportdir)
try:
report = glob('{}{}*rawresults*'.format(sample[self.analysistype].reportdir, genome))[0]
size = os.path.getsize(report)
if size == 0:
os.remove(report)
report = '{}{}_rawresults_{:}.csv'.format(sample[self.analysistype].reportdir, genome,
time.strftime("%Y.%m.%d.%H.%M.%S"))
except IndexError:
report = '{}{}_rawresults_{:}.csv'.format(sample[self.analysistype].reportdir, genome,
time.strftime("%Y.%m.%d.%H.%M.%S"))
db = allele.split('.')[0]
# BLAST command line call. Note the mildly restrictive evalue, and the high number of alignments.
# Due to the fact that all the targets are combined into one database, this is to ensure that all potential
# alignments are reported. Also note the custom outfmt: the doubled quotes are necessary to get it to work
blastn = NcbiblastnCommandline(query=assembly, db=db, evalue='1E-20', num_alignments=1000000,
num_threads=12,
outfmt="'6 qseqid sseqid positive mismatch gaps "
"evalue bitscore slen length qstart qend qseq sstart send'",
out=report)
# Save the blast command in the metadata
sample[self.analysistype].blastcommand = str(blastn)
sample[self.analysistype].blastreport = report
if not os.path.isfile(report):
# Run BLAST
blastn()
# Run the blast parsing module
self.blastparser(report, sample)
def job_status(job_id, show_job_key=False, ignore_auth=False):
'''Show a specific job.
**Results:**
:rtype: A dictionary with the following keys
:param status: Status of job (complete, error)
:type status: string
:param sent_data: Input data for job
:type sent_data: json encodable data
:param job_id: An identifier for the job
:type job_id: string
:param result_url: Callback url
:type result_url: url string
:param data: Results from job.
:type data: json encodable data
:param error: Error raised during job execution
:type error: string
:param metadata: Metadata provided when submitting job.
:type metadata: list of key - value pairs
:param requested_timestamp: Time the job started
:type requested_timestamp: timestamp
:param finished_timestamp: Time the job finished
:type finished_timestamp: timestamp
:statuscode 200: no error
:statuscode 403: not authorized to view the job's data
:statuscode 404: job id not found
:statuscode 409: an error occurred
'''
job_dict = db.get_job(job_id)
if not job_dict:
return json.dumps({'error': 'job_id not found'}), 404, headers
if not ignore_auth and not is_authorized(job_dict):
return json.dumps({'error': 'not authorized'}), 403, headers
job_dict.pop('api_key', None)
if not show_job_key:
job_dict.pop('job_key', None)
return flask.Response(json.dumps(job_dict, cls=DatetimeJsonEncoder),
mimetype='application/json')
def read_config(self, correlation_id, parameters):
"""
Reads configuration and parameterizes it with given values.
:param correlation_id: (optional) transaction id to trace execution through call chain.
:param parameters: values to parameterize the configuration, or null to skip parameterization.
:return: ConfigParams configuration.
"""
value = self._read_object(correlation_id, parameters)
return ConfigParams.from_value(value)
def register_callback_subscribed(self, callback):
"""
Register a callback for new subscription. This gets called whenever one of *your* things subscribes to something
else.
`Note` it is not called whenever something else subscribes to your thing.
The payload passed to your callback is either a
[RemoteControl](RemotePoint.m.html#IoticAgent.IOT.RemotePoint.RemoteControl) or
[RemoteFeed](RemotePoint.m.html#IoticAgent.IOT.RemotePoint.RemoteFeed) instance.
"""
return self.__client.register_callback_created(partial(self.__callback_subscribed_filter, callback),
serialised=False)
def parse_line(self, line, lineno):
"""Parse a single line of the log.
We have to handle both buildbot style logs as well as Taskcluster logs. The latter
attempt to emulate the buildbot logs, but don't accurately do so, partly due
to the way logs are generated in Taskcluster (ie: on the workers themselves).
Buildbot logs:
builder: ...
slave: ...
starttime: ...
results: ...
buildid: ...
builduid: ...
revision: ...
======= <step START marker> =======
<step log output>
======= <step FINISH marker> =======
======= <step START marker> =======
<step log output>
======= <step FINISH marker> =======
Taskcluster logs (a worst-case example):
<log output outside a step>
======= <step START marker> =======
<step log output>
======= <step FINISH marker> =======
<log output outside a step>
======= <step START marker> =======
<step log output with no following finish marker>
As can be seen above, Taskcluster logs can have (a) log output that falls between
step markers, and (b) content at the end of the log, that is not followed by a
final finish step marker. We handle this by creating generic placeholder steps to
hold the log output that is not enclosed by step markers, and then by cleaning up
the final step in finish_parse() once all lines have been parsed.
"""
if not line.strip():
# Skip whitespace-only lines, since they will never contain an error line,
# so are not of interest. This also avoids creating spurious unnamed steps
# (which occurs when we find content outside of step markers) for the
# newlines that separate the steps in Buildbot logs.
return
if self.state == self.STATES['awaiting_first_step'] and self.RE_HEADER_LINE.match(line):
# The "key: value" job metadata header lines that appear at the top of
# Buildbot logs would result in the creation of an unnamed step at the
# start of the job, unless we skip them. (Which is not desired, since
# the lines are metadata and not test/build output.)
return
step_marker_match = self.RE_STEP_MARKER.match(line)
if not step_marker_match:
# This is a normal log line, rather than a step marker. (The common case.)
if self.state != self.STATES['step_in_progress']:
# We don't have an in-progress step, so need to start one, even though this
# isn't a "step started" marker line. We therefore create a new generic step,
# since we have no way of finding out the step metadata. This case occurs
# for the Taskcluster logs where content can fall between step markers.
self.start_step(lineno)
# Parse the line for errors, which if found, will be associated with the current step.
self.sub_parser.parse_line(line, lineno)
return
# This is either a "step started" or "step finished" marker line, eg:
# ========= Started foo (results: 0, elapsed: 0 secs) (at 2015-08-17 02:33:56.353866) =========
# ========= Finished foo (results: 0, elapsed: 0 secs) (at 2015-08-17 02:33:56.354301) =========
if step_marker_match.group('marker_type') == 'Started':
if self.state == self.STATES['step_in_progress']:
# We're partway through a step (ie: haven't seen a "step finished" marker line),
# but have now reached the "step started" marker for the next step. Before we
# can start the new step, we have to clean up the previous one - albeit using
# generic step metadata, since there was no "step finished" marker. This occurs
# in Taskcluster's logs when content falls between the step marker lines.
self.end_step(lineno)
# Start a new step using the extracted step metadata.
self.start_step(lineno,
name=step_marker_match.group('name'),
timestamp=step_marker_match.group('timestamp'))
return
# This is a "step finished" marker line.
if self.state != self.STATES['step_in_progress']:
# We're not in the middle of a step, so can't finish one. Just ignore the marker line.
return
# Close out the current step using the extracted step metadata.
self.end_step(lineno,
timestamp=step_marker_match.group('timestamp'),
result_code=int(step_marker_match.group('result_code')))
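# A hedged sketch of what RE_STEP_MARKER might look like (an assumption for
# illustration; the real pattern is defined elsewhere in this parser):
import re
RE_STEP_MARKER_SKETCH = re.compile(
    r'={9} (?P<marker_type>Started|Finished) (?P<name>.*?) '
    r'\(results: (?P<result_code>\d+), elapsed: .*?\) '
    r'\(at (?P<timestamp>.*?)\) ={9}'
)
line = ("========= Started foo (results: 0, elapsed: 0 secs) "
        "(at 2015-08-17 02:33:56.353866) =========")
match = RE_STEP_MARKER_SKETCH.match(line)
assert match.group('marker_type') == 'Started' and match.group('name') == 'foo'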
def _intersection_with_dsis(self, dsis):
"""
Intersection with another :class:`DiscreteStridedIntervalSet`.
:param dsis: The other operand.
:return:
"""
new_si_set = set()
for si in dsis._si_set:
r = self._intersection_with_si(si)
if isinstance(r, StridedInterval):
if not r.is_empty:
new_si_set.add(r)
else: # r is a DiscreteStridedIntervalSet
new_si_set |= r._si_set
if new_si_set:
ret = DiscreteStridedIntervalSet(bits=self.bits, si_set=new_si_set)
return ret.normalize()
else:
return StridedInterval.empty(self.bits)
def build_managers(app, conf):
"""
Takes in a config file as outlined in job_managers.ini.sample and builds
a dictionary of job manager objects from them.
"""
# Load default options from config file that apply to all
# managers.
default_options = _get_default_options(conf)
manager_descriptions = ManagerDescriptions()
if "job_managers_config" in conf:
job_managers_config = conf.get("job_managers_config", None)
_populate_manager_descriptions_from_ini(manager_descriptions, job_managers_config)
elif "managers" in conf:
for manager_name, manager_options in conf["managers"].items():
manager_description = ManagerDescription.from_dict(manager_options, manager_name)
manager_descriptions.add(manager_description)
elif "manager" in conf:
manager_description = ManagerDescription.from_dict(conf["manager"])
manager_descriptions.add(manager_description)
else:
manager_descriptions.add(ManagerDescription())
manager_classes = _get_managers_dict()
managers = {}
for manager_name, manager_description in manager_descriptions.descriptions.items():
manager_options = dict(default_options)
manager_options.update(manager_description.manager_options)
manager_class = manager_classes[manager_description.manager_type]
manager = _build_manager(manager_class, app, manager_name, manager_options)
managers[manager_name] = manager
return managers
def _init_objaartall(self):
"""Get background database info for making ASCII art."""
kws = {
'sortgo':lambda nt: [nt.NS, nt.dcnt],
# fmtgo=('{p_fdr_bh:8.2e} {GO} '
# Formatting for GO terms in grouped GO list
'fmtgo':('{hdr1usr01:2} {NS} {GO} {s_fdr_bh:8} '
'{dcnt:5} {childcnt:3} R{reldepth:02} '
'{D1:5} {GO_name} ({study_count} study genes)\n'),
# Formatting for GO terms listed under each gene
'fmtgo2':('{hdr1usr01:2} {NS} {GO} {s_fdr_bh:8} '
'{dcnt:5} R{reldepth:02} '
'{GO_name} ({study_count} study genes)\n'),
# itemid2name=ensmusg2symbol}
}
return AArtGeneProductSetsAll(self.grprdflt, self.hdrobj, **kws)
def get_program(name, config, ptype="cmd", default=None):
"""Retrieve program information from the configuration.
This handles back compatible location specification in input
YAML. The preferred location for program information is in
`resources` but the older `program` tag is also supported.
"""
# support taking in the data dictionary
config = config.get("config", config)
try:
pconfig = config.get("resources", {})[name]
# Fall back to the legacy 'program' tag below if 'resources' has no entry
except KeyError:
pconfig = {}
old_config = config.get("program", {}).get(name, None)
if old_config:
for key in ["dir", "cmd"]:
if key not in pconfig:
pconfig[key] = old_config
if ptype == "cmd":
return _get_program_cmd(name, pconfig, config, default)
elif ptype == "dir":
return _get_program_dir(name, pconfig)
else:
raise ValueError("Don't understand program type: %s" % ptype)
def _get_error_message(response):
"""Attempt to extract an error message from response body"""
try:
data = response.json()
if "error_description" in data:
return data['error_description']
if "error" in data:
return data['error']
except Exception:
pass
return "Unknown error"
def p_annotation_spdx_id_1(self, p):
"""annotation_spdx_id : ANNOTATION_SPDX_ID LINE"""
try:
if six.PY2:
value = p[2].decode(encoding='utf-8')
else:
value = p[2]
self.builder.set_annotation_spdx_id(self.document, value)
except CardinalityError:
self.more_than_one_error('SPDXREF', p.lineno(1))
except OrderError:
self.order_error('SPDXREF', 'Annotator', p.lineno(1))
def track_time(self, name, description='', max_rows=None):
"""
Create a Timer object in the Tracker.
"""
if name in self._tables:
raise TableConflictError(name)
if max_rows is None:
max_rows = AnonymousUsageTracker.MAX_ROWS_PER_TABLE
self.register_table(name, self.uuid, 'Timer', description)
self._tables[name] = Timer(name, self, max_rows=max_rows)
def save_as_pil(self, fname, pixel_array=None):
""" This method saves the image from a numpy array using Pillow
(PIL fork)
:param fname: Location and name of the image file to be saved.
:param pixel_array: Numpy pixel array, i.e. ``numpy()`` return value
This method will return True if successful
"""
if pixel_array is None:
pixel_array = self.numpy
from PIL import Image as pillow
pil_image = pillow.fromarray(pixel_array.astype('uint8'))
pil_image.save(fname)
return True
def get_opener(self, protocol):
# type: (Text) -> Opener
"""Get the opener class associated to a given protocol.
Arguments:
protocol (str): A filesystem protocol.
Returns:
Opener: an opener instance.
Raises:
~fs.opener.errors.UnsupportedProtocol: If no opener
could be found for the given protocol.
EntryPointLoadingError: If the returned entry point
is not an `Opener` subclass or could not be loaded
successfully.
"""
protocol = protocol or self.default_opener
if self.load_extern:
entry_point = next(
pkg_resources.iter_entry_points("fs.opener", protocol), None
)
else:
entry_point = None
# If no entry point was loaded from the extensions, try looking
# into the registered protocols
if entry_point is None:
if protocol in self._protocols:
opener_instance = self._protocols[protocol]
else:
raise UnsupportedProtocol(
"protocol '{}' is not supported".format(protocol)
)
# If an entry point was found in an extension, attempt to load it
else:
try:
opener = entry_point.load()
except Exception as exception:
raise EntryPointError(
"could not load entry point; {}".format(exception)
)
if not issubclass(opener, Opener):
raise EntryPointError("entry point did not return an opener")
try:
opener_instance = opener()
except Exception as exception:
raise EntryPointError(
"could not instantiate opener; {}".format(exception)
)
return opener_instance
|
Get the opener class associated to a given protocol.
Arguments:
protocol (str): A filesystem protocol.
Returns:
Opener: an opener instance.
Raises:
~fs.opener.errors.UnsupportedProtocol: If no opener
could be found for the given protocol.
EntryPointError: If the returned entry point
is not an `Opener` subclass or could not be loaded
successfully.
|
def delete_all_but(self, prefix, name):
"""
:param prefix: INDEX MUST HAVE THIS AS A PREFIX AND THE REMAINDER MUST BE DATE_TIME
:param name: INDEX WITH THIS NAME IS NOT DELETED
:return:
"""
if prefix == name:
Log.note("{{index_name}} will not be deleted", {"index_name": prefix})
for a in self.get_aliases():
            # MATCH <prefix>YYYYMMDD_HHMMSS FORMAT
if re.match(re.escape(prefix) + "\\d{8}_\\d{6}", a.index) and a.index != name:
self.delete_index(a.index)
|
:param prefix: INDEX MUST HAVE THIS AS A PREFIX AND THE REMAINDER MUST BE DATE_TIME
:param name: INDEX WITH THIS NAME IS NOT DELETED
:return:
|
def get_undefined_namespaces(graph: BELGraph) -> Set[str]:
"""Get all namespaces that are used in the BEL graph aren't actually defined."""
return {
exc.namespace
for _, exc, _ in graph.warnings
if isinstance(exc, UndefinedNamespaceWarning)
}
|
Get all namespaces that are used in the BEL graph but aren't actually defined.
|
def cons(self, i):
""" True iff b[i] is a consonant """
if self.b[i] in 'aeiou':
return False
elif self.b[i] == 'y':
return True if i == 0 else not self.cons(i-1)
return True
|
True iff b[i] is a consonant
|
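A worked example of the consonant test above, wrapping it in a minimal holder class so that self.b exists (the buffer attribute this Porter-style stemmer method expects):
class Stem:
    cons = cons  # reuse the function above as a method

    def __init__(self, b):
        self.b = b

s = Stem("toy")
print(s.cons(0))  # True:  't' is a consonant
print(s.cons(1))  # False: 'o' is a vowel
print(s.cons(2))  # True:  'y' after a vowel counts as a consonant
print(Stem("yes").cons(0))  # True: a leading 'y' is treated as a consonant
|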
def load(ctx, variant_source, family_file, family_type, root):
"""
Load a variant source into the database.
If no database was found run puzzle init first.
1. VCF: If a vcf file is used it can be loaded with a ped file
    2. GEMINI: Ped information will be retrieved from the gemini db
"""
root = root or ctx.obj.get('root') or os.path.expanduser("~/.puzzle")
if os.path.isfile(root):
logger.error("'root' can't be a file")
ctx.abort()
logger.info("Root directory is: {}".format(root))
db_path = os.path.join(root, 'puzzle_db.sqlite3')
logger.info("db path is: {}".format(db_path))
if not os.path.exists(db_path):
logger.warn("database not initialized, run 'puzzle init'")
ctx.abort()
if not os.path.isfile(variant_source):
logger.error("Variant source has to be a file")
ctx.abort()
mode = get_file_type(variant_source)
if mode == 'unknown':
logger.error("Unknown file type")
ctx.abort()
    # Test if gemini is installed
elif mode == 'gemini':
logger.debug("Initialzing GEMINI plugin")
if not GEMINI:
logger.error("Need to have gemini installed to use gemini plugin")
ctx.abort()
logger.debug('Set puzzle backend to {0}'.format(mode))
variant_type = get_variant_type(variant_source)
logger.debug('Set variant type to {0}'.format(variant_type))
cases = get_cases(
variant_source=variant_source,
case_lines=family_file,
case_type=family_type,
variant_type=variant_type,
variant_mode=mode
)
if len(cases) == 0:
logger.warning("No cases found")
ctx.abort()
logger.info("Initializing sqlite plugin")
store = SqlStore(db_path)
for case_obj in cases:
if store.case(case_obj.case_id) is not None:
logger.warn("{} already exists in the database"
.format(case_obj.case_id))
continue
# extract case information
logger.debug("adding case: {} to puzzle db".format(case_obj.case_id))
store.add_case(case_obj, vtype=variant_type, mode=mode)
|
Load a variant source into the database.
If no database was found run puzzle init first.
1. VCF: If a vcf file is used it can be loaded with a ped file
2. GEMINI: Ped information will be retrieved from the gemini db
|
def parse(text):
"""Parse the given text into metadata and strip it for a Markdown parser.
:param text: text to be parsed
"""
rv = {}
m = META.match(text)
while m:
key = m.group(1)
value = m.group(2)
value = INDENTATION.sub('\n', value.strip())
rv[key] = value
text = text[len(m.group(0)):]
m = META.match(text)
return rv, text
|
Parse the given text into metadata and strip it for a Markdown parser.
:param text: text to be parsed
|
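A runnable sketch of the parser above. META and INDENTATION are module-level regexes not shown in this snippet, so the definitions below are assumptions modeled on common Markdown-metadata conventions:
import re

META = re.compile(r'^(\w+):\s*(.*(?:\n[ \t]+.*)*)\n')  # assumed definition
INDENTATION = re.compile(r'\n[ \t]+')                  # assumed definition

meta, body = parse("title: Hello\ntags: a\n  b\n\n# Heading\n")
print(meta)  # {'title': 'Hello', 'tags': 'a\nb'}
print(body)  # '\n# Heading\n'
|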
def allFileExists(fileList):
"""Check that all file exists.
:param fileList: the list of file to check.
:type fileList: list
Check if all the files in ``fileList`` exists.
"""
allExists = True
for fileName in fileList:
allExists = allExists and os.path.isfile(fileName)
return allExists
|
Check that all file exists.
:param fileList: the list of file to check.
:type fileList: list
Check if all the files in ``fileList`` exists.
|
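The same check can be expressed with the built-in all(), which also short-circuits at the first missing file instead of always scanning the whole list; an equivalent sketch:
import os

def all_files_exist(file_list):
    # Stops at the first missing file; otherwise equivalent to the loop above.
    return all(os.path.isfile(f) for f in file_list)
|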
def rpc_name(rpc_id):
"""Map an RPC id to a string name.
This function looks the RPC up in a map of all globally declared RPCs,
    and returns a nice name string. If the RPC is not found in the global
    name map, returns a generic name string such as 'RPC 0x%04X'.
Args:
rpc_id (int): The id of the RPC that we wish to look up.
Returns:
str: The nice name of the RPC.
"""
name = _RPC_NAME_MAP.get(rpc_id)
if name is None:
name = 'RPC 0x%04X' % rpc_id
return name
|
Map an RPC id to a string name.
This function looks the RPC up in a map of all globally declared RPCs,
and returns a nice name string. If the RPC is not found in the global
name map, returns a generic name string such as 'RPC 0x%04X'.
Args:
rpc_id (int): The id of the RPC that we wish to look up.
Returns:
str: The nice name of the RPC.
|
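A small usage sketch; _RPC_NAME_MAP is module state not shown here, so the sample contents below are assumptions:
_RPC_NAME_MAP = {0x0004: 'controller_info'}  # assumed sample contents

print(rpc_name(0x0004))  # controller_info
print(rpc_name(0x8001))  # RPC 0x8001 (generic fallback name)
|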
def _handshake(self):
"""
Perform an initial TLS handshake
"""
session_context = None
ssl_policy_ref = None
crl_search_ref = None
crl_policy_ref = None
ocsp_search_ref = None
ocsp_policy_ref = None
policy_array_ref = None
try:
if osx_version_info < (10, 8):
session_context_pointer = new(Security, 'SSLContextRef *')
result = Security.SSLNewContext(False, session_context_pointer)
handle_sec_error(result)
session_context = unwrap(session_context_pointer)
else:
session_context = Security.SSLCreateContext(
null(),
SecurityConst.kSSLClientSide,
SecurityConst.kSSLStreamType
)
result = Security.SSLSetIOFuncs(
session_context,
_read_callback_pointer,
_write_callback_pointer
)
handle_sec_error(result)
self._connection_id = id(self) % 2147483647
_connection_refs[self._connection_id] = self
_socket_refs[self._connection_id] = self._socket
result = Security.SSLSetConnection(session_context, self._connection_id)
handle_sec_error(result)
utf8_domain = self._hostname.encode('utf-8')
result = Security.SSLSetPeerDomainName(
session_context,
utf8_domain,
len(utf8_domain)
)
handle_sec_error(result)
if osx_version_info >= (10, 10):
disable_auto_validation = self._session._manual_validation or self._session._extra_trust_roots
explicit_validation = (not self._session._manual_validation) and self._session._extra_trust_roots
else:
disable_auto_validation = True
explicit_validation = not self._session._manual_validation
# Ensure requested protocol support is set for the session
if osx_version_info < (10, 8):
for protocol in ['SSLv2', 'SSLv3', 'TLSv1']:
protocol_const = _PROTOCOL_STRING_CONST_MAP[protocol]
enabled = protocol in self._session._protocols
result = Security.SSLSetProtocolVersionEnabled(
session_context,
protocol_const,
enabled
)
handle_sec_error(result)
if disable_auto_validation:
result = Security.SSLSetEnableCertVerify(session_context, False)
handle_sec_error(result)
else:
protocol_consts = [_PROTOCOL_STRING_CONST_MAP[protocol] for protocol in self._session._protocols]
min_protocol = min(protocol_consts)
max_protocol = max(protocol_consts)
result = Security.SSLSetProtocolVersionMin(
session_context,
min_protocol
)
handle_sec_error(result)
result = Security.SSLSetProtocolVersionMax(
session_context,
max_protocol
)
handle_sec_error(result)
if disable_auto_validation:
result = Security.SSLSetSessionOption(
session_context,
SecurityConst.kSSLSessionOptionBreakOnServerAuth,
True
)
handle_sec_error(result)
# Disable all sorts of bad cipher suites
supported_ciphers_pointer = new(Security, 'size_t *')
result = Security.SSLGetNumberSupportedCiphers(session_context, supported_ciphers_pointer)
handle_sec_error(result)
supported_ciphers = deref(supported_ciphers_pointer)
cipher_buffer = buffer_from_bytes(supported_ciphers * 4)
supported_cipher_suites_pointer = cast(Security, 'uint32_t *', cipher_buffer)
result = Security.SSLGetSupportedCiphers(
session_context,
supported_cipher_suites_pointer,
supported_ciphers_pointer
)
handle_sec_error(result)
supported_ciphers = deref(supported_ciphers_pointer)
supported_cipher_suites = array_from_pointer(
Security,
'uint32_t',
supported_cipher_suites_pointer,
supported_ciphers
)
good_ciphers = []
for supported_cipher_suite in supported_cipher_suites:
cipher_suite = int_to_bytes(supported_cipher_suite, width=2)
cipher_suite_name = CIPHER_SUITE_MAP.get(cipher_suite, cipher_suite)
good_cipher = _cipher_blacklist_regex.search(cipher_suite_name) is None
if good_cipher:
good_ciphers.append(supported_cipher_suite)
num_good_ciphers = len(good_ciphers)
good_ciphers_array = new(Security, 'uint32_t[]', num_good_ciphers)
array_set(good_ciphers_array, good_ciphers)
good_ciphers_pointer = cast(Security, 'uint32_t *', good_ciphers_array)
result = Security.SSLSetEnabledCiphers(
session_context,
good_ciphers_pointer,
num_good_ciphers
)
handle_sec_error(result)
# Set a peer id from the session to allow for session reuse, the hostname
# is appended to prevent a bug on OS X 10.7 where it tries to reuse a
# connection even if the hostnames are different.
peer_id = self._session._peer_id + self._hostname.encode('utf-8')
result = Security.SSLSetPeerID(session_context, peer_id, len(peer_id))
handle_sec_error(result)
handshake_result = Security.SSLHandshake(session_context)
if self._exception is not None:
exception = self._exception
self._exception = None
raise exception
while handshake_result == SecurityConst.errSSLWouldBlock:
handshake_result = Security.SSLHandshake(session_context)
if self._exception is not None:
exception = self._exception
self._exception = None
raise exception
if osx_version_info < (10, 8) and osx_version_info >= (10, 7):
do_validation = explicit_validation and handshake_result == 0
else:
do_validation = explicit_validation and handshake_result == SecurityConst.errSSLServerAuthCompleted
if do_validation:
trust_ref_pointer = new(Security, 'SecTrustRef *')
result = Security.SSLCopyPeerTrust(
session_context,
trust_ref_pointer
)
handle_sec_error(result)
trust_ref = unwrap(trust_ref_pointer)
cf_string_hostname = CFHelpers.cf_string_from_unicode(self._hostname)
ssl_policy_ref = Security.SecPolicyCreateSSL(True, cf_string_hostname)
result = CoreFoundation.CFRelease(cf_string_hostname)
handle_cf_error(result)
# Create a new policy for OCSP checking to disable it
ocsp_oid_pointer = struct(Security, 'CSSM_OID')
ocsp_oid = unwrap(ocsp_oid_pointer)
ocsp_oid.Length = len(SecurityConst.APPLE_TP_REVOCATION_OCSP)
ocsp_oid_buffer = buffer_from_bytes(SecurityConst.APPLE_TP_REVOCATION_OCSP)
ocsp_oid.Data = cast(Security, 'char *', ocsp_oid_buffer)
ocsp_search_ref_pointer = new(Security, 'SecPolicySearchRef *')
result = Security.SecPolicySearchCreate(
SecurityConst.CSSM_CERT_X_509v3,
ocsp_oid_pointer,
null(),
ocsp_search_ref_pointer
)
handle_sec_error(result)
ocsp_search_ref = unwrap(ocsp_search_ref_pointer)
ocsp_policy_ref_pointer = new(Security, 'SecPolicyRef *')
result = Security.SecPolicySearchCopyNext(ocsp_search_ref, ocsp_policy_ref_pointer)
handle_sec_error(result)
ocsp_policy_ref = unwrap(ocsp_policy_ref_pointer)
ocsp_struct_pointer = struct(Security, 'CSSM_APPLE_TP_OCSP_OPTIONS')
ocsp_struct = unwrap(ocsp_struct_pointer)
ocsp_struct.Version = SecurityConst.CSSM_APPLE_TP_OCSP_OPTS_VERSION
ocsp_struct.Flags = (
SecurityConst.CSSM_TP_ACTION_OCSP_DISABLE_NET |
SecurityConst.CSSM_TP_ACTION_OCSP_CACHE_READ_DISABLE
)
ocsp_struct_bytes = struct_bytes(ocsp_struct_pointer)
cssm_data_pointer = struct(Security, 'CSSM_DATA')
cssm_data = unwrap(cssm_data_pointer)
cssm_data.Length = len(ocsp_struct_bytes)
ocsp_struct_buffer = buffer_from_bytes(ocsp_struct_bytes)
cssm_data.Data = cast(Security, 'char *', ocsp_struct_buffer)
result = Security.SecPolicySetValue(ocsp_policy_ref, cssm_data_pointer)
handle_sec_error(result)
# Create a new policy for CRL checking to disable it
crl_oid_pointer = struct(Security, 'CSSM_OID')
crl_oid = unwrap(crl_oid_pointer)
crl_oid.Length = len(SecurityConst.APPLE_TP_REVOCATION_CRL)
crl_oid_buffer = buffer_from_bytes(SecurityConst.APPLE_TP_REVOCATION_CRL)
crl_oid.Data = cast(Security, 'char *', crl_oid_buffer)
crl_search_ref_pointer = new(Security, 'SecPolicySearchRef *')
result = Security.SecPolicySearchCreate(
SecurityConst.CSSM_CERT_X_509v3,
crl_oid_pointer,
null(),
crl_search_ref_pointer
)
handle_sec_error(result)
crl_search_ref = unwrap(crl_search_ref_pointer)
crl_policy_ref_pointer = new(Security, 'SecPolicyRef *')
result = Security.SecPolicySearchCopyNext(crl_search_ref, crl_policy_ref_pointer)
handle_sec_error(result)
crl_policy_ref = unwrap(crl_policy_ref_pointer)
crl_struct_pointer = struct(Security, 'CSSM_APPLE_TP_CRL_OPTIONS')
crl_struct = unwrap(crl_struct_pointer)
crl_struct.Version = SecurityConst.CSSM_APPLE_TP_CRL_OPTS_VERSION
crl_struct.CrlFlags = 0
crl_struct_bytes = struct_bytes(crl_struct_pointer)
cssm_data_pointer = struct(Security, 'CSSM_DATA')
cssm_data = unwrap(cssm_data_pointer)
cssm_data.Length = len(crl_struct_bytes)
crl_struct_buffer = buffer_from_bytes(crl_struct_bytes)
cssm_data.Data = cast(Security, 'char *', crl_struct_buffer)
result = Security.SecPolicySetValue(crl_policy_ref, cssm_data_pointer)
handle_sec_error(result)
policy_array_ref = CFHelpers.cf_array_from_list([
ssl_policy_ref,
crl_policy_ref,
ocsp_policy_ref
])
result = Security.SecTrustSetPolicies(trust_ref, policy_array_ref)
handle_sec_error(result)
if self._session._extra_trust_roots:
ca_cert_refs = []
ca_certs = []
for cert in self._session._extra_trust_roots:
ca_cert = load_certificate(cert)
ca_certs.append(ca_cert)
ca_cert_refs.append(ca_cert.sec_certificate_ref)
result = Security.SecTrustSetAnchorCertificatesOnly(trust_ref, False)
handle_sec_error(result)
array_ref = CFHelpers.cf_array_from_list(ca_cert_refs)
result = Security.SecTrustSetAnchorCertificates(trust_ref, array_ref)
handle_sec_error(result)
result_pointer = new(Security, 'SecTrustResultType *')
result = Security.SecTrustEvaluate(trust_ref, result_pointer)
handle_sec_error(result)
trust_result_code = deref(result_pointer)
                successful_trust_result_codes = set([
                    SecurityConst.kSecTrustResultProceed,
                    SecurityConst.kSecTrustResultUnspecified
                ])
                if trust_result_code not in successful_trust_result_codes:
handshake_result = SecurityConst.errSSLXCertChainInvalid
else:
handshake_result = Security.SSLHandshake(session_context)
while handshake_result == SecurityConst.errSSLWouldBlock:
handshake_result = Security.SSLHandshake(session_context)
self._done_handshake = True
handshake_error_codes = set([
SecurityConst.errSSLXCertChainInvalid,
SecurityConst.errSSLCertExpired,
SecurityConst.errSSLCertNotYetValid,
SecurityConst.errSSLUnknownRootCert,
SecurityConst.errSSLNoRootCert,
SecurityConst.errSSLHostNameMismatch,
SecurityConst.errSSLInternal,
])
# In testing, only errSSLXCertChainInvalid was ever returned for
# all of these different situations, however we include the others
# for completeness. To get the real reason we have to use the
# certificate from the handshake and use the deprecated function
# SecTrustGetCssmResultCode().
if handshake_result in handshake_error_codes:
trust_ref_pointer = new(Security, 'SecTrustRef *')
result = Security.SSLCopyPeerTrust(
session_context,
trust_ref_pointer
)
handle_sec_error(result)
trust_ref = unwrap(trust_ref_pointer)
result_code_pointer = new(Security, 'OSStatus *')
result = Security.SecTrustGetCssmResultCode(trust_ref, result_code_pointer)
result_code = deref(result_code_pointer)
chain = extract_chain(self._server_hello)
self_signed = False
revoked = False
expired = False
not_yet_valid = False
no_issuer = False
cert = None
bad_hostname = False
if chain:
cert = chain[0]
oscrypto_cert = load_certificate(cert)
self_signed = oscrypto_cert.self_signed
revoked = result_code == SecurityConst.CSSMERR_TP_CERT_REVOKED
no_issuer = not self_signed and result_code == SecurityConst.CSSMERR_TP_NOT_TRUSTED
expired = result_code == SecurityConst.CSSMERR_TP_CERT_EXPIRED
not_yet_valid = result_code == SecurityConst.CSSMERR_TP_CERT_NOT_VALID_YET
bad_hostname = result_code == SecurityConst.CSSMERR_APPLETP_HOSTNAME_MISMATCH
# On macOS 10.12, some expired certificates return errSSLInternal
if osx_version_info >= (10, 12):
validity = cert['tbs_certificate']['validity']
not_before = validity['not_before'].chosen.native
not_after = validity['not_after'].chosen.native
utcnow = datetime.datetime.now(timezone.utc)
expired = not_after < utcnow
not_yet_valid = not_before > utcnow
if chain and chain[0].hash_algo in set(['md5', 'md2']):
raise_weak_signature(chain[0])
if revoked:
raise_revoked(cert)
if bad_hostname:
raise_hostname(cert, self._hostname)
elif expired or not_yet_valid:
raise_expired_not_yet_valid(cert)
elif no_issuer:
raise_no_issuer(cert)
elif self_signed:
raise_self_signed(cert)
if detect_client_auth_request(self._server_hello):
raise_client_auth()
raise_verification(cert)
if handshake_result == SecurityConst.errSSLPeerHandshakeFail:
if detect_client_auth_request(self._server_hello):
raise_client_auth()
raise_handshake()
if handshake_result == SecurityConst.errSSLWeakPeerEphemeralDHKey:
raise_dh_params()
if handshake_result == SecurityConst.errSSLPeerProtocolVersion:
raise_protocol_version()
if handshake_result in set([SecurityConst.errSSLRecordOverflow, SecurityConst.errSSLProtocol]):
self._server_hello += _read_remaining(self._socket)
raise_protocol_error(self._server_hello)
if handshake_result in set([SecurityConst.errSSLClosedNoNotify, SecurityConst.errSSLClosedAbort]):
if not self._done_handshake:
self._server_hello += _read_remaining(self._socket)
if detect_other_protocol(self._server_hello):
raise_protocol_error(self._server_hello)
raise_disconnection()
if osx_version_info < (10, 10):
dh_params_length = get_dh_params_length(self._server_hello)
if dh_params_length is not None and dh_params_length < 1024:
raise_dh_params()
would_block = handshake_result == SecurityConst.errSSLWouldBlock
server_auth_complete = handshake_result == SecurityConst.errSSLServerAuthCompleted
manual_validation = self._session._manual_validation and server_auth_complete
if not would_block and not manual_validation:
handle_sec_error(handshake_result, TLSError)
self._session_context = session_context
protocol_const_pointer = new(Security, 'SSLProtocol *')
result = Security.SSLGetNegotiatedProtocolVersion(
session_context,
protocol_const_pointer
)
handle_sec_error(result)
protocol_const = deref(protocol_const_pointer)
self._protocol = _PROTOCOL_CONST_STRING_MAP[protocol_const]
cipher_int_pointer = new(Security, 'SSLCipherSuite *')
result = Security.SSLGetNegotiatedCipher(
session_context,
cipher_int_pointer
)
handle_sec_error(result)
cipher_int = deref(cipher_int_pointer)
cipher_bytes = int_to_bytes(cipher_int, width=2)
self._cipher_suite = CIPHER_SUITE_MAP.get(cipher_bytes, cipher_bytes)
session_info = parse_session_info(
self._server_hello,
self._client_hello
)
self._compression = session_info['compression']
self._session_id = session_info['session_id']
self._session_ticket = session_info['session_ticket']
except (OSError, socket_.error):
if session_context:
if osx_version_info < (10, 8):
result = Security.SSLDisposeContext(session_context)
handle_sec_error(result)
else:
result = CoreFoundation.CFRelease(session_context)
handle_cf_error(result)
self._session_context = None
self.close()
raise
finally:
# Trying to release crl_search_ref or ocsp_search_ref results in
# a segmentation fault, so we do not do that
if ssl_policy_ref:
result = CoreFoundation.CFRelease(ssl_policy_ref)
handle_cf_error(result)
ssl_policy_ref = None
if crl_policy_ref:
result = CoreFoundation.CFRelease(crl_policy_ref)
handle_cf_error(result)
crl_policy_ref = None
if ocsp_policy_ref:
result = CoreFoundation.CFRelease(ocsp_policy_ref)
handle_cf_error(result)
ocsp_policy_ref = None
if policy_array_ref:
result = CoreFoundation.CFRelease(policy_array_ref)
handle_cf_error(result)
policy_array_ref = None
|
Perform an initial TLS handshake
|
def reorderChild(self, parent, newitem):
"""Reorder a list to match target by moving a sequence at a time.
Written for QtAbstractItemModel.moveRows.
"""
source = self.getItem(parent).childItems
target = newitem.childItems
i = 0
while i < len(source):
if source[i] == target[i]:
i += 1
continue
else:
i0 = i
j0 = source.index(target[i0])
j = j0 + 1
while j < len(source):
if source[j] == target[j - j0 + i0]:
j += 1
continue
else:
break
self.moveRows(parent, i0, j0, j - j0)
i += j - j0
|
Reorder a list to match target by moving a sequence at a time.
Written for QtAbstractItemModel.moveRows.
|
def set_duplicated_flag(self):
"""
        For every package, set the duplicated flag if it is not a unique package
:return:
"""
package_by_name = defaultdict(list)
for package1 in self._root_package.all_packages:
if package1 is None:
continue
pkg_name = package1.package_name
param_list = self._config.get_fails('unique', {})
params1 = package1.get_params(param_list)
for package2 in package_by_name[pkg_name]:
params2 = package2.get_params(param_list)
for x in param_list:
# START HACK for cached archive
param1 = params1[x]
param2 = params2[x]
if isinstance(param1, list):
param1 = [str(x) for x in param1]
if isinstance(param2, list):
param2 = [str(x) for x in param2]
# END
if str(param1) != str(param2):
package1.duplicated = True
package2.duplicated = True
package_by_name[pkg_name].append(package1)
|
For every package, set the duplicated flag if it is not a unique package
:return:
|
def theme_color(self):
"""
A member of :ref:`MsoThemeColorIndex` or |None| if no theme color is
specified. When :attr:`type` is `MSO_COLOR_TYPE.THEME`, the value of
this property will always be a member of :ref:`MsoThemeColorIndex`.
When :attr:`type` has any other value, the value of this property is
|None|.
Assigning a member of :ref:`MsoThemeColorIndex` causes :attr:`type`
to become `MSO_COLOR_TYPE.THEME`. Any existing RGB value is retained
but ignored by Word. Assigning |None| causes any color specification
to be removed such that the effective color is inherited from the
style hierarchy.
"""
color = self._color
if color is None or color.themeColor is None:
return None
return color.themeColor
|
A member of :ref:`MsoThemeColorIndex` or |None| if no theme color is
specified. When :attr:`type` is `MSO_COLOR_TYPE.THEME`, the value of
this property will always be a member of :ref:`MsoThemeColorIndex`.
When :attr:`type` has any other value, the value of this property is
|None|.
Assigning a member of :ref:`MsoThemeColorIndex` causes :attr:`type`
to become `MSO_COLOR_TYPE.THEME`. Any existing RGB value is retained
but ignored by Word. Assigning |None| causes any color specification
to be removed such that the effective color is inherited from the
style hierarchy.
|
def add_program(self, name=None):
"""Create a program and add it to this MultiProgram.
It is the caller's responsibility to keep a reference to the returned
program.
The *name* must be unique, but is otherwise arbitrary and used for
debugging purposes.
"""
if name is None:
name = 'program' + str(self._next_prog_id)
self._next_prog_id += 1
if name in self._programs:
raise KeyError("Program named '%s' already exists." % name)
# create a program and update it to look like the rest
prog = ModularProgram(self._vcode, self._fcode)
for key, val in self._set_items.items():
prog[key] = val
self.frag._new_program(prog)
self.vert._new_program(prog)
self._programs[name] = prog
return prog
|
Create a program and add it to this MultiProgram.
It is the caller's responsibility to keep a reference to the returned
program.
The *name* must be unique, but is otherwise arbitrary and used for
debugging purposes.
|
def _coerce_dtype(self, other_dtype):
"""Possibly change the bin content type to allow correct operations with other operand.
Parameters
----------
other_dtype : np.dtype or type
"""
if self._dtype is None:
new_dtype = np.dtype(other_dtype)
else:
new_dtype = np.find_common_type([self._dtype, np.dtype(other_dtype)], [])
if new_dtype != self.dtype:
self.set_dtype(new_dtype)
|
Possibly change the bin content type to allow correct operations with other operand.
Parameters
----------
other_dtype : np.dtype or type
|
def check_overlap(self, other, wavelengths=None, threshold=0.01):
"""Check for wavelength overlap between two spectra.
Only wavelengths where ``self`` throughput is non-zero
are considered.
Example of full overlap::
|---------- other ----------|
|------ self ------|
Examples of partial overlap::
|---------- self ----------|
|------ other ------|
|---- other ----|
|---- self ----|
|---- self ----|
|---- other ----|
Examples of no overlap::
|---- self ----| |---- other ----|
|---- other ----| |---- self ----|
Parameters
----------
other : `BaseSpectrum`
wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
Wavelength values for integration.
If not a Quantity, assumed to be in Angstrom.
If `None`, `waveset` is used.
threshold : float
If less than this fraction of flux or throughput falls
outside wavelength overlap, the *lack* of overlap is
*insignificant*. This is only used when partial overlap
is detected. Default is 1%.
Returns
-------
result : {'full', 'partial_most', 'partial_notmost', 'none'}
* 'full' - ``self`` coverage is within or same as ``other``
* 'partial_most' - Less than ``threshold`` fraction of
``self`` flux is outside the overlapping wavelength
region, i.e., the *lack* of overlap is *insignificant*
* 'partial_notmost' - ``self`` partially overlaps with
``other`` but does not qualify for 'partial_most'
* 'none' - ``self`` does not overlap ``other``
Raises
------
synphot.exceptions.SynphotError
Invalid inputs.
"""
if not isinstance(other, BaseSpectrum):
raise exceptions.SynphotError(
'other must be spectrum or bandpass.')
# Special cases where no sampling wavelengths given and
# one of the inputs is continuous.
if wavelengths is None:
if other.waveset is None:
return 'full'
if self.waveset is None:
return 'partial_notmost'
x1 = self._validate_wavelengths(wavelengths)
y1 = self(x1)
a = x1[y1 > 0].value
b = other._validate_wavelengths(wavelengths).value
result = utils.overlap_status(a, b)
if result == 'partial':
# If there is no need to extrapolate or taper other
# (i.e., other is zero at self's wave limits),
# then we consider it as a full coverage.
# This logic assumes __call__ never returns mag or count!
if ((isinstance(other.model, Empirical1D) and
other.model.is_tapered() or
not isinstance(other.model,
(Empirical1D, _CompoundModel))) and
np.allclose(other(x1[::x1.size - 1]).value, 0)):
result = 'full'
# Check if the lack of overlap is significant.
else:
# Get all the flux
totalflux = self.integrate(wavelengths=wavelengths).value
utils.validate_totalflux(totalflux)
a_min, a_max = a.min(), a.max()
b_min, b_max = b.min(), b.max()
# Now get the other two pieces
excluded = 0.0
if a_min < b_min:
excluded += self.integrate(
wavelengths=np.array([a_min, b_min])).value
if a_max > b_max:
excluded += self.integrate(
wavelengths=np.array([b_max, a_max])).value
if excluded / totalflux < threshold:
result = 'partial_most'
else:
result = 'partial_notmost'
return result
|
Check for wavelength overlap between two spectra.
Only wavelengths where ``self`` throughput is non-zero
are considered.
Example of full overlap::
|---------- other ----------|
|------ self ------|
Examples of partial overlap::
|---------- self ----------|
|------ other ------|
|---- other ----|
|---- self ----|
|---- self ----|
|---- other ----|
Examples of no overlap::
|---- self ----| |---- other ----|
|---- other ----| |---- self ----|
Parameters
----------
other : `BaseSpectrum`
wavelengths : array-like, `~astropy.units.quantity.Quantity`, or `None`
Wavelength values for integration.
If not a Quantity, assumed to be in Angstrom.
If `None`, `waveset` is used.
threshold : float
If less than this fraction of flux or throughput falls
outside wavelength overlap, the *lack* of overlap is
*insignificant*. This is only used when partial overlap
is detected. Default is 1%.
Returns
-------
result : {'full', 'partial_most', 'partial_notmost', 'none'}
* 'full' - ``self`` coverage is within or same as ``other``
* 'partial_most' - Less than ``threshold`` fraction of
``self`` flux is outside the overlapping wavelength
region, i.e., the *lack* of overlap is *insignificant*
* 'partial_notmost' - ``self`` partially overlaps with
``other`` but does not qualify for 'partial_most'
* 'none' - ``self`` does not overlap ``other``
Raises
------
synphot.exceptions.SynphotError
Invalid inputs.
|
def pyeapi_config(commands=None,
config_file=None,
template_engine='jinja',
context=None,
defaults=None,
saltenv='base',
**kwargs):
'''
.. versionadded:: 2019.2.0
Configures the Arista switch with the specified commands, via the ``pyeapi``
library. This function forwards the existing connection details to the
:mod:`pyeapi.run_commands <salt.module.arista_pyeapi.run_commands>`
execution function.
commands
The list of configuration commands to load on the Arista switch.
.. note::
This argument is ignored when ``config_file`` is specified.
config_file
The source file with the configuration commands to be sent to the device.
The file can also be a template that can be rendered using the template
engine of choice. This can be specified using the absolute path to the
file, or using one of the following URL schemes:
- ``salt://``
- ``https://``
        - ``ftp://``
        - ``s3://``
- ``swift://``
template_engine: ``jinja``
The template engine to use when rendering the source file. Default:
``jinja``. To simply fetch the file without attempting to render, set
this argument to ``None``.
context: ``None``
Variables to add to the template context.
defaults: ``None``
Default values of the ``context`` dict.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file. Ignored if
``config_file`` is not a ``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' napalm.pyeapi_config 'ntp server 1.2.3.4'
'''
pyeapi_kwargs = pyeapi_nxos_api_args(**kwargs)
return __salt__['pyeapi.config'](commands=commands,
config_file=config_file,
template_engine=template_engine,
context=context,
defaults=defaults,
saltenv=saltenv,
**pyeapi_kwargs)
|
.. versionadded:: 2019.2.0
Configures the Arista switch with the specified commands, via the ``pyeapi``
library. This function forwards the existing connection details to the
:mod:`pyeapi.run_commands <salt.module.arista_pyeapi.run_commands>`
execution function.
commands
The list of configuration commands to load on the Arista switch.
.. note::
This argument is ignored when ``config_file`` is specified.
config_file
The source file with the configuration commands to be sent to the device.
The file can also be a template that can be rendered using the template
engine of choice. This can be specified using the absolute path to the
file, or using one of the following URL schemes:
- ``salt://``
- ``https://``
- ``ftp://``
- ``s3://``
- ``swift://``
template_engine: ``jinja``
The template engine to use when rendering the source file. Default:
``jinja``. To simply fetch the file without attempting to render, set
this argument to ``None``.
context: ``None``
Variables to add to the template context.
defaults: ``None``
Default values of the ``context`` dict.
saltenv: ``base``
Salt fileserver environment from which to retrieve the file. Ignored if
``config_file`` is not a ``salt://`` URL.
CLI Example:
.. code-block:: bash
salt '*' napalm.pyeapi_config 'ntp server 1.2.3.4'
|
def show_xticklabels(self, row, column):
"""Show the x-axis tick labels for a subplot.
:param row,column: specify the subplot.
"""
subplot = self.get_subplot_at(row, column)
subplot.show_xticklabels()
|
Show the x-axis tick labels for a subplot.
:param row,column: specify the subplot.
|
def env_string(name, required=False, default=empty):
"""Pulls an environment variable out of the environment returning it as a
string. If not present in the environment and no default is specified, an
empty string is returned.
    :param name: The name of the environment variable to be pulled
:type name: str
:param required: Whether the environment variable is required. If ``True``
and the variable is not present, a ``KeyError`` is raised.
:type required: bool
:param default: The value to return if the environment variable is not
present. (Providing a default alongside setting ``required=True`` will raise
a ``ValueError``)
    :type default: str
"""
value = get_env_value(name, default=default, required=required)
if value is empty:
value = ''
return value
|
Pulls an environment variable out of the environment returning it as a
string. If not present in the environment and no default is specified, an
empty string is returned.
:param name: The name of the environment variable to be pulled
:type name: str
:param required: Whether the environment variable is required. If ``True``
and the variable is not present, a ``KeyError`` is raised.
:type required: bool
:param default: The value to return if the environment variable is not
present. (Providing a default alongside setting ``required=True`` will raise
a ``ValueError``)
:type default: str
|
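A quick usage sketch, assuming env_string and its empty sentinel are importable from the surrounding module:
import os

os.environ['SERVICE_NAME'] = 'billing'
print(env_string('SERVICE_NAME'))                     # 'billing'
print(env_string('MISSING_VAR'))                      # '' (falls back to the empty string)
print(env_string('MISSING_VAR', default='fallback'))  # 'fallback'
# env_string('MISSING_VAR', required=True) would raise KeyError
|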
def load_spectrum(path, smoothing=181, DF=-8.):
"""Load a Phoenix model atmosphere spectrum.
path : string
The file path to load.
smoothing : integer
Smoothing to apply. If None, do not smooth. If an integer, smooth with a
Hamming window. Otherwise, the variable is assumed to be a different
smoothing window, and the data will be convolved with it.
    DF : float
Numerical factor used to compute the emergent flux density.
Returns a Pandas DataFrame containing the columns:
wlen
Sample wavelength in Angstrom.
flam
Flux density in erg/cm²/s/Å. See `pwkit.synphot` for related tools.
The values of *flam* returned by this function are computed from the
second column of the data file as specified in the documentation: ``flam =
10**(col2 + DF)``. The documentation states that the default value, -8, is
appropriate for most modern models; but some older models use other
values.
Loading takes about 5 seconds on my current laptop. Un-smoothed spectra
have about 630,000 samples.
"""
try:
ang, lflam = np.loadtxt(path, usecols=(0,1)).T
except ValueError:
# In some files, the numbers in the first columns fill up the
# whole 12-character column width, and are given in exponential
# notation with a 'D' character, so we must be more careful:
with open(path, 'rb') as f:
def lines():
for line in f:
yield line.replace(b'D', b'e')
ang, lflam = np.genfromtxt(lines(), delimiter=(13, 12)).T
# Data files do not come sorted!
z = ang.argsort()
ang = ang[z]
flam = 10**(lflam[z] + DF)
del z
if smoothing is not None:
if isinstance(smoothing, int):
smoothing = np.hamming(smoothing)
else:
smoothing = np.asarray(smoothing)
wnorm = np.convolve(np.ones_like(smoothing), smoothing, mode='valid')
smoothing = smoothing / wnorm # do not alter original array.
smooth = lambda a: np.convolve(a, smoothing, mode='valid')[::smoothing.size]
ang = smooth(ang)
flam = smooth(flam)
return pd.DataFrame({'wlen': ang, 'flam': flam})
|
Load a Phoenix model atmosphere spectrum.
path : string
The file path to load.
smoothing : integer
Smoothing to apply. If None, do not smooth. If an integer, smooth with a
Hamming window. Otherwise, the variable is assumed to be a different
smoothing window, and the data will be convolved with it.
DF : float
Numerical factor used to compute the emergent flux density.
Returns a Pandas DataFrame containing the columns:
wlen
Sample wavelength in Angstrom.
flam
Flux density in erg/cm²/s/Å. See `pwkit.synphot` for related tools.
The values of *flam* returned by this function are computed from the
second column of the data file as specified in the documentation: ``flam =
10**(col2 + DF)``. The documentation states that the default value, -8, is
appropriate for most modern models; but some older models use other
values.
Loading takes about 5 seconds on my current laptop. Un-smoothed spectra
have about 630,000 samples.
|
def get_bool(_bytearray, byte_index, bool_index):
"""
Get the boolean value from location in bytearray
"""
index_value = 1 << bool_index
byte_value = _bytearray[byte_index]
current_value = byte_value & index_value
return current_value == index_value
|
Get the boolean value from location in bytearray
|
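A worked example of the bit extraction above: the byte 0b00000101 has bits 0 and 2 set.
data = bytearray([0b00000101])

print(get_bool(data, 0, 0))  # True  (bit 0 is set)
print(get_bool(data, 0, 1))  # False (bit 1 is clear)
print(get_bool(data, 0, 2))  # True  (bit 2 is set)
|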
def format_status(self, width=None,
label_width=None,
progress_width=None,
summary_width=None):
"""Generate the formatted status bar string."""
if width is None: # pragma: no cover
width = shutil.get_terminal_size()[0]
if label_width is None:
label_width = len(self.label)
if summary_width is None:
summary_width = self.summary_width()
if progress_width is None:
progress_width = width - label_width - summary_width - 2
if len(self.label) > label_width:
# FIXME: This actually *will* break if we ever have fewer than
# three characters assigned to format the label, but that would
# be an extreme situation so I won't fix it just yet.
label = self.label[:label_width - 3] + "..."
else:
label_format = "{{label:{fill_char}<{width}}}".format(
width=label_width,
fill_char=self.fill_char)
label = label_format.format(label=self.label)
summary_format = "{{:>{width}}}".format(width=summary_width)
summary = summary_format.format(self._progress.format_summary())
progress = self._progress.format_progress(width=progress_width)
return "{label} {progress} {summary}".format(
label=label,
progress=progress,
summary=summary
)
|
Generate the formatted status bar string.
|
def split_address(address):
"""
Returns (host, port) with an integer port from the specified address
string. (None, None) is returned if the address is invalid.
"""
invalid = None, None
if not address and address != 0:
return invalid
components = str(address).split(':')
if len(components) > 2:
return invalid
if components[0] and not valid_hostname(components[0]):
return invalid
if len(components) == 2 and not valid_port(components[1]):
return invalid
if len(components) == 1:
components.insert(0 if valid_port(components[0]) else 1, None)
host, port = components
port = int(port) if port else None
return host, port
|
Returns (host, port) with an integer port from the specified address
string. (None, None) is returned if the address is invalid.
|
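A behaviour sketch for the parser above; valid_hostname and valid_port are helpers defined elsewhere in the module, so exact acceptance rules depend on them:
print(split_address('127.0.0.1:8000'))  # ('127.0.0.1', 8000)
print(split_address('localhost'))       # ('localhost', None) -- no port given
print(split_address('a:b:c'))           # (None, None) -- too many components
|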
def send_terrain_data(self):
'''send some terrain data'''
for bit in range(56):
if self.current_request.mask & (1<<bit) and self.sent_mask & (1<<bit) == 0:
self.send_terrain_data_bit(bit)
return
# no bits to send
self.current_request = None
self.sent_mask = 0
|
send some terrain data
|
def capture_termination_signal(please_stop):
"""
WILL SIGNAL please_stop WHEN THIS AWS INSTANCE IS DUE FOR SHUTDOWN
"""
def worker(please_stop):
seen_problem = False
while not please_stop:
request_time = (time.time() - timer.START)/60 # MINUTES
try:
response = requests.get("http://169.254.169.254/latest/meta-data/spot/termination-time")
seen_problem = False
if response.status_code not in [400, 404]:
Log.alert("Shutdown AWS Spot Node {{name}} {{type}}", name=machine_metadata.name, type=machine_metadata.aws_instance_type)
please_stop.go()
except Exception as e:
e = Except.wrap(e)
if "Failed to establish a new connection: [Errno 10060]" in e or "A socket operation was attempted to an unreachable network" in e:
Log.note("AWS Spot Detection has shutdown, probably not a spot node, (http://169.254.169.254 is unreachable)")
return
elif seen_problem:
# IGNORE THE FIRST PROBLEM
Log.warning("AWS shutdown detection has more than one consecutive problem: (last request {{time|round(1)}} minutes since startup)", time=request_time, cause=e)
seen_problem = True
(Till(seconds=61) | please_stop).wait()
(Till(seconds=11) | please_stop).wait()
Thread.run("listen for termination", worker)
|
WILL SIGNAL please_stop WHEN THIS AWS INSTANCE IS DUE FOR SHUTDOWN
|
def MACRO_DEFINITION(self, cursor):
"""
Parse MACRO_DEFINITION, only present if the TranslationUnit is
used with TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD.
"""
# TODO: optionalize macro parsing. It takes a LOT of time.
# ignore system macro
if (not hasattr(cursor, 'location') or cursor.location is None or
cursor.location.file is None):
return False
name = self.get_unique_name(cursor)
# if name == 'A':
# code.interact(local=locals())
# Tokens !!! .kind = {IDENTIFIER, KEYWORD, LITERAL, PUNCTUATION,
# COMMENT ? } etc. see TokenKinds.def
comment = None
tokens = self._literal_handling(cursor)
# Macro name is tokens[0]
# get Macro value(s)
value = True
if isinstance(tokens, list):
if len(tokens) == 2:
value = tokens[1]
else:
# just merge the list of tokens
value = ''.join(tokens[1:])
# macro comment maybe in tokens. Not in cursor.raw_comment
for t in cursor.get_tokens():
if t.kind == TokenKind.COMMENT:
comment = t.spelling
# special case. internal __null
        # FIXME, there are probably a lot of others.
        # why not Cursor.kind GNU_NULL_EXPR child instead of a token?
if name == 'NULL' or value == '__null':
value = None
log.debug('MACRO: #define %s %s', tokens[0], value)
obj = typedesc.Macro(name, None, value)
try:
self.register(name, obj)
except DuplicateDefinitionException:
log.info(
'Redefinition of %s %s->%s',
name, self.parser.all[name].args, value)
# HACK
self.parser.all[name] = obj
self.set_location(obj, cursor)
# set the comment in the obj
obj.comment = comment
return True
|
Parse MACRO_DEFINITION, only present if the TranslationUnit is
used with TranslationUnit.PARSE_DETAILED_PROCESSING_RECORD.
|
def get_persistent_boot_device(self):
"""Get current persistent boot device set for the host
:returns: persistent boot device for the system
:raises: IloError, on an error from iLO.
"""
sushy_system = self._get_sushy_system(PROLIANT_SYSTEM_ID)
# Return boot device if it is persistent.
if ((sushy_system.
boot.enabled) == sushy.BOOT_SOURCE_ENABLED_CONTINUOUS):
return PERSISTENT_BOOT_MAP.get(sushy_system.boot.target)
# Check if we are in BIOS boot mode.
# There is no resource to fetch boot device order for BIOS boot mode
if not self._is_boot_mode_uefi():
return None
try:
boot_device = (sushy_system.bios_settings.boot_settings.
get_persistent_boot_device())
return PERSISTENT_BOOT_MAP.get(boot_device)
except sushy.exceptions.SushyError as e:
msg = (self._("The Redfish controller is unable to get "
"persistent boot device. Error %(error)s") %
{'error': str(e)})
LOG.debug(msg)
raise exception.IloError(msg)
|
Get current persistent boot device set for the host
:returns: persistent boot device for the system
:raises: IloError, on an error from iLO.
|
def libvlc_video_get_adjust_float(p_mi, option):
'''Get float adjust option.
@param p_mi: libvlc media player instance.
@param option: adjust option to get, values of libvlc_video_adjust_option_t.
@version: LibVLC 1.1.1 and later.
'''
f = _Cfunctions.get('libvlc_video_get_adjust_float', None) or \
_Cfunction('libvlc_video_get_adjust_float', ((1,), (1,),), None,
ctypes.c_float, MediaPlayer, ctypes.c_uint)
return f(p_mi, option)
|
Get float adjust option.
@param p_mi: libvlc media player instance.
@param option: adjust option to get, values of libvlc_video_adjust_option_t.
@version: LibVLC 1.1.1 and later.
|
def port_profile_domain_profile_profile_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
port_profile_domain = ET.SubElement(config, "port-profile-domain", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
port_profile_domain_name_key = ET.SubElement(port_profile_domain, "port-profile-domain-name")
port_profile_domain_name_key.text = kwargs.pop('port_profile_domain_name')
profile = ET.SubElement(port_profile_domain, "profile")
profile_name = ET.SubElement(profile, "profile-name")
profile_name.text = kwargs.pop('profile_name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def empirical_sinkhorn_divergence(X_s, X_t, reg, a=None, b=None, metric='sqeuclidean', numIterMax=10000, stopThr=1e-9, verbose=False, log=False, **kwargs):
'''
Compute the sinkhorn divergence loss from empirical data
The function solves the following optimization problems and return the
sinkhorn divergence :math:`S`:
.. math::
W &= \min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)
W_a &= \min_{\gamma_a} <\gamma_a,M_a>_F + reg\cdot\Omega(\gamma_a)
W_b &= \min_{\gamma_b} <\gamma_b,M_b>_F + reg\cdot\Omega(\gamma_b)
S &= W - 1/2 * (W_a + W_b)
.. math::
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
\gamma_a 1 = a
\gamma_a^T 1= a
\gamma_a\geq 0
\gamma_b 1 = b
\gamma_b^T 1= b
\gamma_b\geq 0
where :
- :math:`M` (resp. :math:`M_a, M_b`) is the (ns,nt) metric cost matrix (resp (ns, ns) and (nt, nt))
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- :math:`a` and :math:`b` are source and target weights (sum to 1)
Parameters
----------
X_s : np.ndarray (ns, d)
samples in the source domain
X_t : np.ndarray (nt, d)
samples in the target domain
reg : float
Regularization term >0
a : np.ndarray (ns,)
samples weights in the source domain
b : np.ndarray (nt,)
samples weights in the target domain
    numIterMax : int, optional
Max number of iterations
stopThr : float, optional
        Stop threshold on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
    div : (1,) ndarray
        Sinkhorn divergence for the given parameters
    log : dict
        log dictionary returned only if log==True in parameters
Examples
--------
>>> n_s = 2
>>> n_t = 4
>>> reg = 0.1
>>> X_s = np.reshape(np.arange(n_s), (n_s, 1))
>>> X_t = np.reshape(np.arange(0, n_t), (n_t, 1))
>>> emp_sinkhorn_div = empirical_sinkhorn_divergence(X_s, X_t, reg)
>>> print(emp_sinkhorn_div)
    [2.99977435]
References
----------
    .. [23] Aude Genevay, Gabriel Peyré, Marco Cuturi, Learning Generative Models with Sinkhorn Divergences, Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics, (AISTATS) 21, 2018
'''
if log:
        sinkhorn_loss_ab, log_ab = empirical_sinkhorn2(X_s, X_t, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=stopThr, verbose=verbose, log=log, **kwargs)
        sinkhorn_loss_a, log_a = empirical_sinkhorn2(X_s, X_s, reg, a, a, metric=metric, numIterMax=numIterMax, stopThr=stopThr, verbose=verbose, log=log, **kwargs)
        sinkhorn_loss_b, log_b = empirical_sinkhorn2(X_t, X_t, reg, b, b, metric=metric, numIterMax=numIterMax, stopThr=stopThr, verbose=verbose, log=log, **kwargs)
sinkhorn_div = sinkhorn_loss_ab - 1 / 2 * (sinkhorn_loss_a + sinkhorn_loss_b)
log = {}
log['sinkhorn_loss_ab'] = sinkhorn_loss_ab
log['sinkhorn_loss_a'] = sinkhorn_loss_a
log['sinkhorn_loss_b'] = sinkhorn_loss_b
log['log_sinkhorn_ab'] = log_ab
log['log_sinkhorn_a'] = log_a
log['log_sinkhorn_b'] = log_b
return max(0, sinkhorn_div), log
else:
        sinkhorn_loss_ab = empirical_sinkhorn2(X_s, X_t, reg, a, b, metric=metric, numIterMax=numIterMax, stopThr=stopThr, verbose=verbose, log=log, **kwargs)
        sinkhorn_loss_a = empirical_sinkhorn2(X_s, X_s, reg, a, a, metric=metric, numIterMax=numIterMax, stopThr=stopThr, verbose=verbose, log=log, **kwargs)
        sinkhorn_loss_b = empirical_sinkhorn2(X_t, X_t, reg, b, b, metric=metric, numIterMax=numIterMax, stopThr=stopThr, verbose=verbose, log=log, **kwargs)
sinkhorn_div = sinkhorn_loss_ab - 1 / 2 * (sinkhorn_loss_a + sinkhorn_loss_b)
return max(0, sinkhorn_div)
|
Compute the sinkhorn divergence loss from empirical data
The function solves the following optimization problems and return the
sinkhorn divergence :math:`S`:
.. math::
W &= \min_\gamma <\gamma,M>_F + reg\cdot\Omega(\gamma)
W_a &= \min_{\gamma_a} <\gamma_a,M_a>_F + reg\cdot\Omega(\gamma_a)
W_b &= \min_{\gamma_b} <\gamma_b,M_b>_F + reg\cdot\Omega(\gamma_b)
S &= W - 1/2 * (W_a + W_b)
.. math::
s.t. \gamma 1 = a
\gamma^T 1= b
\gamma\geq 0
\gamma_a 1 = a
\gamma_a^T 1= a
\gamma_a\geq 0
\gamma_b 1 = b
\gamma_b^T 1= b
\gamma_b\geq 0
where :
- :math:`M` (resp. :math:`M_a, M_b`) is the (ns,nt) metric cost matrix (resp (ns, ns) and (nt, nt))
- :math:`\Omega` is the entropic regularization term :math:`\Omega(\gamma)=\sum_{i,j} \gamma_{i,j}\log(\gamma_{i,j})`
- :math:`a` and :math:`b` are source and target weights (sum to 1)
Parameters
----------
X_s : np.ndarray (ns, d)
samples in the source domain
X_t : np.ndarray (nt, d)
samples in the target domain
reg : float
Regularization term >0
a : np.ndarray (ns,)
samples weights in the source domain
b : np.ndarray (nt,)
samples weights in the target domain
numIterMax : int, optional
Max number of iterations
stopThr : float, optional
Stop threshold on error (>0)
verbose : bool, optional
Print information along iterations
log : bool, optional
record log if True
Returns
-------
div : (1,) ndarray
Sinkhorn divergence for the given parameters
log : dict
log dictionary returned only if log==True in parameters
Examples
--------
>>> n_s = 2
>>> n_t = 4
>>> reg = 0.1
>>> X_s = np.reshape(np.arange(n_s), (n_s, 1))
>>> X_t = np.reshape(np.arange(0, n_t), (n_t, 1))
>>> emp_sinkhorn_div = empirical_sinkhorn_divergence(X_s, X_t, reg)
>>> print(emp_sinkhorn_div)
[2.99977435]
References
----------
.. [23] Aude Genevay, Gabriel Peyré, Marco Cuturi, Learning Generative Models with Sinkhorn Divergences, Proceedings of the Twenty-First International Conference on Artificial Intelligence and Statistics, (AISTATS) 21, 2018
|
def set_attribute(self, key, value):
'''
Add or update the value of an attribute.
'''
if isinstance(key, int):
self.children[key] = value
elif isinstance(key, basestring):
self.attributes[key] = value
else:
raise TypeError('Only integer and string types are valid for assigning '
'child tags and attributes, respectively.')
|
Add or update the value of an attribute.
|
def get_voltage(self, channel):
""" channel: 1=OP1, 2=OP2, AUX is not supported"""
ret = self.ask("V%dO?" % channel)
if ret[-1] != "V":
print("ttiQl355tp.get_voltage() format error", ret)
return None
return float(ret[:-1])
|
channel: 1=OP1, 2=OP2, AUX is not supported
|
def dialog_mode(self, dialog_mode):
"""Switch on/off the speaker's dialog mode.
:param dialog_mode: Enable or disable dialog mode
:type dialog_mode: bool
:raises NotSupportedException: If the device does not support
dialog mode.
"""
if not self.is_soundbar:
message = 'This device does not support dialog mode'
raise NotSupportedException(message)
self.renderingControl.SetEQ([
('InstanceID', 0),
('EQType', 'DialogLevel'),
('DesiredValue', int(dialog_mode))
])
|
Switch on/off the speaker's dialog mode.
:param dialog_mode: Enable or disable dialog mode
:type dialog_mode: bool
:raises NotSupportedException: If the device does not support
dialog mode.
|
def n_point_crossover(random, mom, dad, args):
"""Return the offspring of n-point crossover on the candidates.
This function performs n-point crossover (NPX). It selects *n*
random points without replacement at which to 'cut' the candidate
solutions and recombine them.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *num_crossover_points* -- the number of crossover points used (default 1)
"""
crossover_rate = args.setdefault('crossover_rate', 1.0)
num_crossover_points = args.setdefault('num_crossover_points', 1)
children = []
if random.random() < crossover_rate:
num_cuts = min(len(mom)-1, num_crossover_points)
cut_points = random.sample(range(1, len(mom)), num_cuts)
cut_points.sort()
bro = copy.copy(dad)
sis = copy.copy(mom)
normal = True
for i, (m, d) in enumerate(zip(mom, dad)):
if i in cut_points:
normal = not normal
if not normal:
bro[i] = m
sis[i] = d
children.append(bro)
children.append(sis)
else:
children.append(mom)
children.append(dad)
return children
|
Return the offspring of n-point crossover on the candidates.
This function performs n-point crossover (NPX). It selects *n*
random points without replacement at which to 'cut' the candidate
solutions and recombine them.
.. Arguments:
random -- the random number generator object
mom -- the first parent candidate
dad -- the second parent candidate
args -- a dictionary of keyword arguments
Optional keyword arguments in args:
- *crossover_rate* -- the rate at which crossover is performed
(default 1.0)
- *num_crossover_points* -- the number of crossover points used (default 1)
|
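A usage sketch with plain list candidates and the standard-library RNG; the function expects a random-module-like object as its first argument and relies on the copy module internally:
import random

rng = random.Random(42)
mom = [0, 0, 0, 0, 0, 0]
dad = [1, 1, 1, 1, 1, 1]
args = {'num_crossover_points': 2}

bro, sis = n_point_crossover(rng, mom, dad, args)
print(bro)  # e.g. a mix of 1s and 0s, swapped between the two cut points
print(sis)  # the complementary mix
|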
def _get_function_transitions(self,
expression: Union[str, List],
expected_type: PredicateType) -> Tuple[List[str],
PredicateType,
List[PredicateType]]:
"""
A helper method for ``_get_transitions``. This gets the transitions for the predicate
itself in a function call. If we only had simple functions (e.g., "(add 2 3)"), this would
be pretty straightforward and we wouldn't need a separate method to handle it. We split it
out into its own method because handling higher-order functions is complicated (e.g.,
something like "((negate add) 2 3)").
"""
# This first block handles getting the transitions and function type (and some error
# checking) _just for the function itself_. If this is a simple function, this is easy; if
# it's a higher-order function, it involves some recursion.
if isinstance(expression, list):
# This is a higher-order function. TODO(mattg): we'll just ignore type checking on
# higher-order functions, for now.
transitions, function_type = self._get_transitions(expression, None)
elif expression in self._functions:
name = expression
function_types = self._function_types[expression]
if len(function_types) != 1:
raise ParsingError(f"{expression} had multiple types; this is not yet supported for functions")
function_type = function_types[0]
transitions = [f'{function_type} -> {name}']
else:
if isinstance(expression, str):
raise ParsingError(f"Unrecognized function: {expression[0]}")
else:
raise ParsingError(f"Unsupported expression type: {expression}")
if not isinstance(function_type, FunctionType):
            raise ParsingError(f'Zero-arg function or constant called with arguments: {expression}')
# Now that we have the transitions for the function itself, and the function's type, we can
# get argument types and do the rest of the transitions.
argument_types = function_type.argument_types
return_type = function_type.return_type
right_side = f'[{function_type}, {", ".join(str(arg) for arg in argument_types)}]'
first_transition = f'{return_type} -> {right_side}'
transitions.insert(0, first_transition)
if expected_type and expected_type != return_type:
raise ParsingError(f'{expression} did not have expected type {expected_type} '
f'(found {return_type})')
return transitions, return_type, argument_types
|
A helper method for ``_get_transitions``. This gets the transitions for the predicate
itself in a function call. If we only had simple functions (e.g., "(add 2 3)"), this would
be pretty straightforward and we wouldn't need a separate method to handle it. We split it
out into its own method because handling higher-order functions is complicated (e.g.,
something like "((negate add) 2 3)").
|
def scan(self, string):
""" Returns True if search(Sentence(string)) may yield matches.
        It is often faster to scan prior to creating a Sentence and searching it.
"""
# In the following example, first scan the string for "good" and "bad":
# p = Pattern.fromstring("good|bad NN")
# for s in open("parsed.txt"):
# if p.scan(s):
# s = Sentence(s)
# m = p.search(s)
# if m:
# print(m)
        words = (constraint.words for constraint in self.sequence if not constraint.optional)
        words = itertools.chain(*words)
        words = [w.strip(WILDCARD) for w in words if WILDCARD not in w[1:-1]]
        if words and not any(w in string.lower() for w in words):
return False
return True
|
Returns True if search(Sentence(string)) may yield matches.
It is often faster to scan prior to creating a Sentence and searching it.
|
def replace(state, host, name, match, replace, flags=None):
'''
A simple shortcut for replacing text in files with sed.
+ name: target remote file to edit
+ match: text/regex to match for
+ replace: text to replace with
    + flags: list of flags to pass to sed
'''
yield sed_replace(name, match, replace, flags=flags)
|
A simple shortcut for replacing text in files with sed.
+ name: target remote file to edit
+ match: text/regex to match for
+ replace: text to replace with
+ flags: list of flags to pass to sed
|
async def georadius(self, name, longitude, latitude, radius, unit=None,
withdist=False, withcoord=False, withhash=False, count=None,
sort=None, store=None, store_dist=None):
"""
Return the members of the specified key identified by the
``name`` argument which are within the borders of the area specified
with the ``latitude`` and ``longitude`` location and the maximum
distance from the center specified by the ``radius`` value.
        The units must be one of the following: m, km, mi, ft.
``withdist`` indicates to return the distances of each place.
``withcoord`` indicates to return the latitude and longitude of
each place.
``withhash`` indicates to return the geohash string of each place.
``count`` indicates to return the number of elements up to N.
        ``sort`` indicates to return the places in a sorted way, ASC for
        nearest to farthest and DESC for farthest to nearest.
``store`` indicates to save the places names in a sorted set named
with a specific key, each element of the destination sorted set is
populated with the score got from the original geo sorted set.
``store_dist`` indicates to save the places names in a sorted set
named with a specific key, instead of ``store`` the sorted set
destination score is set with the distance.
"""
return await self._georadiusgeneric('GEORADIUS',
name, longitude, latitude, radius,
unit=unit, withdist=withdist,
withcoord=withcoord, withhash=withhash,
count=count, sort=sort, store=store,
store_dist=store_dist)
|
Return the members of the specified key identified by the
``name`` argument which are within the borders of the area specified
with the ``latitude`` and ``longitude`` location and the maximum
distance from the center specified by the ``radius`` value.
The units must be one of the following: m, km, mi, ft (meters by default).
``withdist`` indicates to return the distances of each place.
``withcoord`` indicates to return the latitude and longitude of
each place.
``withhash`` indicates to return the geohash string of each place.
``count`` indicates to return up to N matching elements.
``sort`` indicates to return the places in a sorted way, ASC for
nearest to farthest and DESC for farthest to nearest.
``store`` indicates to save the place names in a sorted set named
with a specific key; each element of the destination sorted set is
populated with the score taken from the original geo sorted set.
``store_dist`` indicates to save the place names in a sorted set
named with a specific key; unlike ``store``, the destination sorted
set score is set to the distance.
|
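A hedged usage sketch (assumes a reachable Redis server and a geo set named 'places' previously filled via GEOADD; the coordinates are the Palermo example from the Redis docs):
import asyncio
from aredis import StrictRedis

async def nearby():
    client = StrictRedis(host='127.0.0.1', port=6379)
    # members within 200 km of Palermo, nearest first, with distances
    return await client.georadius('places', 13.361389, 38.115556, 200,
                                  unit='km', withdist=True, sort='ASC')

print(asyncio.get_event_loop().run_until_complete(nearby()))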
def create_file_combobox(self, text, choices, option, default=NoDefault,
tip=None, restart=False, filters=None,
adjust_to_contents=False,
default_line_edit=False):
"""choices: couples (name, key)"""
combobox = FileComboBox(self, adjust_to_contents=adjust_to_contents,
default_line_edit=default_line_edit)
combobox.restart_required = restart
combobox.label_text = text
edit = combobox.lineEdit()
edit.label_text = text
edit.restart_required = restart
self.lineedits[edit] = (option, default)
if tip is not None:
combobox.setToolTip(tip)
combobox.addItems(choices)
msg = _('Invalid file path')
self.validate_data[edit] = (osp.isfile, msg)
browse_btn = QPushButton(ima.icon('FileIcon'), '', self)
browse_btn.setToolTip(_("Select file"))
browse_btn.clicked.connect(lambda: self.select_file(edit, filters))
layout = QGridLayout()
layout.addWidget(combobox, 0, 0, 0, 9)
layout.addWidget(browse_btn, 0, 10)
layout.setContentsMargins(0, 0, 0, 0)
widget = QWidget(self)
widget.combobox = combobox
widget.browse_btn = browse_btn
widget.setLayout(layout)
return widget
|
choices: pairs of (name, key)
|
def address_from_public_key(pk_bytes):
"""Returns the base32-encoded version of pk_bytes (G...)
"""
    final_bytes = bytearray()
    # version byte: 6 << 3 == 48; its top five bits (00110 = 6) make the
    # base32-encoded address start with 'G'
    final_bytes.append(6 << 3)
    # raw public key bytes
    final_bytes.extend(pk_bytes)
    # 2-byte little-endian CRC16 checksum over version byte + public key
    final_bytes.extend(struct.pack("<H", _crc16_checksum(final_bytes)))
return base64.b32encode(final_bytes).decode()
|
Returns the base32-encoded version of pk_bytes (G...)
|
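For intuition: the payload is 1 version byte + 32 key bytes + 2 checksum bytes = 35 bytes, and 35 is divisible by 5, so base32 yields exactly 56 characters with no '=' padding; the version byte's top five bits select 'G'. A usage sketch (the all-zero key is purely illustrative):
pk = bytes(32)  # hypothetical all-zero ed25519 public key, for illustration only
addr = address_from_public_key(pk)
assert addr.startswith('G') and len(addr) == 56 and '=' not in addr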
def get_metrics(self):
"""Calculate ratio_comment_to_code and return with the other values"""
    if self.sloc == 0:
        if self.comments == 0:
            ratio_comment_to_code = 0.00
        else:
            ratio_comment_to_code = 1.00
    else:
        ratio_comment_to_code = float(self.comments) / self.sloc
metrics = OrderedDict([('sloc', self.sloc), ('comments', self.comments),
('ratio_comment_to_code', round(ratio_comment_to_code, 2))])
return metrics
|
Calculate ratio_comment_to_code and return with the other values
|
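For example, with sloc=200 and comments=50 the ratio is 50/200 = 0.25; with sloc=0 it degenerates to 0.0 (no comments) or 1.0 (comments present). A standalone sketch of the same computation with hypothetical values:
from collections import OrderedDict

sloc, comments = 200, 50
ratio = (0.0 if comments == 0 else 1.0) if sloc == 0 else comments / sloc
print(OrderedDict([('sloc', sloc), ('comments', comments),
                   ('ratio_comment_to_code', round(ratio, 2))]))
# OrderedDict([('sloc', 200), ('comments', 50), ('ratio_comment_to_code', 0.25)])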
def write_backreferences(seen_backrefs, gallery_conf,
target_dir, fname, snippet):
"""Writes down back reference files, which include a thumbnail list
of examples using a certain module"""
if gallery_conf['backreferences_dir'] is None:
return
example_file = os.path.join(target_dir, fname)
backrefs = scan_used_functions(example_file, gallery_conf)
for backref in backrefs:
include_path = os.path.join(gallery_conf['src_dir'],
gallery_conf['backreferences_dir'],
'%s.examples.new' % backref)
seen = backref in seen_backrefs
with codecs.open(include_path, 'a' if seen else 'w',
encoding='utf-8') as ex_file:
if not seen:
heading = '\n\nExamples using ``%s``' % backref
ex_file.write(heading + '\n')
ex_file.write('^' * len(heading) + '\n')
ex_file.write(_thumbnail_div(target_dir, gallery_conf['src_dir'],
fname, snippet, is_backref=True))
seen_backrefs.add(backref)
|
Writes down back reference files, which include a thumbnail list
of examples using a certain module
|
def DeregisterMountPoint(cls, mount_point):
"""Deregisters a path specification mount point.
Args:
mount_point (str): mount point identifier.
Raises:
KeyError: if the corresponding mount point is not set.
"""
if mount_point not in cls._mount_points:
raise KeyError('Mount point: {0:s} not set.'.format(mount_point))
del cls._mount_points[mount_point]
|
Deregisters a path specification mount point.
Args:
mount_point (str): mount point identifier.
Raises:
KeyError: if the corresponding mount point is not set.
|
def f_add_parameter(self, *args, **kwargs):
""" Adds a parameter under the current node.
    There are two ways to add a new parameter, either by adding a parameter instance:
>>> new_parameter = Parameter('group1.group2.myparam', data=42, comment='Example!')
>>> traj.f_add_parameter(new_parameter)
Or by passing the values directly to the function, with the name being the first
(non-keyword!) argument:
>>> traj.f_add_parameter('group1.group2.myparam', 42, comment='Example!')
If you want to create a different parameter than the standard parameter, you can
give the constructor as the first (non-keyword!) argument followed by the name
(non-keyword!):
>>> traj.f_add_parameter(PickleParameter,'group1.group2.myparam', data=42, comment='Example!')
The full name of the current node is added as a prefix to the given parameter name.
If the current node is the trajectory the prefix `'parameters'` is added to the name.
Note, all non-keyword and keyword parameters apart from the optional constructor
are passed on as is to the constructor.
    Moreover, you should always specify a default data value for a parameter,
even if you want to explore it later.
"""
return self._nn_interface._add_generic(self, type_name=PARAMETER,
group_type_name=PARAMETER_GROUP,
args=args, kwargs=kwargs)
|
Adds a parameter under the current node.
There are two ways to add a new parameter, either by adding a parameter instance:
>>> new_parameter = Parameter('group1.group2.myparam', data=42, comment='Example!')
>>> traj.f_add_parameter(new_parameter)
Or by passing the values directly to the function, with the name being the first
(non-keyword!) argument:
>>> traj.f_add_parameter('group1.group2.myparam', 42, comment='Example!')
If you want to create a different parameter than the standard parameter, you can
give the constructor as the first (non-keyword!) argument followed by the name
(non-keyword!):
>>> traj.f_add_parameter(PickleParameter,'group1.group2.myparam', data=42, comment='Example!')
The full name of the current node is added as a prefix to the given parameter name.
If the current node is the trajectory the prefix `'parameters'` is added to the name.
Note, all non-keyword and keyword parameters apart from the optional constructor
are passed on as is to the constructor.
Moreover, you should always specify a default data value for a parameter,
even if you want to explore it later.
|
def load_file(folder_path, idx, corpus):
"""
Load speaker, file, utterance, labels for the file with the given id.
"""
xml_path = os.path.join(folder_path, '{}.xml'.format(idx))
wav_paths = glob.glob(os.path.join(folder_path, '{}_*.wav'.format(idx)))
if len(wav_paths) == 0:
return []
    with open(xml_path, 'r', encoding='utf-8') as xml_file:
        soup = BeautifulSoup(xml_file, 'lxml')
transcription = soup.recording.cleaned_sentence.string
transcription_raw = soup.recording.sentence.string
gender = soup.recording.gender.string
is_native = soup.recording.muttersprachler.string
age_class = soup.recording.ageclass.string
speaker_idx = soup.recording.speaker_id.string
    if speaker_idx not in corpus.issuers:
start_age_class = int(age_class.split('-')[0])
if start_age_class < 12:
age_group = issuers.AgeGroup.CHILD
elif start_age_class < 18:
age_group = issuers.AgeGroup.YOUTH
elif start_age_class < 65:
age_group = issuers.AgeGroup.ADULT
else:
age_group = issuers.AgeGroup.SENIOR
native_lang = None
if is_native == 'Ja':
native_lang = 'deu'
issuer = issuers.Speaker(speaker_idx,
gender=issuers.Gender(gender),
age_group=age_group,
native_language=native_lang)
corpus.import_issuers(issuer)
utt_ids = []
for wav_path in wav_paths:
wav_name = os.path.split(wav_path)[1]
wav_idx = os.path.splitext(wav_name)[0]
corpus.new_file(wav_path, wav_idx)
utt = corpus.new_utterance(wav_idx, wav_idx, speaker_idx)
utt.set_label_list(annotations.LabelList.create_single(
transcription,
idx=audiomate.corpus.LL_WORD_TRANSCRIPT
))
utt.set_label_list(annotations.LabelList.create_single(
transcription_raw,
idx=audiomate.corpus.LL_WORD_TRANSCRIPT_RAW
))
utt_ids.append(wav_idx)
return utt_ids
|
Load speaker, file, utterance, labels for the file with the given id.
|
def get_gene_disease(self, direct_evidence=None, inference_chemical_name=None, inference_score=None,
gene_name=None, gene_symbol=None, gene_id=None, disease_name=None, disease_id=None,
disease_definition=None, limit=None, as_df=False):
"""Get gene–disease associations
:param bool as_df: if set to True result returns as `pandas.DataFrame`
:param int gene_id: gene identifier
:param str gene_symbol: gene symbol
:param str gene_name: gene name
    :param str direct_evidence: direct evidence
    :param str inference_chemical_name: inference chemical name
    :param float inference_score: inference score
:param disease_name: disease name
:param disease_id: disease identifier
:param disease_definition: disease definition
:param int limit: maximum number of results
:return: list of :class:`pyctd.manager.database.models.GeneDisease` objects
.. seealso::
:class:`pyctd.manager.models.GeneDisease`
which is linked to:
:class:`pyctd.manager.models.Chemical`
:class:`pyctd.manager.models.Gene`
"""
q = self.session.query(models.GeneDisease)
if direct_evidence:
q = q.filter(models.GeneDisease.direct_evidence == direct_evidence)
if inference_chemical_name:
q = q.filter(models.GeneDisease.inference_chemical_name == inference_chemical_name)
if inference_score:
q = q.filter(models.GeneDisease.inference_score == inference_score)
q = self._join_disease(query=q, disease_definition=disease_definition, disease_id=disease_id,
disease_name=disease_name)
q = self._join_gene(q, gene_name=gene_name, gene_symbol=gene_symbol, gene_id=gene_id)
return self._limit_and_df(q, limit, as_df)
|
Get gene–disease associations
:param bool as_df: if set to True result returns as `pandas.DataFrame`
:param int gene_id: gene identifier
:param str gene_symbol: gene symbol
:param str gene_name: gene name
:param str direct_evidence: direct evidence
:param str inference_chemical_name: inference chemical name
:param float inference_score: inference score
:param disease_name: disease name
:param disease_id: disease identifier
:param disease_definition: disease definition
:param int limit: maximum number of results
:return: list of :class:`pyctd.manager.database.models.GeneDisease` objects
.. seealso::
:class:`pyctd.manager.models.GeneDisease`
which is linked to:
:class:`pyctd.manager.models.Chemical`
:class:`pyctd.manager.models.Gene`
|
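A hedged query sketch (how the query manager is obtained and the evidence value are assumptions, not taken from the source):
import pyctd

query = pyctd.query()  # hypothetical way to obtain the manager instance
df = query.get_gene_disease(gene_symbol='TP53',
                            direct_evidence='marker/mechanism',
                            limit=10, as_df=True)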
def replace(old, new):
"""
A simple way to replace one element node with another.
"""
parent = old.getparent()
parent.replace(old, new)
|
A simple way to replace one element node with another.
|
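A minimal usage sketch with lxml (element names are illustrative):
from lxml import etree

root = etree.fromstring('<root><old/></root>')
replace(root.find('old'), etree.Element('new'))
print(etree.tostring(root))  # b'<root><new/></root>'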
def order_vertices(self):
"""Order vertices in the graph such that parents always have a lower index than children."""
    ordered = False
    while not ordered:
        # Assume sorted; any swap in this pass proves otherwise and
        # forces another full pass. (The original reset this flag per
        # vertex, so an early swap could be forgotten by pass end.)
        ordered = True
        for i in range(len(self.vertices)):
            for parent in self.vertices[i].parents:
                if parent > i:
                    ordered = False
                    self.swap_vertices(i, parent)
|
Order vertices in the graph such that parents always have a lower index than children.
|
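The same parent-before-child invariant, checked on a plain parent-index mapping (a standalone sketch, independent of the graph class above):
def is_parent_ordered(parents_of):
    # parents_of[i] lists the indices of vertex i's parents
    return all(p < i for i, ps in enumerate(parents_of) for p in ps)

print(is_parent_ordered([[], [0], [0, 1]]))  # True
print(is_parent_ordered([[1], [], [0]]))     # False: vertex 0 has parent at index 1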
def auth_user_remote_user(self, username):
"""
REMOTE_USER user Authentication
:param username: user's username for remote auth
:type self: User model
"""
user = self.find_user(username=username)
# User does not exist, create one if auto user registration.
if user is None and self.auth_user_registration:
user = self.add_user(
# All we have is REMOTE_USER, so we set
# the other fields to blank.
username=username,
first_name=username,
last_name="-",
email="-",
role=self.find_role(self.auth_user_registration_role),
)
# If user does not exist on the DB and not auto user registration,
# or user is inactive, go away.
elif user is None or (not user.is_active):
log.info(LOGMSG_WAR_SEC_LOGIN_FAILED.format(username))
return None
self.update_user_auth_stat(user)
return user
|
REMOTE_USER user Authentication
:param username: user's username for remote auth
:type self: User model
|
def zip_file(fn, mode="r"):
"""
returns either a zipfile.ZipFile instance or an ExplodedZipFile
instance, depending on whether fn is the name of a valid zip file,
or a directory.
"""
if isdir(fn):
return ExplodedZipFile(fn)
elif is_zipfile(fn):
return ZipFile(fn, mode)
else:
raise Exception("cannot treat as an archive: %r" % fn)
|
returns either a zipfile.ZipFile instance or an ExplodedZipFile
instance, depending on whether fn is the name of a valid zip file,
or a directory.
|
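Usage sketch (paths are hypothetical; ExplodedZipFile is assumed to mirror the ZipFile interface, per the docstring):
zf = zip_file('build/output.jar')    # regular archive -> zipfile.ZipFile
for name in zf.namelist():           # ZipFile API
    print(name)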
def next_except_jump(self, start):
"""
Return the next jump that was generated by an except SomeException:
construct in a try...except...else clause or None if not found.
"""
if self.code[start] == self.opc.DUP_TOP:
except_match = self.first_instr(start, len(self.code), self.opc.POP_JUMP_IF_FALSE)
if except_match:
jmp = self.prev_op[self.get_target(except_match)]
self.ignore_if.add(except_match)
self.not_continue.add(jmp)
return jmp
count_END_FINALLY = 0
count_SETUP_ = 0
for i in self.op_range(start, len(self.code)):
op = self.code[i]
if op == self.opc.END_FINALLY:
if count_END_FINALLY == count_SETUP_:
assert self.code[self.prev_op[i]] in frozenset([self.opc.JUMP_ABSOLUTE,
self.opc.JUMP_FORWARD,
self.opc.RETURN_VALUE])
self.not_continue.add(self.prev_op[i])
return self.prev_op[i]
count_END_FINALLY += 1
elif op in self.setup_opts_no_loop:
count_SETUP_ += 1
|
Return the next jump that was generated by an except SomeException:
construct in a try...except...else clause or None if not found.
|
def request_data(key, url, file, string_content, start, end, fix_apple):
"""
    Request data, update the local data cache and remove this thread from the queue.
:param key: key for data source to get result later
:param url: iCal URL
:param file: iCal file path
:param string_content: iCal content as string
:param start: start date
:param end: end date
:param fix_apple: fix known Apple iCal issues
"""
data = []
try:
data += events(url=url, file=file, string_content=string_content,
start=start, end=end, fix_apple=fix_apple)
finally:
update_events(key, data)
request_finished(key)
|
Request data, update the local data cache and remove this thread from the queue.
:param key: key for data source to get result later
:param url: iCal URL
:param file: iCal file path
:param string_content: iCal content as string
:param start: start date
:param end: end date
:param fix_apple: fix known Apple iCal issues
|
def get_stp_mst_detail_output_msti_port_configured_root_guard(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
get_stp_mst_detail = ET.Element("get_stp_mst_detail")
config = get_stp_mst_detail
output = ET.SubElement(get_stp_mst_detail, "output")
msti = ET.SubElement(output, "msti")
instance_id_key = ET.SubElement(msti, "instance-id")
instance_id_key.text = kwargs.pop('instance_id')
port = ET.SubElement(msti, "port")
configured_root_guard = ET.SubElement(port, "configured-root-guard")
configured_root_guard.text = kwargs.pop('configured_root_guard')
callback = kwargs.pop('callback', self._callback)
return callback(config)
|
Auto Generated Code
|
def get_compounds(identifier, namespace='cid', searchtype=None, as_dataframe=False, **kwargs):
"""Retrieve the specified compound records from PubChem.
:param identifier: The compound identifier to use as a search query.
:param namespace: (optional) The identifier type, one of cid, name, smiles, sdf, inchi, inchikey or formula.
:param searchtype: (optional) The advanced search type, one of substructure, superstructure or similarity.
:param as_dataframe: (optional) Automatically extract the :class:`~pubchempy.Compound` properties into a pandas
:class:`~pandas.DataFrame` and return that.
"""
results = get_json(identifier, namespace, searchtype=searchtype, **kwargs)
compounds = [Compound(r) for r in results['PC_Compounds']] if results else []
if as_dataframe:
return compounds_to_frame(compounds)
return compounds
|
Retrieve the specified compound records from PubChem.
:param identifier: The compound identifier to use as a search query.
:param namespace: (optional) The identifier type, one of cid, name, smiles, sdf, inchi, inchikey or formula.
:param searchtype: (optional) The advanced search type, one of substructure, superstructure or similarity.
:param as_dataframe: (optional) Automatically extract the :class:`~pubchempy.Compound` properties into a pandas
:class:`~pandas.DataFrame` and return that.
|
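A hedged usage sketch (CID 2244 is the real PubChem record for aspirin, but results depend on the live service):
import pubchempy as pcp

compounds = pcp.get_compounds('aspirin', 'name')        # search by name
df = pcp.get_compounds(2244, 'cid', as_dataframe=True)  # properties as a DataFrame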