| code (string, 75–104k chars) | docstring (string, 1–46.9k chars) |
|---|---|
def print_xmlsec_errors(filename, line, func, error_object, error_subject, reason, msg):
    """
    Auxiliary debug callback that replaces the default xmlsec error handler.

    Prints a single line of the form ``<file>:<line>(<func>) key=value ...``
    containing only the fields that carry information; prints nothing when
    every field is empty or "unknown".
    """
    fields = []
    if error_object != "unknown":
        fields.append("obj=" + error_object)
    if error_subject != "unknown":
        fields.append("subject=" + error_subject)
    if msg.strip():
        fields.append("msg=" + msg)
    if reason != 1:
        fields.append("errno=%d" % reason)
    if not fields:
        return
    location = "%s:%d(%s)" % (filename, line, func)
    print(location, " ".join(fields))
|
Auxiliary method. It overrides the default xmlsec debug message.
|
def set_metadata(self, obj, metadata, clear=False, prefix=None):
    """
    Accepts a dictionary of metadata key/value pairs and updates the
    specified object metadata with them.

    If 'clear' is True, any existing metadata is deleted and only the
    passed metadata is retained. Otherwise, the values passed here update
    the object's metadata.

    By default, the standard object metadata prefix ('X-Object-Meta-') is
    prepended to the header name if it isn't present. For non-standard
    headers, you must include a non-None prefix, such as an empty string.
    """
    if prefix is None:
        prefix = OBJECT_META_PREFIX
    massaged = _massage_metakeys(metadata, prefix)
    cname = utils.get_name(self.container)
    oname = utils.get_name(obj)
    # The object POST API is the opposite of the container POST API: for
    # objects, all current metadata is deleted, whereas containers require
    # setting a key to an empty string to delete it. So when not clearing,
    # start from the object's existing metadata.
    new_meta = {}
    if not clear:
        existing = self.get_metadata(obj, prefix=prefix)
        new_meta = _massage_metakeys(existing, prefix)
    utils.case_insensitive_update(new_meta, massaged)
    # Drop empty values, since the object metadata API would store them.
    new_meta = {key: val for key, val in new_meta.items() if val}
    uri = "/%s/%s" % (cname, oname)
    resp, resp_body = self.api.method_post(uri, headers=new_meta)
|
Accepts a dictionary of metadata key/value pairs and updates the
specified object metadata with them.
If 'clear' is True, any existing metadata is deleted and only the
passed metadata is retained. Otherwise, the values passed here update
the object's metadata.
By default, the standard object metadata prefix ('X-Object-Meta-') is
prepended to the header name if it isn't present. For non-standard
headers, you must include a non-None prefix, such as an empty string.
|
def get_library(path=None, root=None, db=None):
    """Return the default library for this installation.

    :param path: optional path to the configuration file.
    :param root: optional root directory override for the configuration.
    :param db: optional database override for the configuration.
    :returns: the library produced by ``ambry.library.new_library``.
    """
    # BUG FIX: the import previously preceded the string literal, so the
    # string was a no-op statement rather than the function's docstring.
    # Imported locally, presumably to avoid a circular import at load time.
    import ambry.library as _l
    rc = config(path=path, root=root, db=db)
    return _l.new_library(rc)
|
Return the default library for this installation.
|
def _cache_ops_associate(protocol, msgtype):
    """https://github.com/thom311/libnl/blob/libnl3_2_25/lib/cache_mngt.c#L111.

    Positional arguments:
    protocol -- Netlink protocol (integer).
    msgtype -- Netlink message type (integer).

    Returns:
    nl_cache_ops instance with matching protocol containing matching msgtype or None.
    """
    # Walk the singly-linked list of registered cache operations.
    current = cache_ops
    while current:
        if current.co_protocol == protocol:
            if any(mt.mt_id == msgtype for mt in current.co_msgtypes):
                return current
        current = current.co_next
    return None
|
https://github.com/thom311/libnl/blob/libnl3_2_25/lib/cache_mngt.c#L111.
Positional arguments:
protocol -- Netlink protocol (integer).
msgtype -- Netlink message type (integer).
Returns:
nl_cache_ops instance with matching protocol containing matching msgtype or None.
|
def set_position(self, key, latlon, layer=None, rotation=0):
    '''move an object on the map'''
    position = SlipPosition(key, latlon, layer, rotation)
    self.object_queue.put(position)
|
move an object on the map
|
def _check_accept_keywords(approved, flag):
'''check compatibility of accept_keywords'''
if flag in approved:
return False
elif (flag.startswith('~') and flag[1:] in approved) \
or ('~'+flag in approved):
return False
else:
return True
|
check compatibility of accept_keywords
|
def _get_gpu():
    """*DEPRECATED*. Allocates first available GPU using cudaSetDevice(), or returns 0 otherwise."""
    # NOTE: this code executes, but TensorFlow subsequently complains that
    # the "current context was not created by the StreamExecutor cuda_driver
    # API".
    system = platform.system()
    if system == "Linux":
        libcudart = ct.cdll.LoadLibrary("libcudart.so")
    elif system == "Darwin":
        libcudart = ct.cdll.LoadLibrary("libcudart.dylib")
    elif system == "Windows":
        libcudart = ct.windll.LoadLibrary("libcudart.dll")
    else:
        raise NotImplementedError("Cannot identify system.")
    count = ct.c_int()
    libcudart.cudaGetDeviceCount(ct.byref(count))
    # Claim the first device that accepts both cudaSetDevice and a trivial
    # cudaFree call; fall back to device 0 when none succeeds.
    for device in range(count.value):
        if libcudart.cudaSetDevice(device) == 0 and libcudart.cudaFree(0) == 0:
            return device
    return 0
|
*DEPRECATED*. Allocates first available GPU using cudaSetDevice(), or returns 0 otherwise.
|
def _is_contiguous(positions):
"""Given a non-empty list, does it consist of contiguous integers?"""
previous = positions[0]
for current in positions[1:]:
if current != previous + 1:
return False
previous = current
return True
|
Given a non-empty list, does it consist of contiguous integers?
|
def merge_overlaps(self, threshold=0.0):
    """
    Merge overlapping labels with the same value.

    Two labels are considered overlapping,
    if ``l2.start - l1.end < threshold``.

    Args:
        threshold (float): Maximal distance between two labels
            to be considered as overlapping.
            (default: 0.0)

    Example:
        >>> ll = LabelList(labels=[
        ...     Label('a_label', 1.0, 2.0),
        ...     Label('a_label', 1.5, 2.7),
        ...     Label('b_label', 1.0, 2.0),
        ... ])
        >>> ll.merge_overlaps()
        >>> ll.labels
        [
            Label('a_label', 1.0, 2.7),
            Label('b_label', 1.0, 2.0),
        ]
    """
    updated_labels = []
    # Work on a copy so intervals can be discarded as they are consumed.
    all_intervals = self.label_tree.copy()
    # recursively find a group of overlapping labels with the same value
    def recursive_overlaps(interval):
        # Widen the query range by the threshold so near-misses count.
        range_start = interval.begin - threshold
        range_end = interval.end + threshold
        direct_overlaps = all_intervals.overlap(range_start, range_end)
        all_overlaps = [interval]
        # Discard before recursing so the same interval is never visited twice.
        all_intervals.discard(interval)
        for overlap in direct_overlaps:
            # Only group labels that carry the same value.
            if overlap.data.value == interval.data.value:
                all_overlaps.extend(recursive_overlaps(overlap))
        return all_overlaps
    # For every remaining interval
    # - Find overlapping intervals recursively
    # - Remove them
    # - Create a concatenated new label
    while not all_intervals.is_empty():
        next_interval = list(all_intervals)[0]
        overlapping = recursive_overlaps(next_interval)
        # Bounding start/end over the whole overlap group.
        # NOTE(review): ov_end starts at 0.0, so a group ending before time 0
        # would yield a wrong end -- confirm label times are non-negative.
        ov_start = float('inf')
        ov_end = 0.0
        ov_value = next_interval.data.value
        for overlap in overlapping:
            ov_start = min(ov_start, overlap.begin)
            ov_end = max(ov_end, overlap.end)
            all_intervals.discard(overlap)
        updated_labels.append(Label(
            ov_value,
            ov_start,
            ov_end
        ))
    # Replace the old labels with the updated ones
    self.label_tree.clear()
    self.update(updated_labels)
|
Merge overlapping labels with the same value.
Two labels are considered overlapping,
if ``l2.start - l1.end < threshold``.
Args:
threshold (float): Maximal distance between two labels
to be considered as overlapping.
(default: 0.0)
Example:
>>> ll = LabelList(labels=[
... Label('a_label', 1.0, 2.0),
... Label('a_label', 1.5, 2.7),
... Label('b_label', 1.0, 2.0),
... ])
>>> ll.merge_overlaps()
>>> ll.labels
[
Label('a_label', 1.0, 2.7),
Label('b_label', 1.0, 2.0),
]
|
def polite_string(a_string):
    """Returns a "proper" string that should work in both Py3/Py2"""
    # Only byte-like objects on Python 3 need decoding; anything else is
    # returned untouched.
    if not (is_py3() and hasattr(a_string, 'decode')):
        return a_string
    try:
        return a_string.decode('utf-8')
    except UnicodeDecodeError:
        # Not valid UTF-8: hand back the raw bytes unchanged.
        return a_string
|
Returns a "proper" string that should work in both Py3/Py2
|
def _get_block_matches(self, attributes_a, attributes_b, filter_set_a=None, filter_set_b=None, delta=(0, 0, 0),
                       tiebreak_with_block_similarity=False):
    """
    Match blocks between two attribute dicts by mutual nearest neighbors.

    :param attributes_a: A dict of blocks to their attributes
    :param attributes_b: A dict of blocks to their attributes

    The following parameters are optional.

    :param filter_set_a: A set to limit attributes_a to the blocks in this set.
    :param filter_set_b: A set to limit attributes_b to the blocks in this set.
    :param delta: An offset to add to each vector in attributes_a.
    :param tiebreak_with_block_similarity: If True, use ``block_similarity``
        to break ties among equally-close candidates on both sides.
    :returns: A list of tuples of matching objects.
    """
    # get the attributes that are in the sets
    if filter_set_a is None:
        filtered_attributes_a = {k: v for k, v in attributes_a.items()}
    else:
        filtered_attributes_a = {k: v for k, v in attributes_a.items() if k in filter_set_a}
    if filter_set_b is None:
        filtered_attributes_b = {k: v for k, v in attributes_b.items()}
    else:
        filtered_attributes_b = {k: v for k, v in attributes_b.items() if k in filter_set_b}
    # add delta
    # NOTE(review): the docstring says delta applies to attributes_a only,
    # but it is added to both sides below -- confirm which is intended.
    for k in filtered_attributes_a:
        filtered_attributes_a[k] = tuple((i+j) for i, j in zip(filtered_attributes_a[k], delta))
    for k in filtered_attributes_b:
        filtered_attributes_b[k] = tuple((i+j) for i, j in zip(filtered_attributes_b[k], delta))
    # get closest
    closest_a = _get_closest_matches(filtered_attributes_a, filtered_attributes_b)
    closest_b = _get_closest_matches(filtered_attributes_b, filtered_attributes_a)
    if tiebreak_with_block_similarity:
        # use block similarity to break ties in the first set
        for a in closest_a:
            if len(closest_a[a]) > 1:
                best_similarity = 0
                best = []
                for x in closest_a[a]:
                    similarity = self.block_similarity(a, x)
                    if similarity > best_similarity:
                        best_similarity = similarity
                        best = [x]
                    elif similarity == best_similarity:
                        best.append(x)
                closest_a[a] = best
        # use block similarity to break ties in the second set
        for b in closest_b:
            if len(closest_b[b]) > 1:
                best_similarity = 0
                best = []
                for x in closest_b[b]:
                    similarity = self.block_similarity(x, b)
                    if similarity > best_similarity:
                        best_similarity = similarity
                        best = [x]
                    elif similarity == best_similarity:
                        best.append(x)
                closest_b[b] = best
    # a match (x,y) is good if x is the closest to y and y is the closest to x
    matches = []
    for a in closest_a:
        if len(closest_a[a]) == 1:
            match = closest_a[a][0]
            if len(closest_b[match]) == 1 and closest_b[match][0] == a:
                matches.append((a, match))
    return matches
|
:param attributes_a: A dict of blocks to their attributes
:param attributes_b: A dict of blocks to their attributes
The following parameters are optional.
:param filter_set_a: A set to limit attributes_a to the blocks in this set.
:param filter_set_b: A set to limit attributes_b to the blocks in this set.
:param delta: An offset to add to each vector in attributes_a.
:returns: A list of tuples of matching objects.
|
def openflow_controller_connection_address_connection_method(self, **kwargs):
    """Auto Generated Code

    Builds the NETCONF config tree for an openflow-controller
    connection-address/connection-method leaf and hands it to the callback.
    """
    config = ET.Element("config")
    controller = ET.SubElement(
        config, "openflow-controller",
        xmlns="urn:brocade.com:mgmt:brocade-openflow")
    name_key = ET.SubElement(controller, "controller-name")
    name_key.text = kwargs.pop('controller_name')
    address = ET.SubElement(controller, "connection-address")
    method = ET.SubElement(address, "connection-method")
    method.text = kwargs.pop('connection_method')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
|
Auto Generated Code
|
def patch(self, nml_patch):
    """Update the namelist from another partial or full namelist.

    This is different from the intrinsic `update()` method, which replaces
    a namelist section. Rather, it updates the values within a section.
    """
    for section in nml_patch:
        # Create missing sections on the fly, then merge the patch values
        # into whatever the section already holds.
        if section not in self:
            self[section] = Namelist()
        self[section].update(nml_patch[section])
|
Update the namelist from another partial or full namelist.
This is different from the intrinsic `update()` method, which replaces
a namelist section. Rather, it updates the values within a section.
|
def accuracy(sess, model, x, y, batch_size=None, devices=None, feed=None,
             attack=None, attack_params=None):
    """
    Compute the accuracy of a TF model on some data
    :param sess: TF session to use when training the graph
    :param model: cleverhans.model.Model instance
    :param x: numpy array containing input examples (e.g. MNIST().x_test )
    :param y: numpy array containing example labels (e.g. MNIST().y_test )
    :param batch_size: Number of examples to use in a single evaluation batch.
        If not specified, this function will use a reasonable guess and
        may run out of memory.
        When choosing the batch size, keep in mind that the batch will
        be divided up evenly among available devices. If you can fit 128
        examples in memory on one GPU and you have 8 GPUs, you probably
        want to use a batch size of 1024 (unless a different batch size
        runs faster with the ops you are using, etc.)
    :param devices: An optional list of string device names to use.
        If not specified, this function will use all visible GPUs.
    :param feed: An optional dictionary that is appended to the feeding
        dictionary before the session runs. Can be used to feed
        the learning phase of a Keras model for instance.
    :param attack: cleverhans.attack.Attack
        Optional. If no attack specified, evaluates the model on clean data.
        If attack is specified, evaluates the model on adversarial examples
        created by the attack.
    :param attack_params: dictionary
        If attack is specified, this dictionary is passed to attack.generate
        as keyword arguments.
    :return: a float with the accuracy value
    """
    _check_x(x)
    _check_y(y)
    if x.shape[0] != y.shape[0]:
        raise ValueError("Number of input examples and labels do not match.")
    # The factory caches the per-device correctness graph across calls.
    factory = _CorrectFactory(model, attack, attack_params)
    (correct,) = batch_eval_multi_worker(
        sess, factory, [x, y],
        batch_size=batch_size, devices=devices, feed=feed)
    return correct.mean()
|
Compute the accuracy of a TF model on some data
:param sess: TF session to use when training the graph
:param model: cleverhans.model.Model instance
:param x: numpy array containing input examples (e.g. MNIST().x_test )
:param y: numpy array containing example labels (e.g. MNIST().y_test )
:param batch_size: Number of examples to use in a single evaluation batch.
If not specified, this function will use a reasonable guess and
may run out of memory.
When choosing the batch size, keep in mind that the batch will
be divided up evenly among available devices. If you can fit 128
examples in memory on one GPU and you have 8 GPUs, you probably
want to use a batch size of 1024 (unless a different batch size
runs faster with the ops you are using, etc.)
:param devices: An optional list of string device names to use.
If not specified, this function will use all visible GPUs.
:param feed: An optional dictionary that is appended to the feeding
dictionary before the session runs. Can be used to feed
the learning phase of a Keras model for instance.
:param attack: cleverhans.attack.Attack
Optional. If no attack specified, evaluates the model on clean data.
If attack is specified, evaluates the model on adversarial examples
created by the attack.
:param attack_params: dictionary
If attack is specified, this dictionary is passed to attack.generate
as keyword arguments.
:return: a float with the accuracy value
|
def request(self, path, method='GET', params=None):
    """Builds a request and gets a response.

    :param path: path appended to the API endpoint.
    :param method: HTTP method ('GET', 'POST', 'PUT', 'PATCH' or 'DELETE').
    :param params: dict sent as the query string for GET, or as a JSON body
        for the other methods. Defaults to an empty dict.
    :returns: the response body text.
    :raises ValueError: when `method` is not a supported HTTP method.
    :raises requests.HTTPError: for error status codes outside the
        supported set.
    """
    if params is None:
        params = {}
    url = urljoin(self.endpoint, path)
    headers = {
        'Accept': 'application/json',
        'Authorization': 'AccessKey ' + self.access_key,
        'User-Agent': self.user_agent,
        'Content-Type': 'application/json'
    }
    # Dispatch table instead of an if/elif chain; GET is the only method
    # that sends `params` as a query string rather than a JSON body.
    dispatch = {
        'DELETE': requests.delete,
        'GET': requests.get,
        'PATCH': requests.patch,
        'POST': requests.post,
        'PUT': requests.put,
    }
    if method not in dispatch:
        raise ValueError(str(method) + ' is not a supported HTTP method')
    if method == 'GET':
        response = requests.get(url, verify=True, headers=headers, params=params)
    else:
        response = dispatch[method](url, verify=True, headers=headers,
                                    data=json.dumps(params))
    if response.status_code not in self.__supported_status_codes:
        # BUG FIX: the original only assigned `response_text` inside the
        # supported-codes branch; an unsupported non-error status (where
        # raise_for_status() does not raise) then crashed with
        # UnboundLocalError instead of returning the body.
        response.raise_for_status()
    return response.text
|
Builds a request and gets a response.
|
def clean_password(self, password, user=None):
    """
    Validates a password. You can hook into this if you want to
    restrict the allowed password choices.
    """
    min_length = app_settings.PASSWORD_MIN_LENGTH
    # A falsy min_length (0/None) disables the length check entirely.
    too_short = min_length and len(password) < min_length
    if too_short:
        raise forms.ValidationError(
            _("Password must be a minimum of {0} "
              "characters.").format(min_length))
    validate_password(password, user)
    return password
|
Validates a password. You can hook into this if you want to
restrict the allowed password choices.
|
def status_codes_by_date_stats():
    """
    Get stats for status codes by date.

    Returns:
        list: status codes + date grouped by type: 2xx, 3xx, 4xx, 5xx, attacks.
    """
    def date_counter(queryset):
        # Collapse each log timestamp to midnight (naive) and count how many
        # requests fall on each day, keyed by milliseconds since the epoch.
        days = []
        for dt in queryset.values_list('datetime', flat=True):
            day = datetime.combine(make_naive(dt), datetime.min.time())
            days.append(ms_since_epoch(day))
        return dict(Counter(days))
    codes = {}
    for low, high in ((200, 300), (300, 400), (400, 500)):
        queryset = RequestLog.objects.filter(
            status_code__gte=low, status_code__lt=high)
        codes[low] = date_counter(queryset)
    codes[500] = date_counter(RequestLog.objects.filter(status_code__gte=500))
    codes['attacks'] = date_counter(RequestLog.objects.filter(
        status_code__in=(400, 444, 502)))
    # Merge the per-class counters into one row per day.
    stats = {}
    for code in (200, 300, 400, 500, 'attacks'):
        for date, count in codes[code].items():
            if date not in stats:
                stats[date] = {200: 0, 300: 0, 400: 0, 500: 0, 'attacks': 0}
            stats[date][code] += count
    return sorted(stats.items(), key=lambda item: item[0])
|
Get stats for status codes by date.
Returns:
list: status codes + date grouped by type: 2xx, 3xx, 4xx, 5xx, attacks.
|
def _write_frames(self, handle):
'''Write our frame data to the given file handle.
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle.
'''
assert handle.tell() == 512 * (self.header.data_block - 1)
scale = abs(self.point_scale)
is_float = self.point_scale < 0
point_dtype = [np.int16, np.float32][is_float]
point_scale = [scale, 1][is_float]
point_format = 'if'[is_float]
raw = np.empty((self.point_used, 4), point_dtype)
for points, analog in self._frames:
valid = points[:, 3] > -1
raw[~valid, 3] = -1
raw[valid, :3] = points[valid, :3] / self._point_scale
raw[valid, 3] = (
((points[valid, 4]).astype(np.uint8) << 8) |
(points[valid, 3] / scale).astype(np.uint16)
)
point = array.array(point_format)
point.extend(raw.flatten())
point.tofile(handle)
analog = array.array(point_format)
analog.extend(analog)
analog.tofile(handle)
self._pad_block(handle)
|
Write our frame data to the given file handle.
Parameters
----------
handle : file
Write metadata and C3D motion frames to the given file handle. The
writer does not close the handle.
|
def auto(cls, syslog=None, stderr=None, level=None, extended=None,
         server=None):
    """Tries to guess a sound logging configuration.
    """
    level = norm_level(level) or logging.INFO
    if syslog is None and stderr is None:
        # Interactive session, or no syslog socket available: log to STDERR.
        if sys.stderr.isatty() or syslog_path() is None:
            log.info('Defaulting to STDERR logging.')
            syslog, stderr = None, level
            if extended is None:
                extended = (stderr or 0) <= logging.DEBUG
        else:
            log.info('Defaulting to logging with Syslog.')
            syslog, stderr = level, None
    return cls(syslog=syslog, stderr=stderr, extended=extended,
               server=server)
|
Tries to guess a sound logging configuration.
|
def trace_buffer_capacity(self):
    """Retrieves the trace buffer's current capacity.

    Args:
      self (JLink): the ``JLink`` instance.

    Returns:
      The current capacity of the trace buffer. This is not necessarily
      the maximum possible size the buffer could be configured with.
    """
    capacity = ctypes.c_uint32(0)
    command = enums.JLinkTraceCommand.GET_CONF_CAPACITY
    result = self._dll.JLINKARM_TRACE_Control(command, ctypes.byref(capacity))
    if result == 1:
        raise errors.JLinkException('Failed to get trace buffer size.')
    return capacity.value
|
Retrieves the trace buffer's current capacity.
Args:
self (JLink): the ``JLink`` instance.
Returns:
The current capacity of the trace buffer. This is not necessarily
the maximum possible size the buffer could be configured with.
|
def keyrelease(self, data):
    """
    Release key. NOTE: keypress should be called before this
    @param data: data to type.
    @type data: string
    @return: 1 on success.
    @rtype: integer
    """
    try:
        target_window = self._get_front_most_window()
    except IndexError:
        # No front-most window available; fall back to any window.
        target_window = self._get_any_window()
    # Presumably KeyReleaseAction performs the release as a side effect of
    # construction -- its return value is unused. TODO confirm.
    KeyReleaseAction(target_window, data)
    return 1
|
Release key. NOTE: keypress should be called before this
@param data: data to type.
@type data: string
@return: 1 on success.
@rtype: integer
|
def SRem(a: BitVec, b: BitVec) -> BitVec:
    """Create a signed remainder expression.

    :param a: the dividend bit vector.
    :param b: the divisor bit vector.
    :return: a BitVec wrapping the z3 signed-remainder of ``a`` by ``b``.
    """
    return _arithmetic_helper(a, b, z3.SRem)
|
Create a signed remainder expression.
:param a:
:param b:
:return:
|
def store_node_label_meta(self, x, y, tx, ty, rot):
    """
    Store coordinate-related metadata for a node label.

    This function should not be called by the user.

    :param x: x location of node label or number
    :type x: np.float64
    :param y: y location of node label or number
    :type y: np.float64
    :param tx: text location x of node label (numbers)
    :type tx: np.float64
    :param ty: text location y of node label (numbers)
    :type ty: np.float64
    :param rot: rotation angle of the text (rotation)
    :type rot: float
    """
    coords = self.node_label_coords
    coords["x"].append(x)
    coords["y"].append(y)
    coords["tx"].append(tx)
    coords["ty"].append(ty)
    # Horizontal alignment depends on which side of the origin the label is.
    if x == 0:
        horizontal = "center"
    elif x > 0:
        horizontal = "left"
    else:
        horizontal = "right"
    self.node_label_aligns["has"].append(horizontal)
    # Vertical alignment: rotated layouts always center the text.
    if self.node_label_layout == "rotate" or y == 0:
        vertical = "center"
    elif y > 0:
        vertical = "bottom"
    else:
        vertical = "top"
    self.node_label_aligns["vas"].append(vertical)
    self.node_label_rotation.append(rot)
|
This function stores coordinate-related metadata for a node.
This function should not be called by the user.
:param x: x location of node label or number
:type x: np.float64
:param y: y location of node label or number
:type y: np.float64
:param tx: text location x of node label (numbers)
:type tx: np.float64
:param ty: text location y of node label (numbers)
:type ty: np.float64
:param rot: rotation angle of the text (rotation)
:type rot: float
|
def _mulf16(ins):
    """ Multiplies 2 32bit (16.16) fixed point numbers. The result is pushed onto the stack.

    Constant-folding shortcuts when one operand is a known constant:
    op2 == 1 leaves op1 unchanged, op2 == -1 negates it, op2 == 0 pushes 0.
    Otherwise the generic __MULF16 runtime routine is called.
    """
    op1, op2 = tuple(ins.quad[2:])
    if _f_ops(op1, op2) is not None:
        # Normalizes the pair so the constant (if any) ends up in op2.
        op1, op2 = _f_ops(op1, op2)
        if op2 == 1:  # A * 1 => A
            output = _f16_oper(op1)
            output.append('push de')
            output.append('push hl')
            return output
        if op2 == -1:
            # A * -1 => -A
            return _neg32(ins)
        output = _f16_oper(op1)
        if op2 == 0:
            # A * 0 => 0; DE:HL is set to zero.
            output.append('ld hl, 0')
            # NOTE(review): 'ld e, h' / 'ld d, l' copies H/L (both zero) into
            # E/D, so DE ends up 0 -- works, though unconventional.
            output.append('ld e, h')
            output.append('ld d, l')
            output.append('push de')
            output.append('push hl')
            return output
    # Generic case: emit both operands and call the runtime multiply.
    output = _f16_oper(op1, str(op2))
    output.append('call __MULF16')
    output.append('push de')
    output.append('push hl')
    REQUIRES.add('mulf16.asm')
    return output
|
Multiplies 2 32bit (16.16) fixed point numbers. The result is pushed onto the stack.
|
def scan(self,
         proxy_scanner,
         expected_num=20,
         val_thr_num=4,
         queue_timeout=3,
         val_timeout=5,
         out_file='proxies.json'):
    """Scan and validate proxies

    Firstly, call the `scan` method of `proxy_scanner`, then using multiple
    threads to validate them.

    Args:
        proxy_scanner: A ProxyScanner object.
        expected_num: Max number of valid proxies to be scanned.
        val_thr_num: Number of threads used for validating proxies.
        queue_timeout: Timeout for getting a proxy from the queue.
        val_timeout: An integer passed to `is_valid` as argument `timeout`.
        out_file: A string or None. If not None, the proxies will be saved
            into `out_file`.
    """
    # The original wrapped this body in a bare ``except: raise``, which is
    # a no-op; try/finally alone guarantees saving on all exit paths.
    try:
        proxy_scanner.scan()
        self.logger.info('starting {} threads to validating proxies...'
                         .format(val_thr_num))
        val_threads = []
        for thread_index in range(val_thr_num):
            t = threading.Thread(
                name='val-{:0>2d}'.format(thread_index + 1),
                target=self.validate,
                kwargs=dict(
                    proxy_scanner=proxy_scanner,
                    expected_num=expected_num,
                    queue_timeout=queue_timeout,
                    val_timeout=val_timeout))
            # Daemon threads so a crashed validator cannot block shutdown.
            t.daemon = True
            val_threads.append(t)
            t.start()
        for t in val_threads:
            t.join()
        self.logger.info('Proxy scanning done!')
    finally:
        if out_file is not None:
            self.save(out_file)
|
Scan and validate proxies
Firstly, call the `scan` method of `proxy_scanner`, then using multiple
threads to validate them.
Args:
proxy_scanner: A ProxyScanner object.
expected_num: Max number of valid proxies to be scanned.
val_thr_num: Number of threads used for validating proxies.
queue_timeout: Timeout for getting a proxy from the queue.
val_timeout: An integer passed to `is_valid` as argument `timeout`.
out_file: A string or None. If not None, the proxies will be saved
into `out_file`.
|
def identify_misfeatured_regions(st, filter_size=5, sigma_cutoff=8.):
    """
    Identifies regions of missing/misfeatured particles based on the
    residuals' local deviation from uniform Gaussian noise.

    Parameters
    ----------
    st : :class:`peri.states.State`
        The state in which to identify mis-featured regions.
    filter_size : Int, best if odd.
        The size of the filter for calculating the local standard deviation;
        should approximately be the size of a poorly featured region in
        each dimension. Default is 5.
    sigma_cutoff : Float or `otsu`, optional
        The max allowed deviation of the residuals from what is expected,
        in units of the residuals' standard deviation. Lower means more
        sensitive, higher = less sensitive. Default is 8.0, i.e. one pixel
        out of every 7*10^11 is mis-identified randomly. In practice the
        noise is not Gaussian so there are still some regions mis-identified
        as improperly featured. Set to ``otsu`` to calculate this number
        based on an automatic Otsu threshold.

    Returns
    -------
    tiles : List of :class:`peri.util.Tile`
        Each tile is the smallest bounding tile that contains an improperly
        featured region. The list is sorted by the tile's volume.

    Notes
    -----
    Algorithm is

    1. Create a field of the local standard deviation, as measured over
       a hypercube of size filter_size.
    2. Find the maximum reasonable value of the field. [The field should
       be a random variable with mean of r.std() and standard deviation
       of ~r.std() / sqrt(N), where r is the residuals and N is the
       number of pixels in the hypercube.]
    3. Label & Identify the misfeatured regions as portions where
       the local error is too large.
    4. Parse the misfeatured regions into tiles.
    5. Return the sorted tiles.

    The Otsu option to calculate the sigma cutoff works well for images
    that actually contain missing particles, returning a number similar
    to one calculated with a sigma cutoff. However, if the image is
    well-featured with Gaussian residuals, then the Otsu threshold
    splits the Gaussian down the middle instead of at the tails, which
    is very bad. So use with caution.
    """
    # 1. Field of local std: RMS of the residuals over a normalized
    # hypercube window, computed via convolution.
    r = st.residuals
    weights = np.ones([filter_size]*len(r.shape), dtype='float')
    weights /= weights.sum()
    f = np.sqrt(nd.filters.convolve(r*r, weights, mode='reflect'))
    # 2. Maximal reasonable value of the field.
    if sigma_cutoff == 'otsu':
        max_ok = initializers.otsu_threshold(f)
    else:
        # max_ok = f.mean() * (1 + sigma_cutoff / np.sqrt(weights.size))
        max_ok = f.mean() + sigma_cutoff * f.std()
    # 3. Label & Identify connected regions whose local error is too large.
    bad = f > max_ok
    labels, n = nd.measurements.label(bad)
    inds = []
    for i in range(1, n+1):
        inds.append(np.nonzero(labels == i))
    # 4. Parse into tiles: smallest bounding tile per labeled region.
    tiles = [Tile(np.min(ind, axis=1), np.max(ind, axis=1)+1) for ind in inds]
    # 5. Sort by volume, largest first, and return.
    volumes = [t.volume for t in tiles]
    return [tiles[i] for i in np.argsort(volumes)[::-1]]
|
Identifies regions of missing/misfeatured particles based on the
residuals' local deviation from uniform Gaussian noise.
Parameters
----------
st : :class:`peri.states.State`
The state in which to identify mis-featured regions.
filter_size : Int, best if odd.
The size of the filter for calculating the local standard deviation;
should approximately be the size of a poorly featured region in
each dimension. Default is 5.
sigma_cutoff : Float or `otsu`, optional
The max allowed deviation of the residuals from what is expected,
in units of the residuals' standard deviation. Lower means more
sensitive, higher = less sensitive. Default is 8.0, i.e. one pixel
out of every 7*10^11 is mis-identified randomly. In practice the
noise is not Gaussian so there are still some regions mis-identified
as improperly featured. Set to ```otsu``` to calculate this number
based on an automatic Otsu threshold.
Returns
-------
tiles : List of :class:`peri.util.Tile`
Each tile is the smallest bounding tile that contains an improperly
featured region. The list is sorted by the tile's volume.
Notes
-----
Algorithm is
1. Create a field of the local standard deviation, as measured over
a hypercube of size filter_size.
2. Find the maximum reasonable value of the field. [The field should
be a random variable with mean of r.std() and standard deviation
of ~r.std() / sqrt(N), where r is the residuals and N is the
number of pixels in the hypercube.]
3. Label & Identify the misfeatured regions as portions where
the local error is too large.
4. Parse the misfeatured regions into tiles.
5. Return the sorted tiles.
The Otsu option to calculate the sigma cutoff works well for images
that actually contain missing particles, returning a number similar
to one calculated with a sigma cutoff. However, if the image is
well-featured with Gaussian residuals, then the Otsu threshold
splits the Gaussian down the middle instead of at the tails, which
is very bad. So use with caution.
|
def _put (self, url_data):
    """Put URL in queue, increase number of unfinished tasks."""
    # Drop new URLs while shutting down or once the URL budget is spent.
    if self.shutdown or self.max_allowed_urls == 0:
        return
    log.debug(LOG_CACHE, "queueing %s", url_data.url)
    key = url_data.cache_url
    cache = url_data.aggregate.result_cache
    if url_data.has_result or cache.has_result(key):
        # URLs with a (cached) result go to the front so they are
        # processed first.
        self.queue.appendleft(url_data)
    else:
        assert key is not None, "no result for None key: %s" % url_data
        # A budget of None means unlimited URLs.
        if self.max_allowed_urls is not None:
            self.max_allowed_urls -= 1
        self.num_puts += 1
        # Periodically clean up the queue to bound its size.
        if self.num_puts >= NUM_PUTS_CLEANUP:
            self.cleanup()
        self.queue.append(url_data)
    self.unfinished_tasks += 1
|
Put URL in queue, increase number of unfinished tasks.
|
def _abs_pow_ufunc(self, fi, out, p):
    """Compute |F_i(x)|^p point-wise and write to ``out``."""
    # Optimization for very common cases
    if p == 0.5:
        # |f|^0.5 == sqrt(|f|)
        fi.ufuncs.absolute(out=out)
        out.ufuncs.sqrt(out=out)
    elif p == 2.0 and self.base_space.field == RealNumbers():
        # For real-valued f, |f|^2 == f*f, avoiding the absolute value.
        fi.multiply(fi, out=out)
    else:
        # General case: |f| first, then raise to the power p in place.
        fi.ufuncs.absolute(out=out)
        out.ufuncs.power(p, out=out)
|
Compute |F_i(x)|^p point-wise and write to ``out``.
|
def get_user_invitation_by_id(self, id):
    """Retrieve a UserInvitation object by ID."""
    invitation_class = self.UserInvitationClass
    return self.db_adapter.get_object(invitation_class, id=id)
|
Retrieve a UserInvitation object by ID.
|
def parse_bibliography(source, loc, tokens):
    """
    Combines the parsed entries into a Bibliography instance.
    """
    bibliography = structures.Bibliography()
    for parsed_entry in tokens:
        bibliography.add(parsed_entry)
    return bibliography
|
Combines the parsed entries into a Bibliography instance.
|
def push(self, values: np.ndarray):
    """
    Push values to buffer. If buffer can't store all values a ValueError is raised
    """
    n = len(values)
    if len(self) + n > self.size:
        raise ValueError("Too much data to push to RingBuffer")
    # First slice: from the write index up to the physical end of the buffer.
    slide_1 = np.s_[self.right_index:min(self.right_index + n, self.size)]
    # Second slice: the wrap-around portion at the start (empty if no wrap).
    slide_2 = np.s_[:max(self.right_index + n - self.size, 0)]
    # Hold the shared-memory lock while writing so concurrent readers of
    # the backing multiprocessing array see consistent data.
    with self.__data.get_lock():
        data = np.frombuffer(self.__data.get_obj(), dtype=np.complex64)
        data[slide_1] = values[:slide_1.stop - slide_1.start]
        data[slide_2] = values[slide_1.stop - slide_1.start:]
        # NOTE(review): right_index grows without a modulo here -- presumably
        # the index is normalized elsewhere (e.g. a property); confirm.
        self.right_index += n
    self.__length.value += n
|
Push values to buffer. If buffer can't store all values a ValueError is raised
|
def get_pplan(self, topologyName, callback=None):
    """ get physical plan """
    # Temp dict captured by the fallback callback so the result can be
    # returned synchronously when no user callback is provided.
    ret = {
        "result": None
    }
    isWatching = bool(callback)
    if not isWatching:
        def callback(data):
            """
            Custom callback to get the topologies right now.
            """
            ret["result"] = data
    self._get_pplan_with_watch(topologyName, callback, isWatching)
    # The topologies are now populated with the data.
    return ret["result"]
|
get physical plan
|
async def _watch(self, node, conn, names):
    "Watches the values at keys ``names``"
    for name in names:
        # Every watched key must hash to the same cluster node as ``node``.
        slot = self._determine_slot('WATCH', name)
        dist_node = self.connection_pool.get_node_by_slot(slot)
        if node.get('name') != dist_node['name']:
            # raise error if commands in a transaction can not hash to same node
            if len(node) > 0:
                raise ClusterTransactionError("Keys in request don't hash to the same node")
    if self.explicit_transaction:
        raise RedisError('Cannot issue a WATCH after a MULTI')
    await conn.send_command('WATCH', *names)
    return await conn.read_response()
|
Watches the values at keys ``names``
|
def _process_worker(call_queue, result_queue):
    """Evaluates calls from call_queue and places the results in result_queue.
    This worker is run in a separate process.
    Args:
        call_queue: A multiprocessing.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will be
            written to by the worker.
    """
    while True:
        item = call_queue.get(block=True)
        if item is None:
            # Poison pill: notify the queue-management thread, then exit.
            result_queue.put(None)
            return
        try:
            outcome = item.fn(*item.args, **item.kwargs)
        except BaseException:
            # sys.exc_info() keeps this compatible with old except syntax.
            err = sys.exc_info()[1]
            result_queue.put(_ResultItem(item.work_id, exception=err))
        else:
            result_queue.put(_ResultItem(item.work_id, result=outcome))
|
Evaluates calls from call_queue and places the results in result_queue.
This worker is run in a separate process.
Args:
call_queue: A multiprocessing.Queue of _CallItems that will be read and
evaluated by the worker.
result_queue: A multiprocessing.Queue of _ResultItems that will be written
to by the worker.
shutdown: A multiprocessing.Event that will be set as a signal to the
worker that it should exit when call_queue is empty.
|
def get_template(self, template, def_name=None):
    '''Retrieve a *Django* API template object for the given template name.

    Uses the app_path and template_subdir settings in this object. The
    underlying Mako template and engine are still used, but wrapped in a
    Django-API adapter so the result can be used like any Django template.
    If def_name is provided, rendering is limited to the named def/block
    (see Mako docs). Django exceptions are raised if the template is not
    found or cannot compile.
    '''
    try:
        # Wrap the Mako template in an adapter exposing the Django template API.
        return MakoTemplateAdapter(self.get_mako_template(template), def_name)
    except (TopLevelLookupException, TemplateLookupException) as exc:  # Mako lookup failed
        error = TemplateDoesNotExist('Template "%s" not found in search path: %s.' % (template, self.template_search_dirs))
        if settings.DEBUG:
            error.template_debug = get_template_debug(template, exc)
        raise error from exc
    except (CompileException, SyntaxException) as exc:  # Mako compile failed
        error = TemplateSyntaxError('Template "%s" raised an error: %s' % (template, exc))
        if settings.DEBUG:
            error.template_debug = get_template_debug(template, exc)
        raise error from exc
|
Retrieve a *Django* API template object for the given template name, using the app_path and template_subdir
settings in this object. This method still uses the corresponding Mako template and engine, but it
gives a Django API wrapper around it so you can use it the same as any Django template.
If def_name is provided, template rendering will be limited to the named def/block (see Mako docs).
This method corresponds to the Django templating system API.
A Django exception is raised if the template is not found or cannot compile.
|
def load(name):
    """Parse the tile map registered under ``name`` and add it to the world."""
    global __tile_maps
    # Drop whatever map is currently active before swapping in the new one.
    TileMapManager.unload()
    active = __tile_maps[name]
    TileMapManager.active_map = active
    active.parse_tilemap()
    active.parse_collisions()
    active.parse_objects()
    # Register the map and every object it spawned with the world.
    world = Ragnarok.get_world()
    world.add_obj(active)
    for map_obj in active.objects:
        world.add_obj(map_obj)
|
Parse the tile map and add it to the world.
|
def on_error(self, ex):
    """
    Reimplemented from :meth:`~AsyncViewBase.on_error`
    """
    deferred = self._d
    if not deferred:
        return
    # Fire the errback first, then drop the reference so it fires only once.
    deferred.errback()
    self._d = None
|
Reimplemented from :meth:`~AsyncViewBase.on_error`
|
def init(self, request, paypal_request, paypal_response):
    """Initialize a PayPalNVP instance from a HttpRequest.

    :param request: the originating Django ``HttpRequest`` (or ``None``)
    :param paypal_request: dict of NVP parameters sent to PayPal
    :param paypal_response: dict of NVP parameters returned by PayPal
    """
    if request is not None:
        from paypal.pro.helpers import strip_ip_port
        self.ipaddress = strip_ip_port(request.META.get('REMOTE_ADDR', ''))
        if (hasattr(request, "user") and request.user.is_authenticated):
            self.user = request.user
    else:
        self.ipaddress = ''
    # No storing credit card info.
    query_data = dict((k, v) for k, v in paypal_request.items() if k not in self.RESTRICTED_FIELDS)
    self.query = urlencode(query_data)
    self.response = urlencode(paypal_response)
    # Was there a flag on the play?
    ack = paypal_response.get('ack', False)
    if ack != "Success":
        if ack == "SuccessWithWarning":
            # Partial success: record the warning but don't flag the response.
            warn_untested()
            self.flag_info = paypal_response.get('l_longmessage0', '')
        else:
            self.set_flag(paypal_response.get('l_longmessage0', ''), paypal_response.get('l_errorcode', ''))
|
Initialize a PayPalNVP instance from a HttpRequest.
|
def _default_template_args(self, content_template):
    """Build the default argument dict handed to the base template."""
    args = {'content_template': content_template}

    def include(text, ctx):
        # Render the template *name* first, then render that template.
        template_name = pystache.render(text, ctx)
        return self._renderer.render_name(template_name, ctx)

    # Our base template calls include on the content_template.
    args['include'] = lambda text: include(text, args)
    return args
|
Initialize template args.
|
def to_sequence_field(cls):
    """
    Return a callable instance converting a value to a Sequence.
    :param cls: Valid class type of the items in the Sequence.
    :return: instance of the SequenceConverter.
    """
    class SequenceConverter(object):
        def __init__(self, cls):
            self._cls = cls

        @property
        def cls(self):
            # Resolve lazily so forward references work.
            return resolve_class(self._cls)

        def __call__(self, values):
            items = [to_model(self.cls, item) for item in (values or [])]
            return TypedSequence(cls=self.cls, args=items)

    return SequenceConverter(cls)
|
Returns a callable instance that will convert a value to a Sequence.
:param cls: Valid class type of the items in the Sequence.
:return: instance of the SequenceConverter.
|
def _queue_declare_ok(self, args):
    """
    Confirm a queue definition (reply to Queue.Declare).

    Reads three fields from the method frame ``args``:

        queue: shortstr
            Name of the queue; for server-generated names this is the
            name the server picked.
        message_count: long
            Number of messages currently in the queue (zero for a
            newly created queue).
        consumer_count: long
            Number of active consumers; suspended consumers
            (Channel.Flow) are not counted.
    """
    queue_name = args.read_shortstr()
    messages = args.read_long()
    consumers = args.read_long()
    return queue_name, messages, consumers
|
confirms a queue definition
This method confirms a Declare method and confirms the name of
the queue, essential for automatically-named queues.
PARAMETERS:
queue: shortstr
Reports the name of the queue. If the server generated
a queue name, this field contains that name.
message_count: long
number of messages in queue
Reports the number of messages in the queue, which
will be zero for newly-created queues.
consumer_count: long
number of consumers
Reports the number of active consumers for the queue.
Note that consumers can suspend activity
(Channel.Flow) in which case they do not appear in
this count.
|
def extract(code, tree, prefix=None):
    """Extract Huffman code from a Huffman tree.

    :param code: dict filled in place, mapping each leaf symbol to its
        '0'/'1' code string
    :param tree: a node of the tree — a 2-element list ``[left, right]``
        for internal nodes, a symbol for leaves
    :param prefix: a list with the 01 characters encoding the path from
        the root to the node `tree`
    :complexity: O(n)
    """
    # A mutable default ([]) would be shared across top-level calls; use
    # the None-sentinel idiom instead.
    if prefix is None:
        prefix = []
    if isinstance(tree, list):
        left, right = tree
        prefix.append('0')
        extract(code, left, prefix)
        prefix.pop()
        prefix.append('1')
        extract(code, right, prefix)
        prefix.pop()
    else:
        # Leaf: the accumulated path is this symbol's code.
        code[tree] = ''.join(prefix)
|
Extract Huffman code from a Huffman tree
:param tree: a node of the tree
:param prefix: a list with the 01 characters encoding the path from
the root to the node `tree`
:complexity: O(n)
|
def objects_copy(self, source_bucket, source_key, target_bucket, target_key):
    """Copies an object from a source bucket/key to a target bucket/key.
    Args:
      source_bucket: the name of the bucket containing the source object.
      source_key: the key of the source object being copied.
      target_bucket: the name of the bucket that will contain the copied object.
      target_key: the key of the copied object.
    Returns:
      A parsed object information dictionary.
    Raises:
      Exception if there is an error performing the operation.
    """
    url = Api._ENDPOINT + (Api._OBJECT_COPY_PATH % (source_bucket, Api._escape_key(source_key),
                                                    target_bucket, Api._escape_key(target_key)))
    return datalab.utils.Http.request(url, method='POST', credentials=self._credentials)
|
Copies an object from a source bucket/key to a target bucket/key.
Args:
source_bucket: the name of the bucket containing the source object.
source_key: the key of the source object being copied.
target_bucket: the name of the bucket that will contain the copied object.
target_key: the key of the copied object.
Returns:
A parsed object information dictionary.
Raises:
Exception if there is an error performing the operation.
|
def add(self, actors):
    """Append input object(s) to the internal list of actors to be shown.
    :return: the input actor for possible concatenation, or ``None`` when a
        sequence was given.
    """
    if not utils.isSequence(actors):
        self.actors.append(actors)
        return actors
    # Sequences are de-duplicated against the current actor list.
    for actor in actors:
        if actor not in self.actors:
            self.actors.append(actor)
    return None
|
Append input object to the internal list of actors to be shown.
:return: returns input actor for possible concatenation.
|
def remove_highlight_nodes(graph: BELGraph, nodes: Optional[Iterable[BaseEntity]]=None) -> None:
    """Remove the highlight marker from the given nodes, or all nodes if none given.
    :param graph: A BEL graph
    :param nodes: The list of nodes to un-highlight
    """
    targets = graph if nodes is None else nodes
    for target in targets:
        if is_node_highlighted(graph, target):
            graph.node[target].pop(NODE_HIGHLIGHT)
|
Removes the highlight from the given nodes, or all nodes if none given.
:param graph: A BEL graph
:param nodes: The list of nodes to un-highlight
|
def _strip_postfix(req):
    """
    Strip req postfix ( -dev, 0.2, etc )
    """
    # FIXME: use package_to_requirement?
    match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req)
    return match.group(1) if match else req
|
Strip req postfix ( -dev, 0.2, etc )
|
def _process_resp(request_id, response, is_success_func):
    """
    :param request_id: campus url identifying the request
    :param response: the GET method response object
    :param is_success_func: function used to verify a success code
    :return: True if successful, False otherwise.
    raise DataFailureException or a corresponding TrumbaException
    if the request failed or an error code has been returned.
    """
    if response.status != 200:
        raise DataFailureException(request_id,
                                   response.status,
                                   response.reason
                                   )
    if response.data is None:
        raise NoDataReturned()
    root = objectify.fromstring(response.data)
    if root.ResponseMessage is None or\
            root.ResponseMessage.attrib['Code'] is None:
        raise UnknownError()
    resp_code = int(root.ResponseMessage.attrib['Code'])
    # Call the checker directly; the previous functools.partial() wrapper
    # took no arguments and added nothing.
    if is_success_func(resp_code):
        return True
    _check_err(resp_code, request_id)
|
:param request_id: campus url identifying the request
:param response: the GET method response object
:param is_success_func: the name of the function for
verifying a success code
:return: True if successful, False otherwise.
raise DataFailureException or a corresponding TrumbaException
if the request failed or an error code has been returned.
|
def shlex_quote(s):
    """Return a shell-escaped version of the string *s*.
    Backported from the Python 3.3 standard library module shlex.
    """
    if is_py3:
        # Defer to the stdlib implementation when it exists.
        return quote(s)
    if not s:
        return "''"
    if _find_unsafe(s) is None:
        return s
    # Single-quote the whole string; each embedded single quote is closed,
    # emitted inside double quotes, then the single quote is reopened.
    escaped = s.replace("'", "'\"'\"'")
    return "'" + escaped + "'"
|
Return a shell-escaped version of the string *s*.
Backported from Python 3.3 standard library module shlex.
|
def register_dataset(self,
                     dataset,
                     expr,
                     deltas=None,
                     checkpoints=None,
                     odo_kwargs=None):
    """Explicitly map a dataset to a collection of blaze expressions.
    Parameters
    ----------
    dataset : DataSet
        The pipeline dataset to map to the given expressions.
    expr : Expr
        The baseline values.
    deltas : Expr, optional
        The deltas for the data.
    checkpoints : Expr, optional
        The forward fill checkpoints for the data.
    odo_kwargs : dict, optional
        The keyword arguments to forward to the odo calls internally.
    See Also
    --------
    :func:`zipline.pipeline.loaders.blaze.from_blaze`
    """
    data = ExprData(expr, deltas, checkpoints, odo_kwargs)
    # Every column of the dataset resolves to the same expression bundle.
    self._table_expressions.update(
        (column, data) for column in dataset.columns
    )
|
Explicitly map a dataset to a collection of blaze expressions.
Parameters
----------
dataset : DataSet
The pipeline dataset to map to the given expressions.
expr : Expr
The baseline values.
deltas : Expr, optional
The deltas for the data.
checkpoints : Expr, optional
The forward fill checkpoints for the data.
odo_kwargs : dict, optional
The keyword arguments to forward to the odo calls internally.
See Also
--------
:func:`zipline.pipeline.loaders.blaze.from_blaze`
|
def needs_to_auth(self, dbname):
    """
    Determines if the server needs to authenticate to the database.
    NOTE: we stopped depending on is_auth() since its only a configuration
    and may not be accurate
    """
    log_debug("Checking if server '%s' needs to auth on db '%s'...." %
              (self.id, dbname))
    try:
        client = self.get_mongo_client()
        db = client.get_database(dbname)
        # Listing collections requires auth when the server enforces it;
        # success here means no auth is needed.
        db.collection_names()
        result = False
    # NOTE(review): Python 2 except syntax; (RuntimeError, Exception) is
    # redundant since Exception already covers RuntimeError.
    except (RuntimeError,Exception), e:
        log_exception(e)
        # updated for to handle auth failures from mongodb 3.6
        result = "authorized" in str(e) or "there are no users authenticated" in str(e)
    log_debug("needs_to_auth check for server '%s' on db '%s' : %s" %
              (self.id, dbname, result))
    return result
|
Determines if the server needs to authenticate to the database.
NOTE: we stopped depending on is_auth() since its only a configuration
and may not be accurate
|
def dataset(directory, images_file, labels_file):
    """Download and parse MNIST into a ``tf.data.Dataset`` of (image, label)."""
    images_path = download(directory, images_file)
    labels_path = download(directory, labels_file)
    check_image_file_header(images_path)
    check_labels_file_header(labels_path)

    def decode_image(raw):
        # Decode raw bytes, flatten to 784 floats, scale [0, 255] -> [0.0, 1.0].
        pixels = tf.cast(tf.decode_raw(raw, tf.uint8), tf.float32)
        return tf.reshape(pixels, [784]) / 255.0

    def decode_label(raw):
        # One uint8 per record; reshape to a scalar and widen to int32.
        value = tf.reshape(tf.decode_raw(raw, tf.uint8), [])
        return tf.to_int32(value)

    images = tf.data.FixedLengthRecordDataset(
        images_path, 28 * 28, header_bytes=16).map(decode_image)
    labels = tf.data.FixedLengthRecordDataset(
        labels_path, 1, header_bytes=8).map(decode_label)
    return tf.data.Dataset.zip((images, labels))
|
Download and parse MNIST dataset.
|
def dt_cluster(dt_list, dt_thresh=16.0):
    """Find clusters of similar datetimes within datetime list.

    :param dt_list: list of datetimes, or already-converted ordinal floats
    :param dt_thresh: gap (in the ordinal unit, presumably days — verify)
        that separates two clusters
    :return: list of dicts, one per cluster, with break/all indices and
        timestamps (both ordinal and datetime forms)
    """
    # Convert datetimes to ordinal floats unless the caller already did.
    if not isinstance(dt_list[0], float):
        o_list = dt2o(dt_list)
    else:
        o_list = dt_list
    o_list_sort = np.sort(o_list)
    o_list_sort_idx = np.argsort(o_list)
    d = np.diff(o_list_sort)
    #These are indices of breaks
    #Add one so each b starts a cluster
    b = np.nonzero(d > dt_thresh)[0] + 1
    #Add one to shape so we include final index
    b = np.hstack((0, b, d.shape[0] + 1))
    f_list = []
    for i in range(len(b)-1):
        #Need to subtract 1 here to give cluster bounds
        b_idx = [b[i], b[i+1]-1]
        b_dt = o_list_sort[b_idx]
        #These should be identical if input is already sorted
        b_idx_orig = o_list_sort_idx[b_idx]
        all_idx = np.arange(b_idx[0], b_idx[1])
        all_sort = o_list_sort[all_idx]
        #These should be identical if input is already sorted
        all_idx_orig = o_list_sort_idx[all_idx]
        # Renamed from `dict`, which shadowed the builtin.
        cluster = {}
        cluster['break_indices'] = b_idx_orig
        cluster['break_ts_o'] = b_dt
        cluster['break_ts_dt'] = o2dt(b_dt)
        cluster['all_indices'] = all_idx_orig
        cluster['all_ts_o'] = all_sort
        cluster['all_ts_dt'] = o2dt(all_sort)
        f_list.append(cluster)
    return f_list
|
Find clusters of similar datetimes within datetime list
|
def integrate_adaptive(rhs, jac, y0, x0, xend, atol, rtol, dx0=.0, dx_max=.0,
                       check_callable=False, check_indexing=False, **kwargs):
    """
    Integrates a system of ordinary differential equations.
    Parameters
    ----------
    rhs: callable
        Function with signature f(t, y, fout) which modifies fout *inplace*.
    jac: callable
        Function with signature j(t, y, jmat_out, dfdx_out) which modifies
        jmat_out and dfdx_out *inplace*.
    y0: array_like
        Initial values of the dependent variables.
    x0: float
        Initial value of the independent variable.
    xend: float
        Stopping value for the independent variable.
    atol: float
        Absolute tolerance.
    rtol: float
        Relative tolerance.
    dx0: float
        Initial step-size.
    dx_max: float
        Maximum step-size.
    check_callable: bool (default: False)
        Perform signature sanity checks on ``rhs`` and ``jac``.
    check_indexing: bool (default: False)
        Perform item setting sanity checks on ``rhs`` and ``jac``.
    \*\*kwargs:
        'method': str
            'rosenbrock4', 'dopri5' or 'bs'
        'return_on_error': bool
            Returns on error without raising an exception (with ``'success'==False``).
        'autorestart': int
            Useful for autonomous systems where conditions change during integration.
            Will restart the integration with ``x==0``.
        'dx0cb': callable
            Callback for calculating dx0 (make sure to pass ``dx0==0.0``) to enable.
            Signature: ``f(x, y[:]) -> float``.
    Returns
    -------
    (xout, yout, info):
        xout: 1-dimensional array of values for the independent variable
        yout: 2-dimensional array of the dependent variables (axis 1) for
            values corresponding to xout (axis 0)
        info: dictionary with information about the integration
    """
    # Sanity checks to reduce risk of having a segfault:
    jac = _ensure_5args(jac)
    if check_callable:
        _check_callable(rhs, jac, x0, y0)
    if check_indexing:
        _check_indexing(rhs, jac, x0, y0)
    y0_arr = np.asarray(y0, dtype=np.float64)
    return adaptive(rhs, jac, y0_arr, x0, xend, atol, rtol,
                    dx0, dx_max, **_bs(kwargs))
|
Integrates a system of ordinary differential equations.
Parameters
----------
rhs: callable
Function with signature f(t, y, fout) which modifies fout *inplace*.
jac: callable
Function with signature j(t, y, jmat_out, dfdx_out) which modifies
jmat_out and dfdx_out *inplace*.
y0: array_like
Initial values of the dependent variables.
x0: float
Initial value of the independent variable.
xend: float
Stopping value for the independent variable.
atol: float
Absolute tolerance.
rtol: float
Relative tolerance.
dx0: float
Initial step-size.
dx_max: float
Maximum step-size.
check_callable: bool (default: False)
Perform signature sanity checks on ``rhs`` and ``jac``.
check_indexing: bool (default: False)
Perform item setting sanity checks on ``rhs`` and ``jac``.
\*\*kwargs:
'method': str
'rosenbrock4', 'dopri5' or 'bs'
'return_on_error': bool
Returns on error without raising an exception (with ``'success'==False``).
'autorestart': int
Useful for autonomous systems where conditions change during integration.
Will restart the integration with ``x==0``.
'dx0cb': callable
Callback for calculating dx0 (make sure to pass ``dx0==0.0``) to enable.
Signature: ``f(x, y[:]) -> float``.
Returns
-------
(xout, yout, info):
xout: 1-dimensional array of values for the independent variable
yout: 2-dimensional array of the dependent variables (axis 1) for
values corresponding to xout (axis 0)
info: dictionary with information about the integration
|
def clone(self):
    """
    Return an independent copy of this layout with a completely separate
    color_list and no drivers.
    """
    # Gather the clone-able attributes off this instance.
    kwargs = {}
    for attr_name in self.CLONE_ATTRS:
        kwargs[attr_name] = getattr(self, attr_name)
    # Shallow-copy the colors so the clone owns its own list object.
    kwargs['color_list'] = copy.copy(self.color_list)
    return self.__class__([], **kwargs)
|
Return an independent copy of this layout with a completely separate
color_list and no drivers.
|
def DirEntryScanner(**kw):
    """Return a prototype Scanner instance for "scanning"
    directory Nodes for their in-memory entries."""
    # Force entry-node factory and disable recursion for this scanner.
    kw.update(node_factory=SCons.Node.FS.Entry, recursive=None)
    return SCons.Scanner.Base(scan_in_memory, "DirEntryScanner", **kw)
|
Return a prototype Scanner instance for "scanning"
directory Nodes for their in-memory entries
|
def build_single(scheme_file, templates, base_output_dir):
    """Build colorschemes for a single $scheme_file using all TemplateGroup
    instances in $templates."""
    scheme = get_yaml_dict(scheme_file)
    scheme_slug = slugify(scheme_file)
    format_scheme(scheme, scheme_slug)
    scheme_name = scheme['scheme-name']
    print('Building colorschemes for scheme "{}"…'.format(scheme_name))
    for temp_group in templates:
        for _, sub in temp_group.templates.items():
            output_dir = os.path.join(base_output_dir,
                                      temp_group.name,
                                      sub['output'])
            # exist_ok replaces the previous try/except FileExistsError guard.
            os.makedirs(output_dir, exist_ok=True)
            if sub['extension'] is not None:
                filename = 'base16-{}{}'.format(scheme_slug, sub['extension'])
            else:
                filename = 'base16-{}'.format(scheme_slug)
            build_path = os.path.join(output_dir, filename)
            with open(build_path, 'w') as file_:
                file_content = pystache.render(sub['parsed'], scheme)
                file_.write(file_content)
    print('Built colorschemes for scheme "{}".'.format(scheme_name))
|
Build colorscheme for a single $scheme_file using all TemplateGroup
instances in $templates.
|
def __create_db_and_container(self):
    """Resolve (or lazily create) the configured database and container."""
    # Look up the configured identifiers, then get-or-create each resource.
    database_id = self.config.database
    container_id = self.config.container
    self.db = self.__get_or_create_database(self.client, database_id)
    self.container = self.__get_or_create_container(self.client, container_id)
|
Call the get or create methods.
|
def get(self, sid):
    """
    Constructs a EventContext
    :param sid: The sid
    :returns: twilio.rest.taskrouter.v1.workspace.event.EventContext
    :rtype: twilio.rest.taskrouter.v1.workspace.event.EventContext
    """
    workspace_sid = self._solution['workspace_sid']
    return EventContext(self._version, workspace_sid=workspace_sid, sid=sid)
|
Constructs a EventContext
:param sid: The sid
:returns: twilio.rest.taskrouter.v1.workspace.event.EventContext
:rtype: twilio.rest.taskrouter.v1.workspace.event.EventContext
|
def metadata_add_description(self):
    """Metadata: add description fields (json/url/description) to the
    service metadata file, rejecting duplicate url/description keys."""
    description = {}
    if self.args.json:
        description = json.loads(self.args.json)
    if self.args.url:
        if "url" in description:
            raise Exception("json service description already contains url field")
        description["url"] = self.args.url
    if self.args.description:
        if "description" in description:
            raise Exception("json service description already contains description field")
        description["description"] = self.args.description
    metadata = load_mpe_service_metadata(self.args.metadata_file)
    # Merge on top of any existing description; new values win.
    if "service_description" in metadata:
        description = {**metadata["service_description"], **description}
    metadata.set_simple_field("service_description", description)
    metadata.save_pretty(self.args.metadata_file)
|
Metadata: add description
|
def parse_subdomain_missing_zonefiles_record(cls, rec):
    """
    Parse a missing-zonefiles vector given by the domain.
    Returns the list of zone file indexes on success
    Raises ParseError on unparseable records
    """
    txt_entry = rec['txt']
    # A list means the TXT record was split into multiple strings.
    if isinstance(txt_entry, list):
        raise ParseError("TXT entry too long for a missing zone file list")
    if txt_entry is None or len(txt_entry) == 0:
        return []
    try:
        return [int(part) for part in txt_entry.split(',')]
    except ValueError:
        raise ParseError('Invalid integers')
|
Parse a missing-zonefiles vector given by the domain.
Returns the list of zone file indexes on success
Raises ParseError on unparseable records
|
def json2value(json_string, params=Null, flexible=False, leaves=False):
    """
    :param json_string: THE JSON
    :param params: STANDARD JSON PARAMS
    :param flexible: REMOVE COMMENTS
    :param leaves: ASSUME JSON KEYS ARE DOT-DELIMITED
    :return: Python value
    """
    if not is_text(json_string):
        Log.error("only unicode json accepted")
    try:
        if flexible:
            # REMOVE """COMMENTS""", # COMMENTS, //COMMENTS, AND \n \r
            # DERIVED FROM https://github.com/jeads/datasource/blob/master/datasource/bases/BaseHub.py# L58
            json_string = re.sub(r"\"\"\".*?\"\"\"", r"\n", json_string, flags=re.MULTILINE)
            json_string = "\n".join(remove_line_comment(l) for l in json_string.split("\n"))
            # ALLOW DICTIONARY'S NAME:VALUE LIST TO END WITH COMMA
            json_string = re.sub(r",\s*\}", r"}", json_string)
            # ALLOW LISTS TO END WITH COMMA
            json_string = re.sub(r",\s*\]", r"]", json_string)
        if params:
            # LOOKUP REFERENCES
            json_string = expand_template(json_string, params)
        try:
            value = wrap(json_decoder(text_type(json_string)))
        except Exception as e:
            Log.error("can not decode\n{{content}}", content=json_string, cause=e)
        if leaves:
            # EXPAND DOT-DELIMITED KEYS INTO NESTED STRUCTURES
            value = wrap_leaves(value)
        return value
    except Exception as e:
        e = Except.wrap(e)
        if not json_string.strip():
            Log.error("JSON string is only whitespace")
        c = e
        # WALK THE CAUSE CHAIN TO FIND THE DEEPEST DECODER ERROR
        while "Expecting '" in c.cause and "' delimiter: line" in c.cause:
            c = c.cause
        if "Expecting '" in c and "' delimiter: line" in c:
            # EXTRACT LINE/COLUMN FROM THE DECODER MESSAGE AND POINT AT IT
            line_index = int(strings.between(c.message, " line ", " column ")) - 1
            column = int(strings.between(c.message, " column ", " ")) - 1
            line = json_string.split("\n")[line_index].replace("\t", " ")
            if column > 20:
                sample = "..." + line[column - 20:]
                pointer = " " + (" " * 20) + "^"
            else:
                sample = line
                pointer = (" " * column) + "^"
            if len(sample) > 43:
                sample = sample[:43] + "..."
            Log.error(CAN_NOT_DECODE_JSON + " at:\n\t{{sample}}\n\t{{pointer}}\n", sample=sample, pointer=pointer)
        # FALLBACK: SHOW HEX AND CHARACTER DUMPS OF THE PROBLEM STRING
        base_str = strings.limit(json_string, 1000).encode('utf8')
        hexx_str = bytes2hex(base_str, " ")
        try:
            char_str = " " + " ".join((c.decode("latin1") if ord(c) >= 32 else ".") for c in base_str)
        except Exception:
            char_str = " "
        Log.error(CAN_NOT_DECODE_JSON + ":\n{{char_str}}\n{{hexx_str}}\n", char_str=char_str, hexx_str=hexx_str, cause=e)
|
:param json_string: THE JSON
:param params: STANDARD JSON PARAMS
:param flexible: REMOVE COMMENTS
:param leaves: ASSUME JSON KEYS ARE DOT-DELIMITED
:return: Python value
|
def validate(instance, schema, cls=None, *args, **kwargs):
    """
    Validate an instance under the given schema.
    >>> validate([2, 3, 4], {"maxItems" : 2})
    Traceback (most recent call last):
        ...
    ValidationError: [2, 3, 4] is too long
    :func:`validate` will first verify that the provided schema is itself
    valid, since not doing so can lead to less obvious error messages and fail
    in less obvious or consistent ways. If you know you have a valid schema
    already or don't care, you might prefer using the
    :meth:`~IValidator.validate` method directly on a specific validator
    (e.g. :meth:`Draft4Validator.validate`).
    :argument instance: the instance to validate
    :argument schema: the schema to validate with
    :argument cls: an :class:`IValidator` class that will be used to validate
                   the instance.
    If the ``cls`` argument is not provided, two things will happen in
    accordance with the specification. First, if the schema has a
    :validator:`$schema` property containing a known meta-schema [#]_ then the
    proper validator will be used. The specification recommends that all
    schemas contain :validator:`$schema` properties for this reason. If no
    :validator:`$schema` property is found, the default validator class is
    :class:`Draft4Validator`.
    Any other provided positional and keyword arguments will be passed on when
    instantiating the ``cls``.
    :raises:
        :exc:`ValidationError` if the instance is invalid
        :exc:`SchemaError` if the schema itself is invalid
    .. rubric:: Footnotes
    .. [#] known by a validator registered with :func:`validates`
    """
    if cls is None:
        # Pick a validator class from the schema's $schema, per spec.
        cls = validator_for(schema)
    cls.check_schema(schema)
    validator = cls(schema, *args, **kwargs)
    validator.validate(instance)
|
Validate an instance under the given schema.
>>> validate([2, 3, 4], {"maxItems" : 2})
Traceback (most recent call last):
...
ValidationError: [2, 3, 4] is too long
:func:`validate` will first verify that the provided schema is itself
valid, since not doing so can lead to less obvious error messages and fail
in less obvious or consistent ways. If you know you have a valid schema
already or don't care, you might prefer using the
:meth:`~IValidator.validate` method directly on a specific validator
(e.g. :meth:`Draft4Validator.validate`).
:argument instance: the instance to validate
:argument schema: the schema to validate with
:argument cls: an :class:`IValidator` class that will be used to validate
the instance.
If the ``cls`` argument is not provided, two things will happen in
accordance with the specification. First, if the schema has a
:validator:`$schema` property containing a known meta-schema [#]_ then the
proper validator will be used. The specification recommends that all
schemas contain :validator:`$schema` properties for this reason. If no
:validator:`$schema` property is found, the default validator class is
:class:`Draft4Validator`.
Any other provided positional and keyword arguments will be passed on when
instantiating the ``cls``.
:raises:
:exc:`ValidationError` if the instance is invalid
:exc:`SchemaError` if the schema itself is invalid
.. rubric:: Footnotes
.. [#] known by a validator registered with :func:`validates`
|
def param_changed_to(self, key, to_value, from_value=None):
    """
    Return True if the parameter named ``key`` has transitioned to
    ``to_value`` (optionally requiring it came from ``from_value``).
    """
    previous = getattr(self.last_manifest, key)
    current = self.current_manifest.get(key)
    if from_value is not None:
        return previous == from_value and current == to_value
    # No origin constraint: any change that lands on to_value counts.
    return previous != to_value and current == to_value
|
Returns true if the given parameter, with name key, has transitioned to the given value.
|
def main(args): ## pylint: disable=too-many-branches
    """Entrypoint of the whole commandline application.

    Parses ``args``, reads YAML from stdin, applies the requested
    ACTION/KEY query through ``do()`` and prints each resulting document
    on stdout.  Errors are reported via ``die``/``exit``.
    """
    # Derive the program name for usage/help, stripping common script
    # suffixes (__file__ is more reliable than sys.argv[0] on Windows).
    EXNAME = os.path.basename(__file__ if WIN32 else sys.argv[0])
    for ext in (".py", ".pyc", ".exe", "-script.py", "-script.pyc"): ## pragma: no cover
        if EXNAME.endswith(ext): ## pragma: no cover
            EXNAME = EXNAME[:-len(ext)]
            break
    USAGE = """\
Usage:
    %(exname)s {-h|--help}
    %(exname)s {-V|--version}
    %(exname)s [-y|--yaml] [-q|--quiet] ACTION KEY [DEFAULT]
""" % {"exname": EXNAME}
    HELP = """
Parses and output chosen subpart or values from YAML input.
It reads YAML in stdin and will output on stdout it's return value.
%(usage)s
Options:
    -y, --yaml
        Output only YAML safe value, more precisely, even
        literal values will be YAML quoted. This behavior
        is required if you want to output YAML subparts and
        further process it. If you know you have are dealing
        with safe literal value, then you don't need this.
        (Default: no safe YAML output)
    -q, --quiet
        In case KEY value queried is an invalid path, quiet
        mode will prevent the writing of an error message on
        standard error.
        (Default: no quiet mode)
    -L, --line-buffer
        Force parsing stdin line by line allowing to process
        streamed YAML as it is fed instead of buffering
        input and treating several YAML streamed document
        at once. This is likely to have some small performance
        hit if you have a huge stream of YAML document, but
        then you probably don't really care about the
        line-buffering.
        (Default: no line buffering)
    ACTION    Depending on the type of data you've targetted
              thanks to the KEY, ACTION can be:
              These ACTIONs applies to any YAML type:
                  get-type          ## returns a short string
                  get-value         ## returns YAML
              These ACTIONs applies to 'sequence' and 'struct' YAML type:
                  get-values{,-0}   ## returns list of YAML
                  get-length        ## returns an integer
              These ACTION applies to 'struct' YAML type:
                  keys{,-0}         ## returns list of YAML
                  values{,-0}       ## returns list of YAML
                  key-values,{,-0}  ## returns list of YAML
              Note that any value returned is returned on stdout, and
              when returning ``list of YAML``, it'll be separated by
              a newline or ``NUL`` char depending of you've used the
              ``-0`` suffixed ACTION.
    KEY       Identifier to browse and target subvalues into YAML
              structure. Use ``.`` to parse a subvalue. If you need
              to use a literal ``.`` or ``\\``, use ``\\`` to quote it.
              Use struct keyword to browse ``struct`` YAML data and use
              integers to browse ``sequence`` YAML data.
    DEFAULT   if not provided and given KEY do not match any value in
              the provided YAML, then DEFAULT will be returned. If no
              default is provided and the KEY do not match any value
              in the provided YAML, %(exname)s will fail with an error
              message.
Examples:
    ## get last grocery
    cat recipe.yaml | %(exname)s get-value groceries.-1
    ## get all words of my french dictionary
    cat dictionaries.yaml | %(exname)s keys-0 french.dictionary
    ## get YAML config part of 'myhost'
    cat hosts_config.yaml | %(exname)s get-value cfgs.myhost
""" % {"exname": EXNAME, "usage": USAGE}
    USAGE = textwrap.dedent(USAGE)
    HELP = textwrap.dedent(HELP)
    opts = _parse_args(args, USAGE, HELP)
    quiet = opts.pop("quiet")
    try:
        first = True
        # ``do`` yields one output chunk per YAML document read on stdin.
        for output in do(stream=sys.stdin, **opts):
            if first:
                first = False
            else:
                # A second document was produced: only streaming-capable
                # actions may emit multi-document output.
                if opts["action"] not in ACTION_SUPPORTING_STREAMING:
                    die("Source YAML is multi-document, "
                        "which doesn't support any other action than %s"
                        % ", ".join(ACTION_SUPPORTING_STREAMING))
                # Separate documents: '---' for YAML dumps, NUL otherwise.
                if opts["dump"] is yaml_dump:
                    print("---\n", end="")
                else:
                    print("\0", end="")
                if opts.get("loader") is LineLoader:
                    sys.stdout.flush()
            print(output, end="")
            # Line-buffered mode: flush after each record so consumers can
            # process the stream as it is produced.
            if opts.get("loader") is LineLoader:
                sys.stdout.flush()
    except (InvalidPath, ActionTypeError) as e:
        # Invalid KEY path or ACTION/type mismatch: stay silent in quiet
        # mode, otherwise report the error before exiting non-zero.
        if quiet:
            exit(1)
        else:
            die(str(e))
    except InvalidAction as e:
        die("'%s' is not a valid action.\n%s"
            % (e.args[0], USAGE))
|
Entrypoint of the whole commandline application
|
def create(cls, name, engines, policy=None, comment=None, **kwargs):
    """
    Create a new validate policy task.

    When no policy is given, each engine's currently assigned policy is
    validated.  Default validation settings can be overridden as kwargs.

    :param str name: name of task
    :param engines: list of engines to validate
    :type engines: list(Engine)
    :param Policy policy: policy to validate. Uses the engines assigned
        policy if none specified.
    :param str comment: optional comment for the task
    :param kwargs: see :func:`~policy_validation_settings` for keyword
        arguments and default values.
    :raises ElementNotFound: engine or policy specified does not exist
    :raises CreateElementFailed: failure to create the task
    :return: the task
    :rtype: ValidatePolicyTask
    """
    task_definition = {
        'name': name,
        'resources': [engine.href for engine in engines],
        'policy': None if policy is None else policy.href,
        'comment': comment}
    if kwargs:
        # Merge any overridden validation settings into the payload.
        task_definition.update(policy_validation_settings(**kwargs))
    return ElementCreator(cls, task_definition)
|
Create a new validate policy task.
If a policy is not specified, the engines existing policy will
be validated. Override default validation settings as kwargs.
:param str name: name of task
:param engines: list of engines to validate
:type engines: list(Engine)
:param Policy policy: policy to validate. Uses the engines assigned
policy if none specified.
:param kwargs: see :func:`~policy_validation_settings` for keyword
arguments and default values.
:raises ElementNotFound: engine or policy specified does not exist
:raises CreateElementFailed: failure to create the task
:return: the task
:rtype: ValidatePolicyTask
|
def get_channels_in(self, guild_id: str) -> List[Dict[str, Any]]:
    """Get a list of channels in the guild.

    Args:
        guild_id: id of the guild to fetch channels from

    Returns:
        List of dictionary objects describing the guild's channels.  Note
        the different channel types (text, voice, DM, group DM); see
        https://discordapp.com/developers/docs/resources/channel#channel-object
        for the full shape of each object.
    """
    endpoint = 'guilds/{}/channels'.format(guild_id)
    return self._query(endpoint, 'GET')
|
Get a list of channels in the guild
Args:
guild_id: id of the guild to fetch channels from
Returns:
List of dictionary objects of channels in the guild. Note the different
types of channels: text, voice, DM, group DM.
https://discordapp.com/developers/docs/resources/channel#channel-object
Example:
[
{
"id": "41771983423143937",
"guild_id": "41771983423143937",
"name": "general",
"type": 0,
"position": 6,
"permission_overwrites": [],
"topic": "24/7 chat about how to gank Mike #2",
"last_message_id": "155117677105512449"
},
{
"id": "155101607195836416",
"guild_id": "41771983423143937",
"name": "ROCKET CHEESE",
"type": 2,
"position": 5,
"permission_overwrites": [],
"bitrate": 64000,
"user_limit": 0
},
{
"last_message_id": "3343820033257021450",
"type": 1,
"id": "319674150115610528",
"recipients": [
{
"username": "test",
"discriminator": "9999",
"id": "82198898841029460",
"avatar": "33ecab261d4681afa4d85a04691c4a01"
}
]
}
]
|
def get_time_objects_from_model_timesteps(cls, times, start):
    """
    Calculate the datetimes of the model timesteps.

    :param times: sequence of model times in seconds; should start at 0.
    :param start: datetime of the first timestep; each entry of ``times``
        is added to it as a second offset.
    :return: ``(modelTimestep, newtimes)`` where ``modelTimestep[i]`` is
        the interval (seconds) between step i and the next one (the last
        step reuses the preceding interval) and ``newtimes[i]`` is the
        absolute datetime of step i.
    """
    # NOTE: original used py2-only ``xrange``/``StandardError`` which raise
    # NameError on Python 3; ``IndexError`` is the exception actually
    # raised by the out-of-range lookup below.
    model_timestep = []
    newtimes = []
    for i in range(len(times)):
        try:
            # Interval until the next step...
            model_timestep.append(times[i + 1] - times[i])
        except IndexError:
            # ...except for the last step, which reuses the prior interval.
            model_timestep.append(times[i] - times[i - 1])
        newtimes.append(start + timedelta(seconds=times[i]))
    return (model_timestep, newtimes)
|
Calculate the datetimes of the model timesteps
times should start at 0 and be in seconds
|
def legend_title_header_element(feature, parent):
    """Retrieve the capitalized legend title header string from definitions."""
    # Signature (feature, parent) is imposed by the expression engine API;
    # both arguments are intentionally unused.
    _ = feature, parent  # NOQA
    return legend_title_header['string_format'].capitalize()
|
Retrieve legend title header string from definitions.
|
def _drop_indices(self):
    """Remove the n-gram related indices from the database."""
    log = self._logger.info
    log('Dropping database indices')
    self._conn.execute(constants.DROP_TEXTNGRAM_INDEX_SQL)
    log('Finished dropping database indices')
|
Drops the database indices relating to n-grams.
|
def run():
    """Command for reflection database objects.

    Reflects the schema of the database given by -u/--url and either
    prints a two-column table listing (-l/--list) or renders a schema
    description in the chosen format (-r/--render), optionally restricted
    with -i/--include or -e/--exclude comma-separated table names.
    """
    parser = OptionParser(
        version=__version__, description=__doc__,
    )
    parser.add_option(
        '-u', '--url', dest='url',
        help='Database URL (connection string)',
    )
    parser.add_option(
        '-r', '--render', dest='render', default='dot',
        choices=['plantuml', 'dot'],
        help='Output format - plantuml or dot',
    )
    parser.add_option(
        '-l', '--list', dest='list', action='store_true',
        help='Output database list of tables and exit',
    )
    parser.add_option(
        '-i', '--include', dest='include',
        help='List of tables to include through ","',
    )
    parser.add_option(
        '-e', '--exclude', dest='exclude',
        help='List of tables to exclude through ","',
    )
    (options, args) = parser.parse_args()
    if not options.url:
        print('-u/--url option required')
        exit(1)
    engine = create_engine(options.url)
    meta = MetaData()
    meta.reflect(bind=engine)
    if options.list:
        # Print the table names in two aligned columns.
        print('Database tables:')
        tables = sorted(meta.tables.keys())
        def _g(l, i):
            # Safe indexed lookup: '' when the index is out of range.
            # (Fixed: previously ignored its first argument and read the
            # outer 'tables' list instead.)
            try:
                return l[i]
            except IndexError:
                return ''
        for i in range(0, len(tables), 2):
            print(' {0}{1}{2}'.format(
                _g(tables, i),
                ' ' * (38 - len(_g(tables, i))),
                _g(tables, i + 1),
            ))
        exit(0)
    tables = set(meta.tables.keys())
    # NOTE: 'string.strip' was removed in Python 3; use the str method so
    # the include/exclude filters work on both major versions.
    if options.include:
        tables &= set(map(str.strip, options.include.split(',')))
    if options.exclude:
        tables -= set(map(str.strip, options.exclude.split(',')))
    desc = describe(map(lambda x: operator.getitem(meta.tables, x), tables))
    print(getattr(render, options.render)(desc))
|
Command for reflection database objects
|
def monthly_build_list_regex(self):
    """Return the regex for the folder that holds one month's nightly builds."""
    year = self.date.year
    # Zero-pad the month so the path always has two digits (e.g. 2021/03).
    month = str(self.date.month).zfill(2)
    return r'nightly/{0}/{1}/'.format(year, month)
|
Return the regex for the folder containing builds of a month.
|
def execute(self, conn, block_name, origin_site_name, transaction=False):
    """
    Update origin_site_name for a given block_name.

    :param conn: open database connection supplied by the caller; this
        DAO never opens its own connection.
    :param block_name: fully qualified name of the block to update.
    :param origin_site_name: new origin site value to store.
    :param transaction: pass-through flag for the DB interface; when True
        the statement joins the caller's transaction.
    """
    # Fail fast when no live connection was handed down from the caller.
    if not conn:
        dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/Block/UpdateStatus. \
Expects db connection from upper layer.", self.logger.exception)
    # mtime/myuser record when and by whom the block was last modified.
    binds = {"block_name": block_name, "origin_site_name": origin_site_name, "mtime": dbsUtils().getTime(),
             "myuser": dbsUtils().getCreateBy()}
    self.dbi.processData(self.sql, binds, conn, transaction)
|
Update origin_site_name for a given block_name
|
def echo_warnings_via_pager(warnings: List[WarningTuple], sep: str = '\t') -> None:
    """Output the warnings from a BEL graph with Click and the system's pager."""
    if not warnings:
        # Nothing to report: celebrate and stop.
        click.echo('Congratulations! No warnings.')
        sys.exit(0)
    # Column widths sized to the longest line number / warning class name.
    line_width = max(len(str(exc.line_number)) for _, exc, _ in warnings)
    name_width = max(len(exc.__class__.__name__) for _, exc, _ in warnings)
    line_fmt = '{:>' + str(line_width) + '}' + sep
    name_fmt = '{:>' + str(name_width) + '}' + sep
    def _render(path: str, exc: BELParserWarning):
        kind = exc.__class__.__name__
        pieces = [
            click.style(path, fg='cyan') + sep,
            click.style(line_fmt.format(exc.line_number), fg='blue', bold=True),
            click.style(name_fmt.format(kind),
                        fg=('red' if kind.endswith('Error') else 'yellow')),
            click.style(exc.line, bold=True) + sep,
            click.style(str(exc)),
        ]
        return ''.join(pieces)
    click.echo_via_pager('\n'.join(
        _render(path, exc)
        for path, exc, _ in warnings
    ))
|
Output the warnings from a BEL graph with Click and the system's pager.
|
def create_matcher(dispatcher, parsers, apptags, matcher='ruled', hosts=tuple(), time_range=None,
                   time_period=(None, None), patterns=tuple(), invert=False, count=False,
                   files_with_match=None, max_count=0, only_matching=False, quiet=False,
                   thread=False, name_cache=None):
    """
    Create a matcher engine.

    Builds a closure that processes a single log file: it detects the log
    format, filters lines by datetime period/range, host and search
    patterns, optionally applies per-application rules, and dispatches
    selected and context lines through `dispatcher`.

    :param dispatcher: dispatcher exposing ``dispatch_selected``,
        ``dispatch_context`` and per-key buffering.
    :param parsers: log parsers, cycled over to auto-detect the format.
    :param apptags: application tags used to look up the matching app.
    :param matcher: 'ruled' (default), 'unruled' (skip app rules) or
        'unparsed' (select lines not fully parsed by app rules).
    :param hosts: host patterns; lines from other hosts are skipped.
    :param time_range: optional intra-day time range filter.
    :param time_period: (start, end) pair delimiting the datetime period.
    :param patterns: patterns searched in each line (see `invert`).
    :param invert: invert the sense of the pattern matching.
    :param count: register only the number of matching lines per file.
    :param files_with_match: if not None, report only whether each file
        contains (True) or does not contain (False) a match.
    :param max_count: stop after this many matches (0 means no limit).
    :param only_matching: register only the matching part of the lines.
    :param quiet: stop at the first match, suppressing line output.
    :param thread: enable thread-based context matching.
    :param name_cache: cache used to rewrite matched names in the output.
    :return: A matcher function.
    """
    parsers = CycleParsers(parsers)
    # In quiet mode one match per file is enough to decide the outcome.
    max_matches = 1 if quiet else max_count
    use_app_rules = matcher != 'unruled'
    select_unparsed = matcher == 'unparsed'
    # Individual lines are registered only when they will be displayed.
    register_log_lines = not (quiet or count or files_with_match is not None)
    start_dt, end_dt = get_mktime_period(time_period)
    pattern_search = create_search_function(invert, only_matching)
    dispatch_selected = dispatcher.dispatch_selected
    dispatch_context = dispatcher.dispatch_context
    # Draw a progress bar only on a TTY and when stdout is not already an
    # output channel of the dispatcher.
    display_progress_bar = sys.stdout.isatty() and all(c.name != 'stdout' for c in dispatcher.channels)
    def process_logfile(source, apps, encoding='utf-8'):
        # Process one log file and return a MatcherResult summary.
        log_parser = next(parsers)
        first_event = None
        last_event = None
        app_thread = None
        selected_data = None
        line_counter = 0
        unknown_counter = 0
        selected_counter = 0
        extra_tags = Counter()
        dispatcher.reset()
        read_size = 0
        progress_bar = None
        with open_resource(source) as logfile:
            # Set counters and status
            logfile_name = logfile.name
            fstat = os.fstat(logfile.fileno())
            # The file's mtime is used to infer the year for log lines
            # whose timestamp lacks one (eg. RFC 3164 syslog).
            file_mtime = datetime.datetime.fromtimestamp(fstat.st_mtime)
            file_year = file_mtime.year
            file_month = file_mtime.month
            prev_year = file_year - 1
            if display_progress_bar:
                read_size = 0
                progress_bar = ProgressBar(sys.stdout, fstat.st_size, logfile_name)
            for line in logfile:
                line = line.decode(encoding)
                line_counter += 1
                if line[-1] != '\n':
                    line += '\n'
                if display_progress_bar:
                    read_size += len(line)
                    # Redraw at most every 100 lines to limit overhead.
                    if not line_counter % 100:
                        progress_bar.redraw(read_size)
                ###
                # Parses the line and extracts the log data
                log_match = log_parser.match(line)
                if log_match is None:
                    # The current parser doesn't match: try another available parser.
                    next_parser, log_match = parsers.detect(line)
                    if log_match is not None:
                        log_parser = next_parser
                    elif line_counter == 1:
                        logger.warning("the file '{}' has an unknown format, skip ...".format(logfile_name))
                        break
                    else:
                        unknown_counter += 1
                        continue
                log_data = log_parser.get_data(log_match)
                ###
                # Process last event repetition (eg. 'last message repeated N times' RFC 3164's logs)
                if getattr(log_data, 'repeat', None) is not None:
                    if selected_data is not None:
                        repeat = int(log_data.repeat)
                        if not thread:
                            selected_counter += repeat
                        if use_app_rules:
                            app = log_parser.app or get_app(selected_data, apps, apptags, extra_tags)
                            app.increase_last(repeat)
                            app.matches += 1
                            dispatch_context(
                                key=(app, app_thread),
                                filename=logfile_name,
                                line_number=line_counter,
                                rawlog=line
                            )
                        selected_data = None
                    continue
                selected_data = None
                ###
                # Parse the log's timestamp and gets the event datetime
                # When the line carries no year, use the file's year; events
                # from non-January months seen in a January-modified file
                # are assumed to belong to the previous year.
                year = getattr(
                    log_data, 'year',
                    prev_year if MONTHMAP[log_data.month] != '01' and file_month == 1 else file_year
                )
                event_dt = get_mktime(
                    year=year,
                    month=log_data.month,
                    day=log_data.day,
                    ltime=log_data.ltime
                )
                ###
                # Scope exclusions
                if event_dt is None or event_dt < start_dt:
                    # Excludes lines older than the start datetime
                    continue
                elif event_dt > end_dt:
                    # Excludes lines newer than the end datetime
                    if fstat.st_mtime < event_dt:
                        logger.error("found anomaly with mtime of file %r at line %d", logfile_name, line_counter)
                    logger.warning("newer event at line %d: skip the rest of the file %r", line_counter, logfile_name)
                    break
                elif time_range is not None and not time_range.between(log_data.ltime):
                    # Excludes lines not in time range
                    continue
                elif hosts and not has_host_match(log_data, hosts):
                    # Excludes lines with host restriction
                    continue
                ###
                # Search log line with provided not-empty pattern(s)
                pattern_matched, match, rawlog = pattern_search(line, patterns)
                if not pattern_matched and not thread:
                    dispatch_context(filename=logfile_name, line_number=line_counter, rawlog=rawlog)
                    continue
                ###
                # App parsing: get the app from parser or from the log data
                app = log_parser.app or get_app(log_data, apps, apptags, extra_tags)
                if app is None:
                    # Unmatchable tag --> skip the line
                    continue
                elif use_app_rules:
                    # Parse the log message with app's rules
                    app_matched, has_full_match, app_thread, output_data = app.match_rules(log_data)
                    if not pattern_matched and app_matched and app_thread is None:
                        continue
                    if output_data:
                        # Rewrite the raw line through the name cache.
                        rawlog = name_cache.match_to_string(log_match, log_parser.parser.groupindex, output_data)
                    if app_matched:
                        app.matches += 1
                        if not has_full_match or select_unparsed:
                            dispatch_context(
                                key=(app, app_thread),
                                filename=logfile_name,
                                line_number=line_counter,
                                rawlog=rawlog
                            )
                            continue
                    else:
                        app.unparsed += 1
                        if not select_unparsed:
                            dispatch_context(
                                key=(app, app_thread),
                                filename=logfile_name,
                                line_number=line_counter,
                                rawlog=rawlog
                            )
                            continue
                ###
                # Event selected: register event's data and datetime
                selected_data = log_data
                if first_event is None:
                    first_event = event_dt
                    last_event = event_dt
                else:
                    if first_event > event_dt:
                        first_event = event_dt
                    if last_event < event_dt:
                        last_event = event_dt
                if pattern_matched:
                    if max_matches and selected_counter >= max_matches:
                        # Stops iteration if max_count matches is exceeded
                        break
                    selected_counter += 1
                    if files_with_match:
                        break
                    if register_log_lines:
                        dispatch_selected(
                            key=(app, app_thread),
                            filename=logfile_name,
                            line_number=line_counter,
                            log_data=log_data,
                            rawlog=rawlog,
                            match=match
                        )
                elif register_log_lines and not only_matching:
                    # Thread matching
                    dispatch_context(
                        key=(app, app_thread),
                        filename=logfile_name,
                        line_number=line_counter,
                        rawlog=rawlog
                    )
            if display_progress_bar:
                progress_bar.redraw(fstat.st_size)
            # Flush any per-key buffered output still held by the dispatcher.
            try:
                for key in list(dispatcher.keys()):
                    dispatcher.flush(key)
            except (NameError, AttributeError):
                pass
            # If count option is enabled then register only the number of matched lines.
            if files_with_match and selected_counter or files_with_match is False and not selected_counter:
                dispatch_selected(filename=logfile.name)
            elif count:
                dispatch_selected(filename=logfile.name, counter=selected_counter)
            return MatcherResult(
                lines=line_counter,
                matches=selected_counter,
                unknown=unknown_counter,
                extra_tags=extra_tags,
                first_event=first_event,
                last_event=last_event
            )
    return process_logfile
|
Create a matcher engine.
:return: A matcher function.
|
def set_password(self, service, username, password):
    """Store the (encrypted) password for the username of the service.

    The whole keyring is written back; on a save conflict the remote copy
    is re-read and the write is retried once, but only when the
    conflicting change does not touch this same entry.

    :raises errors.PasswordSetError: if the keyring cannot be saved, or
        someone else concurrently set a different value for this entry.
    """
    # None is stored as the empty string; values are always encrypted.
    password = self._encrypt(password or '')
    # Work on a deep copy so self._keyring stays valid if saving fails.
    keyring_working_copy = copy.deepcopy(self._keyring)
    service_entries = keyring_working_copy.get(service)
    if not service_entries:
        service_entries = {}
        keyring_working_copy[service] = service_entries
    service_entries[username] = password
    save_result = self._save_keyring(keyring_working_copy)
    if save_result == self.OK:
        self._keyring_dict = keyring_working_copy
        return
    elif save_result == self.CONFLICT:
        # check if we can avoid updating
        self.docs_entry, keyring_dict = self._read()
        existing_pwd = self._get_entry(self._keyring, service, username)
        conflicting_pwd = self._get_entry(keyring_dict, service, username)
        if conflicting_pwd == password:
            # if someone else updated it to the same value then we are done
            self._keyring_dict = keyring_working_copy
            return
        elif conflicting_pwd is None or conflicting_pwd == existing_pwd:
            # if doesn't already exist or is unchanged then update it
            new_service_entries = keyring_dict.get(service, {})
            new_service_entries[username] = password
            keyring_dict[service] = new_service_entries
            save_result = self._save_keyring(keyring_dict)
            if save_result == self.OK:
                self._keyring_dict = keyring_dict
                return
            else:
                raise errors.PasswordSetError(
                    'Failed write after conflict detected')
        else:
            raise errors.PasswordSetError(
                'Conflict detected, service:%s and username:%s was '
                'set to a different value by someone else' % (
                    service,
                    username,
                ),
            )
    raise errors.PasswordSetError('Could not save keyring')
|
Set password for the username of the service
|
def _add_child(self, collection, set, child):
"""Adds 'child' to 'collection', first checking 'set' to see if it's
already present."""
added = None
for c in child:
if c not in set:
set.add(c)
collection.append(c)
added = 1
if added:
self._children_reset()
|
Adds 'child' to 'collection', first checking 'set' to see if it's
already present.
|
def resolve_inputs(self, layers):
    '''Resolve the names of inputs for this layer into shape tuples.

    Parameters
    ----------
    layers : list of :class:`Layer`
        Layers that are available for resolving inputs.

    Raises
    ------
    theanets.util.ConfigurationError :
        If an input cannot be resolved.
    '''
    def _entry(name, shape):
        # Entries with an unknown shape are looked up among the layers.
        if shape is None:
            return self._resolve_shape(name, layers)
        return name, shape
    self._input_shapes = dict(
        _entry(name, shape) for name, shape in self._input_shapes.items())
|
Resolve the names of inputs for this layer into shape tuples.
Parameters
----------
layers : list of :class:`Layer`
A list of the layers that are available for resolving inputs.
Raises
------
theanets.util.ConfigurationError :
If an input cannot be resolved.
|
def _document_structure(self):
    """Write the dataset structure parameters and the dtool README."""
    logger.debug("Documenting dataset structure")
    structure_key = self.get_structure_key()
    structure_text = json.dumps(
        self._structure_parameters, indent=2, sort_keys=True)
    self.put_text(structure_key, structure_text)
    self.put_text(self.get_dtool_readme_key(), self._dtool_readme_txt)
|
Document the structure of the dataset.
|
def ip_address_list(ips):
    """Validate a single IP address, or expand an IP network to its hosts."""
    # A plain address parses directly and is returned as-is.
    try:
        single = ip_address(ips)
    except ValueError:
        pass
    else:
        return single
    # Otherwise treat it as an ipaddress.IPv[4|6]Network and expand it.
    return list(ipaddress.ip_network(u(ips)).hosts())
|
IP address range validation and expansion.
|
def cov_error(self, comp_cov, score_metric="frobenius"):
    """Computes the covariance error vs. comp_cov.

    May require self.path_

    Parameters
    ----------
    comp_cov : array-like, shape = (n_features, n_features)
        The precision to compare with, normally the test sample
        covariance/precision.

    score_metric : str
        Norm used for the error between the estimated
        self.precision/self.covariance and the reference `comp_cov`:
        - 'frobenius' (default): sqrt(tr(A^t.A))
        - 'spectral': sqrt(max(eigenvalues(A^t.A)))
        - 'kl': kl-divergence
        - 'quadratic': quadratic loss
        - 'log_likelihood': negative log likelihood

    Returns
    -------
    The error between `self.covariance_` and `comp_cov`: a scalar for a
    single estimate, or an array with one error per matrix along the
    regularization path when self.precision_ is a list.
    """
    if not isinstance(self.precision_, list):
        # Single estimate: return a scalar error.
        return _compute_error(
            comp_cov, self.covariance_, self.precision_, score_metric
        )
    # One error per regularization value along the path.
    errors = [
        _compute_error(
            comp_cov,
            self.covariance_[idx],
            self.precision_[idx],
            score_metric,
        )
        for idx in range(len(self.path_))
    ]
    return np.array(errors)
|
Computes the covariance error vs. comp_cov.
May require self.path_
Parameters
----------
comp_cov : array-like, shape = (n_features, n_features)
The precision to compare with.
This should normally be the test sample covariance/precision.
scaling : bool
If True, the squared error norm is divided by n_features.
If False (default), the squared error norm is not rescaled.
score_metric : str
The type of norm used to compute the error between the estimated
self.precision, self.covariance and the reference `comp_cov`.
Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A)))
- 'kl': kl-divergence
- 'quadratic': quadratic loss
- 'log_likelihood': negative log likelihood
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
The min error between `self.covariance_` and `comp_cov`.
If self.precision_ is a list, returns errors for each matrix, otherwise
returns a scalar.
|
def _get_trailing_whitespace(marker, s):
"""Return the whitespace content trailing the given 'marker' in string 's',
up to and including a newline.
"""
suffix = ''
start = s.index(marker) + len(marker)
i = start
while i < len(s):
if s[i] in ' \t':
suffix += s[i]
elif s[i] in '\r\n':
suffix += s[i]
if s[i] == '\r' and i + 1 < len(s) and s[i + 1] == '\n':
suffix += s[i + 1]
break
else:
break
i += 1
return suffix
|
Return the whitespace content trailing the given 'marker' in string 's',
up to and including a newline.
|
async def ehlo(self, from_host=None):
    """
    Sends a SMTP 'EHLO' command, identifying the client and starting the
    session.

    If the given ``from_host`` is None, it defaults to the client FQDN.
    For further details, please check out `RFC 5321 § 4.1.1.1`_.

    Args:
        from_host (str or None): Name to use to identify the client.

    Raises:
        ConnectionResetError: If the connection with the server is
            unexpectedly lost.
        SMTPCommandFailedError: If the server refuses our EHLO greeting.

    Returns:
        (int, str): A (code, message) 2-tuple containing the server
            response.

    .. _`RFC 5321 § 4.1.1.1`: https://tools.ietf.org/html/rfc5321#section-4.1.1.1
    """
    hostname = self.fqdn if from_host is None else from_host
    code, message = await self.do_cmd("EHLO", hostname)
    self.last_ehlo_response = (code, message)
    # Record what the server advertises: ESMTP extensions and the
    # supported authentication mechanisms.
    extensions, auth_mechanisms = SMTP.parse_esmtp_extensions(message)
    self.esmtp_extensions = extensions
    self.auth_mechanisms = auth_mechanisms
    self.supports_esmtp = True
    return code, message
|
Sends a SMTP 'EHLO' command. - Identifies the client and starts the
session.
If the given ``from_host`` is None, it defaults to the client FQDN.
For further details, please check out `RFC 5321 § 4.1.1.1`_.
Args:
from_host (str or None): Name to use to identify the client.
Raises:
ConnectionResetError: If the connection with the server is
unexpectedly lost.
SMTPCommandFailedError: If the server refuses our EHLO greeting.
Returns:
(int, str): A (code, message) 2-tuple containing the server
response.
.. _`RFC 5321 § 4.1.1.1`: https://tools.ietf.org/html/rfc5321#section-4.1.1.1
|
def get_identity(identity):
    """ Returns a (user_obj, None) tuple for user-like identities or a
    (None, group_obj) tuple for groups; raises NotUserNorGroup otherwise.
    """
    # Anonymous users are treated like regular users.
    if isinstance(identity, AnonymousUser):
        return identity, None
    if isinstance(identity, get_user_model()):
        return identity, None
    if isinstance(identity, Group):
        return None, identity
    # pragma: no cover
    raise NotUserNorGroup(
        'User/AnonymousUser or Group instance is required '
        '(got {})'.format(identity),
    )
|
Returns a (user_obj, None) tuple or a (None, group_obj) tuple depending on the considered
instance.
|
def template_filter(self, param=None):
    """Returns a decorator that registers the wrapped function as a
    template filter.

    The filter is stored in ``self.filters`` under ``param`` when given,
    otherwise under the wrapped function's own name.

    :param param: Optional name to use instead of the name of the function
        to be wrapped
    :return: A decorator to wrap a template filter function
    :rtype: callable
    """
    def register(func):
        self.filters[param or func.__name__] = func
        return func
    return register
|
Returns a decorator that adds the wrapped function to dictionary of template filters.
The wrapped function is keyed by either the supplied param (if supplied)
or by the wrapped functions name.
:param param: Optional name to use instead of the name of the function to be wrapped
:return: A decorator to wrap a template filter function
:rtype: callable
|
def put(self, request, bot_id, id, format=None):
    """
    Update existing Telegram chat state
    ---
    serializer: TelegramChatStateSerializer
    responseMessages:
        - code: 401
          message: Not authenticated
        - code: 400
          message: Not valid request
    """
    # NOTE: the YAML section of the docstring above is parsed by the API
    # documentation generator -- keep its formatting intact.
    # All update logic lives in the parent detail view; this override only
    # exists to attach the endpoint documentation above.
    return super(TelegramChatStateDetail, self).put(request, bot_id, id, format)
|
Update existing Telegram chat state
---
serializer: TelegramChatStateSerializer
responseMessages:
- code: 401
message: Not authenticated
- code: 400
message: Not valid request
|
def add_schema(self, database, schema):
    """Add a schema to the set of known schemas (case-insensitive).

    :param str database: The database name to add.
    :param str schema: The schema name to add.
    """
    # Names are lower-cased so membership tests are case-insensitive.
    entry = (_lower(database), _lower(schema))
    self.schemas.add(entry)
|
Add a schema to the set of known schemas (case-insensitive)
:param str database: The database name to add.
:param str schema: The schema name to add.
|
def load_lists(keys=(), values=(), name='NT'):
    """ Map namedtuples given a pair of key, value lists.

    :param keys: iterable of field names.
    :param values: iterable of values, zipped pairwise with ``keys``.
    :param name: name given to the generated namedtuple type.
    :return: result of :func:`mapper` over the zipped mapping.
    """
    # Immutable tuple defaults replace the original mutable-list defaults,
    # avoiding the shared-mutable-default pitfall; callers are unaffected.
    mapping = dict(zip(keys, values))
    return mapper(mapping, _nt_name=name)
|
Map namedtuples given a pair of key, value lists.
|
def from_etree(cls, etree_element):
    """
    creates a ``SaltLayer`` instance from the etree representation of a
    <layers> element from a SaltXMI file.
    """
    instance = SaltElement.from_etree(etree_element)
    # TODO: this looks dangerous, ask Stackoverflow about it!
    # re-brand the generic SaltElement as a SaltLayer
    instance.__class__ = SaltLayer.mro()[0]
    # attach the ids of nodes and edges that belong to this layer (if any)
    for attr in ('nodes', 'edges'):
        ids = []
        attr_values = etree_element.xpath('@'+attr)
        if attr_values:
            ids.extend(int(elem_id)
                       for elem_id in DIGITS.findall(attr_values[0]))
        setattr(instance, attr, ids)
    return instance
|
creates a ``SaltLayer`` instance from the etree representation of an
<layers> element from a SaltXMI file.
|
def pattern_filter(items, whitelist=None, blacklist=None, key=None):
    """Filter `items` by a regular expression `whitelist` and/or `blacklist`,
    the `blacklist` taking precedence. An optional `key` function maps each
    item before matching.
    """
    key = key or __return_self
    if whitelist:
        allowed = _filter(items, whitelist, key)
        if blacklist:
            # Blacklisted entries win over whitelisted ones.
            allowed.difference_update(_filter(items, blacklist, key))
        return [item for item in items if key(item) in allowed]
    if blacklist:
        denied = _filter(items, blacklist, key)
        return [item for item in items if key(item) not in denied]
    return items
|
This filters `items` by a regular expression `whitelist` and/or
`blacklist`, with the `blacklist` taking precedence. An optional `key`
function can be provided that will be passed each item.
|
def body(self, value):
    """Sets the request body; handles logging and length measurement."""
    self.__body = value
    if value is None:
        self.headers.pop('content-length', None)
    else:
        # Avoid calling len() which cannot exceed 4GiB in 32-bit python.
        body_length = getattr(
            self.__body, 'length', None) or len(self.__body)
        self.headers['content-length'] = str(body_length)
    # This line ensures we don't try to print large requests.
    if not isinstance(value, (type(None), six.string_types)):
        self.loggable_body = '<media body>'
|
Sets the request body; handles logging and length measurement.
|
def get_user_config_dir():
    """
    Return the path to the user s-tui config directory.

    Follows the XDG base-directory convention: ``$XDG_CONFIG_HOME/s-tui``
    when XDG_CONFIG_HOME is set and non-empty, otherwise ``~/.config/s-tui``.
    """
    user_home = os.getenv('XDG_CONFIG_HOME')
    # os.getenv returns None when unset; an empty value is equally invalid.
    # A single truthiness test covers both (the original checked
    # 'is None or not', which was redundant).
    if not user_home:
        config_path = os.path.expanduser(os.path.join('~', '.config', 's-tui'))
    else:
        config_path = os.path.join(user_home, 's-tui')
    return config_path
|
Return the path to the user s-tui config directory
|
def draw_on_image(self,
                  image,
                  color=(0, 255, 0), color_face=None,
                  color_lines=None, color_points=None,
                  alpha=1.0, alpha_face=None,
                  alpha_lines=None, alpha_points=None,
                  size=1, size_lines=None, size_points=None,
                  raise_if_out_of_image=False):
    """
    Draw every polygon in this instance onto an image.

    Parameters
    ----------
    image : (H,W,C) ndarray
        Target image; usually has the same shape as set in
        ``PolygonsOnImage.shape``.

    color : iterable of int, optional
        Base color for the whole polygons (channel layout of the image,
        usually RGB). The defaults for `color_face`, `color_lines` and
        `color_points` are derived from it; it has no effect once all
        three are set to something other than ``None``.

    color_face : None or iterable of int, optional
        Color of the inner polygon areas (excluding perimeters).
        ``None`` derives it as ``color * 1.0``.

    color_lines : None or iterable of int, optional
        Color of the polygon perimeters/borders.
        ``None`` derives it as ``color * 0.5``.

    color_points : None or iterable of int, optional
        Color of the polygon corner points.
        ``None`` derives it as ``color * 0.5``.

    alpha : float, optional
        Base opacity of the whole polygons (``1.0`` fully visible,
        ``0.0`` invisible). The defaults for `alpha_face`, `alpha_lines`
        and `alpha_points` are derived from it; it has no effect once
        all three are set to something other than ``None``.

    alpha_face : None or number, optional
        Opacity of the inner areas; ``None`` derives ``alpha * 0.5``.

    alpha_lines : None or number, optional
        Opacity of the perimeters; ``None`` derives ``alpha * 1.0``.

    alpha_points : None or number, optional
        Opacity of the corner points; currently an on/off choice, i.e.
        only ``0.0`` or ``1.0`` are allowed.
        ``None`` derives ``alpha * 1.0``.

    size : int, optional
        Base size of the polygons; line and point sizes are derived from
        it unless explicitly set.

    size_lines : None or int, optional
        Perimeter/border thickness; ``None`` derives it from `size`.

    size_points : int, optional
        Corner points are drawn as ``C x C`` squares for size ``C``.

    raise_if_out_of_image : bool, optional
        If True, raise an error when a polygon lies fully outside the
        image; otherwise only the parts inside the image are drawn.

    Returns
    -------
    image : (H,W,C) ndarray
        Image with drawn polygons.
    """
    # All polygons share the same drawing options; collect them once.
    shared_kwargs = dict(
        color=color,
        color_face=color_face,
        color_lines=color_lines,
        color_points=color_points,
        alpha=alpha,
        alpha_face=alpha_face,
        alpha_lines=alpha_lines,
        alpha_points=alpha_points,
        size=size,
        size_lines=size_lines,
        size_points=size_points,
        raise_if_out_of_image=raise_if_out_of_image,
    )
    result = image
    for polygon in self.polygons:
        result = polygon.draw_on_image(result, **shared_kwargs)
    return result
|
Draw all polygons onto a given image.
Parameters
----------
image : (H,W,C) ndarray
The image onto which to draw the bounding boxes.
This image should usually have the same shape as set in
``PolygonsOnImage.shape``.
color : iterable of int, optional
The color to use for the whole polygons.
Must correspond to the channel layout of the image. Usually RGB.
The values for `color_face`, `color_lines` and `color_points`
will be derived from this color if they are set to ``None``.
This argument has no effect if `color_face`, `color_lines`
and `color_points` are all set anything other than ``None``.
color_face : None or iterable of int, optional
The color to use for the inner polygon areas (excluding perimeters).
Must correspond to the channel layout of the image. Usually RGB.
If this is ``None``, it will be derived from ``color * 1.0``.
color_lines : None or iterable of int, optional
The color to use for the lines (aka perimeters/borders) of the
polygons. Must correspond to the channel layout of the image.
Usually RGB. If this is ``None``, it will be derived
from ``color * 0.5``.
color_points : None or iterable of int, optional
The color to use for the corner points of the polygons.
Must correspond to the channel layout of the image. Usually RGB.
If this is ``None``, it will be derived from ``color * 0.5``.
alpha : float, optional
The opacity of the whole polygons, where ``1.0`` denotes
completely visible polygons and ``0.0`` invisible ones.
The values for `alpha_face`, `alpha_lines` and `alpha_points`
will be derived from this alpha value if they are set to ``None``.
This argument has no effect if `alpha_face`, `alpha_lines`
and `alpha_points` are all set anything other than ``None``.
alpha_face : None or number, optional
The opacity of the polygon's inner areas (excluding the perimeters),
where ``1.0`` denotes completely visible inner areas and ``0.0``
invisible ones.
If this is ``None``, it will be derived from ``alpha * 0.5``.
alpha_lines : None or number, optional
The opacity of the polygon's lines (aka perimeters/borders),
where ``1.0`` denotes completely visible perimeters and ``0.0``
invisible ones.
If this is ``None``, it will be derived from ``alpha * 1.0``.
alpha_points : None or number, optional
The opacity of the polygon's corner points, where ``1.0`` denotes
completely visible corners and ``0.0`` invisible ones.
Currently this is an on/off choice, i.e. only ``0.0`` or ``1.0``
are allowed.
If this is ``None``, it will be derived from ``alpha * 1.0``.
size : int, optional
Size of the polygons.
The sizes of the line and points are derived from this value,
unless they are set.
size_lines : None or int, optional
Thickness of the polygon lines (aka perimeter/border).
If ``None``, this value is derived from `size`.
size_points : int, optional
The size of all corner points. If set to ``C``, each corner point
will be drawn as a square of size ``C x C``.
raise_if_out_of_image : bool, optional
Whether to raise an error if any polygon is fully
outside of the image. If set to False, no error will be raised and
only the parts inside the image will be drawn.
Returns
-------
image : (H,W,C) ndarray
Image with drawn polygons.
|
def send_highspeed(self, data, progress_callback):
    """Stream a binary script to the device as fast as possible.

    Periodically invokes *progress_callback* while the download runs.

    Args:
        data (bytes): The binary blob to download to the device.
        progress_callback (callable): Called periodically as
            ``callback(done_count, total_count)`` with two integers to
            report transfer progress.

    Raises:
        HardwareError: If we are not in a connected state.
        ArgumentError: If *data* is a text string rather than bytes.
    """
    if not self.connected:
        raise HardwareError("Cannot send a script if we are not in a connected state")

    # Reject text strings (on py3 str is never bytes; on py2 str *is* bytes,
    # so this only triggers for genuine unicode input).
    is_text = isinstance(data, str) and not isinstance(data, bytes)
    if is_text:
        raise ArgumentError("You must send bytes or bytearray to _send_highspeed", type=type(data))

    payload = data if isinstance(data, bytes) else bytes(data)

    self._on_progress = progress_callback
    try:
        self._loop.run_coroutine(self.adapter.send_script(0, payload))
    finally:
        # Always clear the callback, even if the transfer fails.
        self._on_progress = None
|
Send a script to a device at highspeed, reporting progress.
This method takes a binary blob and downloads it to the device as fast
as possible, calling the passed progress_callback periodically with
updates on how far it has gotten.
Args:
data (bytes): The binary blob that should be sent to the device at highspeed.
progress_callback (callable): A function that will be called periodically to
report progress. The signature must be callback(done_count, total_count)
where done_count and total_count will be passed as integers.
|
def solve_mbar(u_kn_nonzero, N_k_nonzero, f_k_nonzero, solver_protocol=None):
    """Solve MBAR self-consistent equations using some sequence of equation solvers.

    Parameters
    ----------
    u_kn_nonzero : np.ndarray, shape=(n_states, n_samples), dtype='float'
        The reduced potential energies, i.e. -log unnormalized probabilities
        for the nonempty states
    N_k_nonzero : np.ndarray, shape=(n_states), dtype='int'
        The number of samples in each state for the nonempty states
    f_k_nonzero : np.ndarray, shape=(n_states), dtype='float'
        The reduced free energies for the nonempty states
    solver_protocol : tuple(dict()), optional, default=None
        Optional list of dictionaries of steps in solver protocol.
        If None, a default protocol will be used.

    Returns
    -------
    f_k : np.ndarray
        The converged reduced free energies.
    all_results : list(dict())
        List of results from each step of solver_protocol, followed by a
        string reporting the final gradient norm. Each dict element is
        the results dictionary from solve_mbar_once() for that step.

    Notes
    -----
    This function requires that N_k_nonzero > 0--that is, you should have
    already dropped all the states for which you have no samples.
    Internally, this function works in a reduced coordinate system defined
    by subtracting off the first component of f_k and fixing that component
    to be zero.

    This function calls `solve_mbar_once()` multiple times to achieve
    converged results. Generally, a single call to solve_mbar_once()
    will not give fully converged answers because of limited numerical
    precision. Each call to `solve_mbar_once()` re-conditions the
    nonlinear equations using the current guess.
    """
    if solver_protocol is None:
        solver_protocol = DEFAULT_SOLVER_PROTOCOL
    # Work on shallow copies so the caller's protocol dicts are not
    # mutated when we fill in the default solver method (the previous
    # version wrote 'method' back into the caller's dictionaries).
    solver_protocol = [dict(options) for options in solver_protocol]
    for options in solver_protocol:
        if options['method'] is None:
            options['method'] = DEFAULT_SOLVER_METHOD

    all_results = []
    for options in solver_protocol:
        f_k_nonzero, results = solve_mbar_once(u_kn_nonzero, N_k_nonzero, f_k_nonzero, **options)
        all_results.append(results)
    all_results.append("Final gradient norm: %.3g" % np.linalg.norm(mbar_gradient(u_kn_nonzero, N_k_nonzero, f_k_nonzero)))
    return f_k_nonzero, all_results
|
Solve MBAR self-consistent equations using some sequence of equation solvers.
Parameters
----------
u_kn_nonzero : np.ndarray, shape=(n_states, n_samples), dtype='float'
The reduced potential energies, i.e. -log unnormalized probabilities
for the nonempty states
N_k_nonzero : np.ndarray, shape=(n_states), dtype='int'
The number of samples in each state for the nonempty states
f_k_nonzero : np.ndarray, shape=(n_states), dtype='float'
The reduced free energies for the nonempty states
solver_protocol: tuple(dict()), optional, default=None
Optional list of dictionaries of steps in solver protocol.
If None, a default protocol will be used.
Returns
-------
f_k : np.ndarray
The converged reduced free energies.
all_results : list(dict())
List of results from each step of solver_protocol. Each element in
list contains the results dictionary from solve_mbar_once()
for the corresponding step.
Notes
-----
This function requires that N_k_nonzero > 0--that is, you should have
already dropped all the states for which you have no samples.
Internally, this function works in a reduced coordinate system defined
by subtracting off the first component of f_k and fixing that component
to be zero.
This function calls `solve_mbar_once()` multiple times to achieve
converged results. Generally, a single call to solve_mbar_once()
will not give fully converged answers because of limited numerical precision.
Each call to `solve_mbar_once()` re-conditions the nonlinear
equations using the current guess.
|
def _get_config(config_file):
'''find, read and parse configuraton.'''
parser = ConfigParser.SafeConfigParser()
if os.path.lexists(config_file):
try:
log.info('Reading config: %s', config_file)
inp = open(config_file)
parser.readfp(inp)
return parser
except (IOError, ConfigParser.ParsingError), err:
raise ConfigError("Failed to read configuration %s\n%s" % (config_file, err))
return None
|
Find, read and parse the configuration file.
|
def listen(self, address, ssl=False, family=0, flags=0, ipc=False, backlog=128):
    """Create a new transport, bind it to *address*, and start listening
    for new connections.

    See :func:`create_server` for a description of *address* and the
    supported keyword arguments.
    """
    handles = []
    handle_args = ()
    # Dispatch on the address form: a string selects a named pipe, a
    # (host, port) tuple selects TCP, and an existing pyuv.Stream is
    # adopted as-is without binding.
    if isinstance(address, six.string_types):
        handle_type = pyuv.Pipe
        handle_args = (ipc,)
        addresses = [address]
    elif isinstance(address, tuple):
        handle_type = pyuv.TCP
        # Resolve host/port into one or more socket addresses; a separate
        # listening handle is created for each resolved address.
        result = getaddrinfo(address[0], address[1], family, socket.SOCK_STREAM,
                             socket.IPPROTO_TCP, flags)
        addresses = [res[4] for res in result]
    elif isinstance(address, pyuv.Stream):
        handles.append(address)
        addresses = []
    else:
        raise TypeError('expecting a string, tuple or pyuv.Stream')
    for addr in addresses:
        handle = handle_type(self._hub.loop, *handle_args)
        try:
            # If the compat helper fully handled the bind, keep this handle
            # and stop trying any remaining addresses.
            if compat.pyuv_pipe_helper(handle, handle_args, 'bind', addr):
                handles.append(handle)
                break
            handle.bind(addr)
        except pyuv.error.UVError as e:
            # Bind failures are non-fatal: log and try the next address.
            # NOTE(review): `e[0]` is Python-2 style exception indexing; on
            # Python 3 this would itself raise TypeError -- verify.
            self._log.warning('bind error {!r}, skipping {}', e[0], saddr(addr))
            continue
        handles.append(handle)
    # Reuse the name to collect the addresses we actually bound to.
    addresses = []
    for handle in handles:
        if backlog is not None:
            callback = functools.partial(self._on_new_connection, ssl=ssl)
            handle.listen(callback, backlog)
        addr = handle.getsockname()
        self._log.debug('listen on {}', saddr(addr))
        addresses.append(addr)
    self._handles += handles
    self._addresses += addresses
|
Create a new transport, bind it to *address*, and start listening
for new connections.
See :func:`create_server` for a description of *address* and the
supported keyword arguments.
|
def _spectrogram_mono(self, x):
    '''Compute the power spectrogram of a single mono channel.

    x.shape : (None, 1, len_src);
    returns a 2D batch of a mono power-spectrogram.
    '''
    # Move the channel axis last and add a dummy image-channel axis so
    # the signal can be fed through 2D convolutions.
    frames = K.expand_dims(K.permute_dimensions(x, [0, 2, 1]), 3)
    stride = (self.n_hop, 1)
    conv_kwargs = dict(strides=stride,
                       padding=self.padding,
                       data_format='channels_last')
    # Convolve against the real and imaginary DFT kernels separately.
    real_part = K.conv2d(frames, self.dft_real_kernels, **conv_kwargs)
    imag_part = K.conv2d(frames, self.dft_imag_kernels, **conv_kwargs)
    # Squared DFT magnitude; shape here is (batch_sample, n_frame, 1, freq).
    power = real_part ** 2 + imag_part ** 2
    if self.image_data_format == 'channels_last':
        return K.permute_dimensions(power, [0, 3, 1, 2])
    return K.permute_dimensions(power, [0, 2, 3, 1])
|
x.shape : (None, 1, len_src),
returns 2D batch of a mono power-spectrogram
|
def _check_branching(X, Xsamples, restart, threshold=0.25):
    """Check whether a time-series realization constitutes a new branch.

    Parameters
    ----------
    X (np.array): current time series data.
    Xsamples (np.array): list of previous branching samples.
    restart (int): counts number of restart trials.
    threshold (float, optional): sets threshold for attractor
        identification.

    Returns
    -------
    check : bool
        true if branching realization
    Xsamples
        updated list
    """
    check = True
    if restart == 0:
        # The very first realization always starts a branch.
        Xsamples.append(X)
    else:
        for previous in Xsamples:
            endpoint_diff = np.absolute(X[-1, :] - previous[-1, :])
            # A genuinely new attractor must differ in at least two
            # observed variables, so require the second-largest endpoint
            # difference to exceed the threshold.
            if np.partition(endpoint_diff, -2)[-2] < threshold:
                check = False
        if check:
            Xsamples.append(X)
    if check:
        logg.m('realization {}:'.format(restart), 'new branch', v=4)
    else:
        logg.m('realization {}:'.format(restart), 'no new branch', v=4)
    return check, Xsamples
|
\
Check whether time series branches.
Parameters
----------
X (np.array): current time series data.
Xsamples (np.array): list of previous branching samples.
restart (int): counts number of restart trials.
threshold (float, optional): sets threshold for attractor
identification.
Returns
-------
check : bool
true if branching realization
Xsamples
updated list
|
def findHotspot( self, name ):
    """
    Looks up the first hotspot whose name matches the inputed name.

    :param name | <str>

    :return <XNodeHotspot> || None
    """
    matches = (spot for spot in self._hotspots if spot.name() == name)
    return next(matches, None)
|
Finds the hotspot based on the inputed name.
:param name | <str>
:return <XNodeHotspot> || None
|
def main(inputstructs, inputpdbids):
    """Main function. Calls functions for processing, report generation and visualization.

    Exactly one of the two arguments is expected to be provided:
    *inputstructs* is a list of local PDB files (or '-' for stdin);
    *inputpdbids* is a list of PDB IDs to fetch from the RCSB server.
    """
    pdbid, pdbpath = None, None
    # #@todo For multiprocessing, implement better stacktracing for errors
    # Print title and version banner, framed by asterisks.
    title = "* Protein-Ligand Interaction Profiler v%s *" % __version__
    write_message('\n' + '*' * len(title) + '\n')
    write_message(title)
    write_message('\n' + '*' * len(title) + '\n\n')
    outputprefix = config.OUTPUTFILENAME
    if inputstructs is not None:  # Process PDB file(s)
        num_structures = len(inputstructs)
        inputstructs = remove_duplicates(inputstructs)
        read_from_stdin = False
        for inputstruct in inputstructs:
            if inputstruct == '-':
                # '-' means read the whole structure from standard input.
                inputstruct = sys.stdin.read()
                read_from_stdin = True
                if config.RAWSTRING:
                    # Undo escape sequences in raw-string input; the
                    # bytes/str round-trip differs between py2 and py3.
                    if sys.version_info < (3,):
                        inputstruct = bytes(inputstruct).decode('unicode_escape')
                    else:
                        inputstruct = bytes(inputstruct, 'utf8').decode('unicode_escape')
            else:
                if os.path.getsize(inputstruct) == 0:
                    sysexit(2, 'Empty PDB file\n')  # Exit if input file is empty
                if num_structures > 1:
                    # With multiple inputs, write each report into its own
                    # subdirectory named after the input file's basename.
                    basename = inputstruct.split('.')[-2].split('/')[-1]
                    config.OUTPATH = '/'.join([config.BASEPATH, basename])
                    outputprefix = 'report'
            process_pdb(inputstruct, config.OUTPATH, as_string=read_from_stdin, outputprefix=outputprefix)
    else:  # Try to fetch the current PDB structure(s) directly from the RCBS server
        num_pdbids = len(inputpdbids)
        inputpdbids = remove_duplicates(inputpdbids)
        for inputpdbid in inputpdbids:
            pdbpath, pdbid = download_structure(inputpdbid)
            if num_pdbids > 1:
                # Mirror the PDB archive layout: <middle two id chars>/<id>.
                config.OUTPATH = '/'.join([config.BASEPATH, pdbid[1:3].upper(), pdbid.upper()])
                outputprefix = 'report'
            process_pdb(pdbpath, config.OUTPATH, outputprefix=outputprefix)
    # Closing message: point the user at where the results were written.
    if (pdbid is not None or inputstructs is not None) and config.BASEPATH is not None:
        if config.BASEPATH in ['.', './']:
            write_message('\nFinished analysis. Find the result files in the working directory.\n\n')
        else:
            write_message('\nFinished analysis. Find the result files in %s\n\n' % config.BASEPATH)
|
Main function. Calls functions for processing, report generation and visualization.
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.